| Unnamed: 0 (int64, 0-389k) | code (string, 26-79.6k chars) | docstring (string, 1-46.9k chars) |
|---|---|---|
372,600
|
def alias_config_alias_name(self, **kwargs):
config = ET.Element("config")
alias_config = ET.SubElement(config, "alias-config", xmlns="urn:brocade.com:mgmt:brocade-aaa")
alias = ET.SubElement(alias_config, "alias")
name = ET.SubElement(alias, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
372,601
|
def save_to_mat_file(self, parameter_space,
result_parsing_function,
filename, runs):
for key in parameter_space:
if not isinstance(parameter_space[key], list):
parameter_space[key] = [parameter_space[key]]
dimension_labels = [{key: str(parameter_space[key])} for key in
parameter_space.keys() if len(parameter_space[key])
> 1] + [{'runs': range(runs)}]
return savemat(
filename,
{'results':
self.get_results_as_numpy_array(parameter_space,
result_parsing_function,
runs=runs),
'dimension_labels': dimension_labels})
|
Return the results relative to the desired parameter space in the form
of a .mat file.
Args:
parameter_space (dict): dictionary containing
parameter/list-of-values pairs.
result_parsing_function (function): user-defined function, taking a
result dictionary as argument, that can be used to parse the
result files and return a list of values.
filename (path): name of output .mat file.
runs (int): number of runs to gather for each parameter
combination.
|
372,602
|
def group_membership_create(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/group_memberships
api_path = "/api/v2/group_memberships.json"
return self.call(api_path, method="POST", data=data, **kwargs)
|
https://developer.zendesk.com/rest_api/docs/core/group_memberships#create-membership
|
372,603
|
def render_unregistered(error=None):
return template(
read_index_template(),
registered=False,
error=error,
seeder_data=None,
url_id=None,
)
|
Render template file for the unregistered user.
Args:
error (str, default None): Optional error message.
Returns:
str: Template filled with data.
|
372,604
|
def generate_df(js_dict, naming, value="value"):
values = []
dimensions, dim_names = get_dimensions(js_dict, naming)
values = get_values(js_dict, value=value)
output = pd.DataFrame([category + [values[i]]
for i, category in
enumerate(get_df_row(dimensions, naming))])
output.columns = dim_names + [value]
output.index = range(0, len(values))
return output
|
Decode JSON-stat dict into pandas.DataFrame object. Helper method \
that should be called inside from_json_stat().
Args:
js_dict(OrderedDict): OrderedDict with data in JSON-stat format, \
previously deserialized into a python object by \
json.load() or json.loads(), for example.
naming(string): dimension naming. Possible values: 'label' or 'id'.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
output(DataFrame): pandas.DataFrame with converted data.
|
372,605
|
def _handle_github(self):
value = click.prompt(
_BUG + click.style(
,
fg=,
) + click.style(
,
fg=,
) + click.style(
,
fg=,
) + ,
type=click.Choice([
,
,
,
], ),
default=,
)
getattr(self, + value)()
|
Handle exception and submit it as GitHub issue.
|
372,606
|
def strip_to_chains(self, chains, break_at_endmdl = True):
if chains:
chains = set(chains)
self.lines = [l for l in self.lines if not (l.startswith('ATOM') or l.startswith('HETATM') or l.startswith('ANISOU') or l.startswith('TER')) or l[21] in chains]
if break_at_endmdl:
new_lines = []
for l in self.lines:
if l.startswith('ENDMDL'):
new_lines.append(l)
break
new_lines.append(l)
self.lines = new_lines
self._update_structure_lines()
else:
raise Exception()
|
Throw away all ATOM/HETATM/ANISOU/TER lines for chains that are not in the chains list.
|
372,607
|
def update_module_item(self, id, course_id, module_id, module_item_completion_requirement_min_score=None, module_item_completion_requirement_type=None, module_item_external_url=None, module_item_indent=None, module_item_module_id=None, module_item_new_tab=None, module_item_position=None, module_item_published=None, module_item_title=None):
path = {}
data = {}
params = {}
path["course_id"] = course_id
path["module_id"] = module_id
path["id"] = id
if module_item_title is not None:
data["module_item[title]"] = module_item_title
if module_item_position is not None:
data["module_item[position]"] = module_item_position
if module_item_indent is not None:
data["module_item[indent]"] = module_item_indent
if module_item_external_url is not None:
data["module_item[external_url]"] = module_item_external_url
if module_item_new_tab is not None:
data["module_item[new_tab]"] = module_item_new_tab
if module_item_completion_requirement_type is not None:
self._validate_enum(module_item_completion_requirement_type, ["must_view", "must_contribute", "must_submit"])
data["module_item[completion_requirement][type]"] = module_item_completion_requirement_type
if module_item_completion_requirement_min_score is not None:
data["module_item[completion_requirement][min_score]"] = module_item_completion_requirement_min_score
if module_item_published is not None:
data["module_item[published]"] = module_item_published
if module_item_module_id is not None:
data["module_item[module_id]"] = module_item_module_id
self.logger.debug("PUT /api/v1/courses/{course_id}/modules/{module_id}/items/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/courses/{course_id}/modules/{module_id}/items/{id}".format(**path), data=data, params=params, single_item=True)
|
Update a module item.
Update and return an existing module item
|
372,608
|
def _set_boutons_interface(self, buttons):
for id_action, f, d, is_active in buttons:
icon = self.get_icon(id_action)
action = self.addAction(QIcon(icon), d)
action.setEnabled(is_active)
action.triggered.connect(f)
|
Display buttons given by the list of tuples (id,function,description,is_active)
|
372,609
|
def get_vlan_brief_output_vlan_interface_interface_name(self, **kwargs):
config = ET.Element("config")
get_vlan_brief = ET.Element("get_vlan_brief")
config = get_vlan_brief
output = ET.SubElement(get_vlan_brief, "output")
vlan = ET.SubElement(output, "vlan")
vlan_id_key = ET.SubElement(vlan, "vlan-id")
vlan_id_key.text = kwargs.pop('vlan_id')
interface = ET.SubElement(vlan, "interface")
interface_type_key = ET.SubElement(interface, "interface-type")
interface_type_key.text = kwargs.pop('interface_type')
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
372,610
|
def GenerateNetworkedConfigFile(load_hook, normal_class_load_hook, normal_class_dump_hook, **kwargs) -> NetworkedConfigObject:
def NetworkedConfigObjectGenerator(url, safe_load: bool=True):
cfg = NetworkedConfigObject(url=url, load_hook=load_hook, safe_load=safe_load,
normal_class_load_hook=normal_class_load_hook,
normal_class_dump_hook=normal_class_dump_hook)
return cfg
return NetworkedConfigObjectGenerator
|
Generates a factory function that creates a NetworkedConfigObject using the specified hooks.
|
372,611
|
def get_motor_offsets(SERVO_OUTPUT_RAW, ofs, motor_ofs):
import mavutil
self = mavutil.mavfile_global
m = SERVO_OUTPUT_RAW
motor_pwm = m.servo1_raw + m.servo2_raw + m.servo3_raw + m.servo4_raw
motor_pwm *= 0.25
rc3_min = self.param('RC3_MIN', 1100)
rc3_max = self.param('RC3_MAX', 1900)
motor = (motor_pwm - rc3_min) / (rc3_max - rc3_min)
if motor > 1.0:
motor = 1.0
if motor < 0.0:
motor = 0.0
motor_offsets0 = motor_ofs[0] * motor
motor_offsets1 = motor_ofs[1] * motor
motor_offsets2 = motor_ofs[2] * motor
ofs = (ofs[0] + motor_offsets0, ofs[1] + motor_offsets1, ofs[2] + motor_offsets2)
return ofs
|
calculate magnetic field strength from raw magnetometer
|
372,612
|
def _apply_rate(self, max_rate, aggressive=False):
self.log(u"Called _apply_rate")
self.log([u" Aggressive: %s", aggressive])
self.log([u" Max rate: %.3f", max_rate])
regular_fragments = list(self.smflist.regular_fragments)
if len(regular_fragments) <= 1:
self.log(u" The list contains at most one regular fragment, returning")
return
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) == 0:
self.log(u" No regular fragment faster than max rate, returning")
return
self.log_warn(u" Some fragments have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]])
self.log(u"Fixing rate for faster fragments...")
for frag_index, fragment in faster_fragments:
self.smflist.fix_fragment_rate(frag_index, max_rate, aggressive=aggressive)
self.log(u"Fixing rate for faster fragments... done")
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) > 0:
self.log_warn(u" Some fragments still have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]])
|
Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack.
|
372,613
|
def grow(files: hug.types.multiple, in_ext: hug.types.text="short", out_ext: hug.types.text="html",
out_dir: hug.types.text="", recursive: hug.types.smart_boolean=False):
if files == []:
print(text(sys.stdin.read()))
return
print(INTRO)
if recursive:
files = iter_source_code(files, in_ext)
for file_name in files:
with open(file_name, 'r') as input_file:
output_file_name = "{0}.{1}".format(os.path.join(out_dir, ".".join(file_name.split('.')[:-1])), out_ext)
with open(output_file_name, 'w') as output_file:
print(" |-> [{2}]: {3} -> till itHTMLGrowing'))
output_file.write(text(input_file.read()))
print(" |")
print(" | >>> Done Growing! :) <<<")
print("")
|
Grow up your markup
|
372,614
|
def for_kind(kind_map, type_, fallback_key):
if type_ not in kind_map:
if fallback_key not in kind_map:
raise ConfigException('no config for kind %s' % type_)  # message lost in extraction; placeholder text
config = kind_map[fallback_key]
else:
config = kind_map[type_]
if isinstance(config, dict):
if 'type' not in config:  # required-key name lost in extraction; 'type' is a placeholder
raise ConfigException('invalid config for kind %s' % type_)
opts = Options(type_, **config)
else:
opts = Options(type_, config)
return opts
|
Create an Options object from any mapping.
|
372,615
|
def display_col_dp(dp_list, attr_name):
print()
print("---------- {:s} ----------".format(attr_name))
print([getattr(dp, attr_name) for dp in dp_list])
|
Show a value associated with an attribute for each
DataProperty instance in the dp_list.
|
372,616
|
async def send_rpc(self, msg, _context):
service = msg.get('service')
rpc_id = msg.get('rpc_id')
payload = msg.get('payload')
timeout = msg.get('timeout')
response_id = await self.service_manager.send_rpc_command(service, rpc_id, payload,
timeout)
try:
result = await self.service_manager.rpc_results.get(response_id, timeout=timeout)
except asyncio.TimeoutError:
self._logger.warning("RPC 0x%04X on service %s timed out after %f seconds",
rpc_id, service, timeout)
result = dict(result='timeout', response=b'')
return result
|
Send an RPC to a service on behalf of a client.
|
372,617
|
def get_beam(self, ra, dec):
if self.data is None:
return self.wcshelper.beam
else:
psf = self.get_psf_sky(ra, dec)
if not all(np.isfinite(psf)):
return None
return Beam(psf[0], psf[1], psf[2])
|
Get the psf as a :class:`AegeanTools.fits_image.Beam` object.
Parameters
----------
ra, dec : float
The sky position (degrees).
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
The psf at the given location.
|
372,618
|
def create_run(cmd, project, exp, grp):
from benchbuild.utils import schema as s
session = s.Session()
run = s.Run(
command=str(cmd),
project_name=project.name,
project_group=project.group,
experiment_name=exp,
run_group=str(grp),
experiment_group=project.experiment.id)
session.add(run)
session.commit()
return (run, session)
|
Create a new 'run' in the database.
This creates a new transaction in the database and creates a new
run in this transaction. Afterwards we return both the transaction as
well as the run itself. The user is responsible for committing it when
the time comes.
Args:
cmd: The command that has been executed.
project: The project this run belongs to.
exp: The experiment this run belongs to.
grp: The run_group (uuid) we belong to.
Returns:
The inserted tuple representing the run and the session opened with
the new run. Don't forget to commit it at some point.
|
372,619
|
def update(self):
if len(self.__folder_list) == 0:
return self.__folder_list
for i in range(len(self.get())):
try:
self.__folder_list[i]['size'] = self.__folder_size(self.path(i))
except OSError as e:
logger.debug('Cannot get folder size ({}). Error: {}'.format(self.path(i), e))
if e.errno == 13:
self.__folder_list[i]['size'] = '!'  # permission denied (EACCES)
else:
self.__folder_list[i]['size'] = '?'
return self.__folder_list
|
Update the command result attribute.
|
372,620
|
def clean_series(series, *args, **kwargs):
if not series.dtype == np.dtype('O'):
return series
if any_generated((isinstance(v, datetime.datetime) for v in series)):
series = series.apply(clip_datetime)
if any_generated((isinstance(v, basestring) for v in series)):
series = series.apply(encode)
series = series.apply(try_float_int)
return series
|
Ensure all datetimes are valid Timestamp objects and dtype is np.datetime64[ns]
>>> from datetime import timedelta
>>> clean_series(pd.Series([datetime.datetime(1, 1, 1), 9, '1942', datetime.datetime(1970, 10, 23)]))
0 1677-09-22 00:12:44+00:00
1 9
2 1942
3 1970-10-23 00:00:00+00:00
dtype: object
>>> clean_series(pd.Series([datetime.datetime(1, 1, 1), datetime.datetime(3000, 10, 23)]))
0 1677-09-22 00:12:44+00:00
1 2262-04-11 23:47:16.854775+00:00
dtype: datetime64[ns, UTC]
|
372,621
|
def threenum(h5file, var, post_col='weights'):  # default column name lost in extraction; 'weights' is a placeholder
f = h5py.File(h5file, 'r')
d = f[var]
w = f[post_col]
s = d.chunks[0]
n = d.shape[0]
maxval = -np.abs(d[0])
minval = np.abs(d[0])
total = 0
wsum = 0
for x in range(0, n, s):
aN = ~np.logical_or(np.isnan(d[x:x+s]), np.isinf(d[x:x+s]))
d_c = d[x:x+s][aN]
w_c = w[x:x+s][aN]
chunk_max = np.max(d_c)
chunk_min = np.min(d_c)
maxval = chunk_max if chunk_max > maxval else maxval
minval = chunk_min if chunk_min < minval else minval
total += np.sum(w_c*d_c)
wsum += np.sum(w_c)
f.close()
mean = total/float(wsum)
return (minval, maxval, mean)
|
Calculates the three number summary for a variable.
The three number summary is the minimum, maximum and the mean
of the data. Traditionally one would summarise data with the
five number summary: max, min, 1st, 2nd (median), and 3rd quartile.
But quantiles are hard to calculate without sorting the data,
which is hard to do out-of-core.
|
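The chunk loop above only needs running extrema and weighted sums, which is what makes the pass work out-of-core. A minimal in-memory sketch of the same pass, with plain NumPy arrays standing in for the HDF5 datasets:

```python
import numpy as np

# 'd' plays the role of f[var], 'w' the role of f[post_col] (the weights).
d = np.array([1.0, np.nan, 3.0, -2.0, np.inf, 5.0])
w = np.ones_like(d)

ok = ~(np.isnan(d) | np.isinf(d))       # mask NaN/inf, as in the chunk loop
d_c, w_c = d[ok], w[ok]
minval, maxval = d_c.min(), d_c.max()
mean = np.sum(w_c * d_c) / np.sum(w_c)  # weighted mean
print(minval, maxval, mean)             # -> -2.0 5.0 1.75
```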
372,622
|
def build(cls, builder, *args, build_loop=None, **kwargs):
loop = asyncio.new_event_loop()
kwargs['loop'] = loop
args = [arg for arg in args
if not isinstance(arg, asyncio.AbstractEventLoop)]
if asyncio.iscoroutinefunction(builder):
checked_loop = build_loop or asyncio.get_event_loop()
api = checked_loop.run_until_complete(builder(*args, **kwargs))
else:
api = builder(*args, **kwargs)
return cls(api, loop)
|
Build a hardware control API and initialize the adapter in one call
:param builder: the builder method to use (e.g.
:py:meth:`hardware_control.API.build_hardware_simulator`)
:param args: Args to forward to the builder method
:param kwargs: Kwargs to forward to the builder method
|
372,623
|
def rename(name, new_name):
id_ = inspect_container(name)['Id']
log.debug("Renaming container '%s' (ID: %s) to '%s'",
name, id_, new_name)
_client_wrapper('rename', id_, new_name)
return inspect_container(new_name)['Id'] == id_
|
.. versionadded:: 2017.7.0
Renames a container. Returns ``True`` if successful, and raises an error if
the API returns one. If unsuccessful and the API returns no error (should
not happen), then ``False`` will be returned.
name
Name or ID of existing container
new_name
New name to assign to container
CLI Example:
.. code-block:: bash
salt myminion docker.rename foo bar
|
372,624
|
def read_namespaced_replication_controller_status(self, name, namespace, **kwargs):
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_replication_controller_status_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_replication_controller_status_with_http_info(name, namespace, **kwargs)
return data
|
read_namespaced_replication_controller_status # noqa: E501
read status of the specified ReplicationController # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_replication_controller_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the ReplicationController (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1ReplicationController
If the method is called asynchronously,
returns the request thread.
|
372,625
|
def key(self, direction, mechanism, purviews=False, _prefix=None):
return "subsys:{}:{}:{}:{}:{}".format(
self.subsystem_hash, _prefix, direction, mechanism, purviews)
|
Cache key. This is the call signature of |Subsystem.find_mice()|.
|
372,626
|
def encode(self):
buf = bytearray()
for typ in sorted(self.format.keys()):
encoded = None
if typ != 0xFFFF:
(name, marshall) = self.format[typ]
value = getattr(self, name, None)
if value is not None:
try:
encoded = marshall.encode(value)
self.log.debug("Encoded field [{0}] to value {1!r}".format(name, encoded))
except:
self.log.exception("Error encoding key/value: key={0}, value={1!r}".format(name, value))
raise
size = len(encoded) if encoded is not None else 0
packed = struct.pack('!H', typ)  # format string lost in extraction; network-order unsigned short assumed
packed += struct.pack('!H', size)
if encoded is not None:
if isinstance(encoded, bytearray):
encoded = str(encoded)
elif isinstance(encoded, unicode):
encoded = encoded.encode('utf-8')
packed += struct.pack('%ds' % size, encoded)
buf += packed
return buf
|
Return binary string representation of object.
:rtype: str
|
372,627
|
def blocking_start(self, waiting_func=None):
try:
self.start()
self.wait_for_completion(waiting_func)
except KeyboardInterrupt:
while True:
try:
self.stop()
break
except KeyboardInterrupt:
self.logger.warning(
)
|
this function is just a wrapper around the start and
wait_for_completion methods. It starts the queuing thread and then
waits for it to complete. If run by the main thread, it will detect
the KeyboardInterrupt exception (which is what SIGTERM and SIGHUP
have been translated to) and will order the threads to die.
|
372,628
|
def GetRunlevelsLSB(states):
if not states:
return set()
valid = set(["0", "1", "2", "3", "4", "5", "6"])
_LogInvalidRunLevels(states, valid)
return valid.intersection(set(states.split()))
|
Accepts a string and returns a set of strings of numeric LSB runlevels.
|
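The function reduces to a set intersection with the valid runlevels; standalone:

```python
# Tokens that are not valid LSB runlevels are silently dropped.
valid = {"0", "1", "2", "3", "4", "5", "6"}
print(valid.intersection("2 3 4 99".split()))  # -> {'2', '3', '4'}
```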
372,629
|
def merge_from(self, other):
if other.pattern is not None:
self.pattern = other.pattern
if other.format is not None:
self.format = other.format
self.leading_digits_pattern.extend(other.leading_digits_pattern)
if other.national_prefix_formatting_rule is not None:
self.national_prefix_formatting_rule = other.national_prefix_formatting_rule
if other.national_prefix_optional_when_formatting is not None:
self.national_prefix_optional_when_formatting = other.national_prefix_optional_when_formatting
if other.domestic_carrier_code_formatting_rule is not None:
self.domestic_carrier_code_formatting_rule = other.domestic_carrier_code_formatting_rule
|
Merge information from another NumberFormat object into this one.
|
372,630
|
def _init_security(self):
if not self._starttls():
raise SecurityError("Could not start TLS connection")
self._ssl_handshake()
if not self._auth():
raise SecurityError("Could not authorize connection")
|
Initialize a secure connection to the server.
|
372,631
|
def get(self):
ret = {"clients": list(self.saltclients.keys()),
"return": "Welcome"}
self.write(self.serialize(ret))
|
An endpoint to determine salt-api capabilities
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 83
{"clients": ["local", "local_async", "runner", "runner_async"], "return": "Welcome"}
|
372,632
|
def paint(self, painter, option, index):
if option.state & QStyle.State_MouseOver:
styleSheet = self.__style.hover
elif option.state & QStyle.State_Selected:
styleSheet = self.__style.highlight
else:
styleSheet = self.__style.default
self.__label.setStyleSheet(styleSheet)
data = index.model().data(index, Qt.DisplayRole)
self.__label.setText(umbra.ui.common.QVariant_to_string(data))
self.__label.setFixedSize(option.rect.size())
painter.save()
painter.translate(option.rect.topLeft())
self.__label.render(painter)
painter.restore()
|
Reimplements the :meth:`QStyledItemDelegate.paint` method.
|
372,633
|
def get_unique_sample_files(file_samples):
assert isinstance(file_samples, pd.DataFrame)
df = file_samples
df = df.sort_values()
logger.info(, len(df.index))
df.drop_duplicates(, keep=, inplace=True)
logger.info(
, len(df.index))
df[] = df[].apply(lambda x: x[:15])
df.drop_duplicates(, keep=, inplace=True)
logger.info(
, len(df.index))
df.drop(, axis=1, inplace=True)
df.sort_index(inplace=True)
return df
|
Filter file_sample data frame to only keep one file per sample.
Params
------
file_samples : `pandas.DataFrame`
A data frame containing a mapping between file IDs and sample barcodes.
This type of data frame is returned by :meth:`get_file_samples`.
Returns
-------
`pandas.DataFrame`
The filtered data frame.
Notes
-----
In order to remove redundant files in a consistent fashion, the samples are
sorted by file ID, and then the first file for each sample is kept.
|
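The Notes describe the strategy: sort by file ID, then keep the first file per sample. A toy pandas sketch of that strategy; the column names 'file_id' and 'sample_barcode' are placeholders, since the real column literals were stripped from the snippet above:

```python
import pandas as pd

df = pd.DataFrame({
    "file_id": ["f3", "f1", "f2"],
    "sample_barcode": ["S1", "S1", "S2"],  # two files map to sample S1
})
df = df.sort_values("file_id")                           # deterministic order
df = df.drop_duplicates("sample_barcode", keep="first")  # one file per sample
print(df)  # keeps f1 for S1 and f2 for S2
```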
372,634
|
def write(self, s):
b = s.encode(self.encoding)
return super(PtyProcessUnicode, self).write(b)
|
Write the unicode string ``s`` to the pseudoterminal.
Returns the number of bytes written.
|
372,635
|
def sphere_constrained_cubic(dr, a, alpha):
sqrt3 = np.sqrt(3)
b_coeff = a*0.5/sqrt3*(1 - 0.6*sqrt3*alpha)/(0.15 + a*a)
rscl = np.clip(dr, -0.5*sqrt3, 0.5*sqrt3)
a, d = rscl + 0.5*sqrt3, rscl - 0.5*sqrt3
return alpha*d*a*rscl + b_coeff*d*a - d/sqrt3
|
Sphere generated by a cubic interpolant constrained to be (1,0) on
(r0-sqrt(3)/2, r0+sqrt(3)/2), the size of the cube in the (111) direction.
|
372,636
|
def clone(self, folder, git_repository):
os.makedirs(folder)
git.Git().clone(git_repository, folder)
|
Ensures the theme destination folder exists and clones the specified git repository into it.
:param git_repository: git url of the theme folder
:param folder: path of the git managed theme folder
|
372,637
|
def proximal_convex_conj_kl_cross_entropy(space, lam=1, g=None):
lam = float(lam)
if g is not None and g not in space:
raise TypeError('{} is not an element of {}'.format(g, space))
class ProximalConvexConjKLCrossEntropy(Operator):
def __init__(self, sigma):
self.sigma = float(sigma)
super(ProximalConvexConjKLCrossEntropy, self).__init__(
domain=space, range=space, linear=False)
def _call(self, x, out):
import scipy.special
if g is None:
lambw = scipy.special.lambertw(
(self.sigma / lam) * np.exp(x / lam))
else:
lambw = scipy.special.lambertw(
(self.sigma / lam) * g * np.exp(x / lam))
if not np.issubsctype(self.domain.dtype, np.complexfloating):
lambw = lambw.real
lambw = x.space.element(lambw)
out.lincomb(1, x, -lam, lambw)
return ProximalConvexConjKLCrossEntropy
|
r"""Proximal factory of the convex conj of cross entropy KL divergence.
Function returning the proximal factory of the convex conjugate of the
functional F, where F is the cross entropy Kullback-Leibler (KL)
divergence given by::
F(x) = sum_i (x_i ln(pos(x_i)) - x_i ln(g_i) + g_i - x_i) + ind_P(x)
with ``x`` and ``g`` in the linear space ``X``, and ``g`` non-negative.
Here, ``pos`` denotes the nonnegative part, and ``ind_P`` is the indicator
function for nonnegativity.
Parameters
----------
space : `TensorSpace`
Space X which is the domain of the functional F
lam : positive float, optional
Scaling factor.
g : ``space`` element, optional
Data term, positive. If None, it is taken to be the one-element.
Returns
-------
prox_factory : function
Factory for the proximal operator to be initialized.
See Also
--------
proximal_convex_conj_kl : proximal for related functional
Notes
-----
The functional is given by the expression
.. math::
F(x) = \sum_i (x_i \ln(pos(x_i)) - x_i \ln(g_i) + g_i - x_i) +
I_{x \geq 0}(x)
The indicator function :math:`I_{x \geq 0}(x)` is used to restrict the
domain of :math:`F` such that :math:`F` is defined over whole space
:math:`X`. The non-negativity thresholding :math:`pos` is used to define
:math:`F` in the real numbers.
Note that the functional is not well-defined without a prior g. Hence, if g
is omitted this will be interpreted as if g is equal to the one-element.
The convex conjugate :math:`F^*` of :math:`F` is
.. math::
F^*(p) = \sum_i g_i (exp(p_i) - 1)
where :math:`p` is the variable dual to :math:`x`.
The proximal operator of the convex conjugate of :math:`F` is
.. math::
\mathrm{prox}_{\sigma (\lambda F)^*}(x) = x - \lambda
W(\frac{\sigma}{\lambda} g e^{x/\lambda})
where :math:`\sigma` is the step size-like parameter, :math:`\lambda` is
the weighting in front of the function :math:`F`, and :math:`W` is the
Lambert W function (see, for example, the
`Wikipedia article <https://en.wikipedia.org/wiki/Lambert_W_function>`_).
For real-valued input x, the Lambert :math:`W` function is defined only for
:math:`x \geq -1/e`, and it has two branches for values
:math:`-1/e \leq x < 0`. However, for intended use-cases, where
:math:`\lambda` and :math:`g` are positive, the argument of :math:`W`
will always be positive.
`Wikipedia article on Kullback Leibler divergence
<https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence>`_.
For further information about the functional, see for example `this article
<http://ieeexplore.ieee.org/document/1056144/?arnumber=1056144>`_.
The KL cross entropy functional :math:`F`, described above, is related to
another functional also known as the KL divergence. That functional
is often used as a data discrepancy term in inverse problems, when data is
corrupted with Poisson noise. It is obtained by swapping the roles of the
prior and the variable. See the See Also section.
|
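A scalar sanity check of the stated proximal formula: with (lambda F)*(p) = lambda g (exp(p/lambda) - 1), the minimizer of sigma (lambda F)*(p) + (p - x)^2 / 2 must satisfy sigma g exp(p/lambda) + p - x = 0, and the Lambert-W expression should match a numeric root of that condition:

```python
import numpy as np
from scipy.special import lambertw
from scipy.optimize import brentq

sigma, lam, g, x = 0.7, 2.0, 1.5, 0.9

# Closed form from the docstring: x - lam * W((sigma/lam) * g * exp(x/lam))
p_closed = x - lam * lambertw((sigma / lam) * g * np.exp(x / lam)).real

# First-order condition of the defining minimization.
def foc(p):
    return sigma * g * np.exp(p / lam) + p - x

p_numeric = brentq(foc, -50.0, 50.0)
print(p_closed, p_numeric)  # agree to solver tolerance
```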
372,638
|
def put_content(self, content):
r = requests.request(self.method if self.method else 'PUT', self.url, data=content, **self.storage_args)
if self.raise_for_status: r.raise_for_status()
|
Makes a ``PUT`` request with the content in the body.
:raise: :exc:`requests.RequestException` if the response status is not 2xx.
|
372,639
|
def broker_metadata(self, broker_id):
return self._brokers.get(broker_id) or self._bootstrap_brokers.get(broker_id)
|
Get BrokerMetadata
Arguments:
broker_id (int): node_id for a broker to check
Returns:
BrokerMetadata or None if not found
|
372,640
|
def show_qt(qt_class, modal=False, onshow_event=None, force_style=False):
dialog = None
window = anchor()
for d in window.children():
if isinstance(d, qt_class):
dialog = d
if dialog is None:
dialog = qt_class(window)
if force_style:
set_style(dialog, not isinstance(dialog, QtGui.QMenu))
dialog.setAttribute(QtCore.Qt.WA_DeleteOnClose)
pos = QtGui.QCursor.pos()
dialog.move(pos.x(), pos.y())
if onshow_event:
onshow_event(dialog)
if modal:
dialog.exec_()
else:
dialog.show()
dialog.raise_()
return dialog
|
Shows and raises a PyQt window, ensuring it's not duplicated
(if it's duplicated then raise the old one).
qt_class argument should be a class/subclass of QMainWindow, QDialog or any
top-level widget.
onshow_event provides a way to pass a function to execute before the window
is shown on screen; it should be handy with modal windows.
Returns the qt_class instance.
|
372,641
|
def get_cond_latents_at_level(cond_latents, level, hparams):
if cond_latents:
if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]:
return [cond_latent[level] for cond_latent in cond_latents]
elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]:
return cond_latents[level]
|
Returns a single or list of conditional latents at level 'level'.
|
372,642
|
def timeit(unit='s'):
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
start = time.time()
_result = func(*args, **kwargs)
_format(unit, time.time() - start, func.__name__ + '()')  # suffix lost in extraction; '()' is a placeholder
return _result
return inner
return wrapper
|
Measure a function's running time.
:param unit: time unit; one of 's', 'm', 'h' (seconds, minutes, hours)
|
372,643
|
def clear(self):
for f in [x[] for x in self.metadata.values()]:
os.remove(f)
self.metadata = {}
self._flush()
|
Remove all entries from the cache and delete all data.
:return:
|
372,644
|
def bind_column(model, name, column, force=False, recursive=False, copy=False) -> Column:
if not subclassof(model, BaseModel):
raise InvalidModel(f"{model} is not a subclass of BaseModel")
meta = model.Meta
if copy:
column = copyfn(column)
column._name = name
safe_repr = unbound_repr(column)
same_dynamo_name = (
util.index(meta.columns, "dynamo_name").get(column.dynamo_name) or
util.index(meta.indexes, "dynamo_name").get(column.dynamo_name)
)
same_name = (
meta.columns_by_name.get(column.name) or
util.index(meta.indexes, "name").get(column.name)
)
if column.hash_key and column.range_key:
raise InvalidModel(f"Tried to bind {safe_repr} as both a hash and range key.")
if force:
if same_name:
unbind(meta, name=column.name)
if same_dynamo_name:
unbind(meta, dynamo_name=column.dynamo_name)
else:
if same_name:
raise InvalidModel(
f"The column {safe_repr} has the same name as an existing column "
f"or index {same_name}. Did you mean to bind with force=True?")
if same_dynamo_name:
raise InvalidModel(
f"The column {safe_repr} has the same dynamo_name as an existing "
f"column or index {same_name}. Did you mean to bind with force=True?")
if column.hash_key and meta.hash_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different hash_key: {meta.hash_key}")
if column.range_key and meta.range_key:
raise InvalidModel(
f"Tried to bind {safe_repr} but {meta.model} "
f"already has a different range_key: {meta.range_key}")
column.model = meta.model
meta.columns.add(column)
meta.columns_by_name[name] = column
setattr(meta.model, name, column)
if column.hash_key:
meta.hash_key = column
meta.keys.add(column)
if column.range_key:
meta.range_key = column
meta.keys.add(column)
try:
for index in meta.indexes:
refresh_index(meta, index)
except KeyError as e:
raise InvalidModel(
f"Binding column {column} removed a required column for index {unbound_repr(index)}") from e
if recursive:
for subclass in util.walk_subclasses(meta.model):
try:
bind_column(subclass, name, column, force=False, recursive=False, copy=True)
except InvalidModel:
pass
return column
|
Bind a column to the model with the given name.
This method is primarily used during BaseModel.__init_subclass__, although it can be used to easily
attach a new column to an existing model:
.. code-block:: python
import bloop.models
class User(BaseModel):
id = Column(String, hash_key=True)
email = Column(String, dynamo_name="e")
bound = bloop.models.bind_column(User, "email", email)
assert bound is email
# rebind with force, and use a copy
bound = bloop.models.bind_column(User, "email", email, force=True, copy=True)
assert bound is not email
If an existing index refers to this column, it will be updated to point to the new column
using :meth:`~bloop.models.refresh_index`, including recalculating the index projection.
Meta attributes including ``Meta.columns``, ``Meta.hash_key``, etc. will be updated if necessary.
If ``name`` or the column's ``dynamo_name`` conflicts with an existing column or index on the model, raises
:exc:`~bloop.exceptions.InvalidModel` unless ``force`` is True. If ``recursive`` is ``True`` and there are
existing subclasses of ``model``, a copy of the column will attempt to bind to each subclass. The recursive
calls will not force the bind, and will always use a new copy. If ``copy`` is ``True`` then a copy of the
provided column is used. This uses a shallow copy via :meth:`~bloop.models.Column.__copy__`.
:param model:
The model to bind the column to.
:param name:
The name to bind the column as. In effect, used for ``setattr(model, name, column)``
:param column:
The column to bind to the model.
:param force:
Unbind existing columns or indexes with the same name or dynamo_name. Default is False.
:param recursive:
Bind to each subclass of this model. Default is False.
:param copy:
Use a copy of the column instead of the column directly. Default is False.
:return:
The bound column. This is a new column when ``copy`` is True, otherwise the input column.
|
372,645
|
def DeserializeExclusiveData(self, reader):
self.Type = TransactionType.ClaimTransaction
if self.Version != 0:
raise Exception('Format Exception')
numrefs = reader.ReadVarInt()
claims = []
for i in range(0, numrefs):
c = CoinReference()
c.Deserialize(reader)
claims.append(c)
self.Claims = claims
if len(self.Claims) == 0:
raise Exception('Format Exception')
|
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the transaction type is incorrect or if there are no claims.
|
372,646
|
def install(*pkgs, **kwargs):
attributes = kwargs.get('attributes', False)
if not pkgs:
return "Please specify a package or packages to upgrade"
cmd = _quietnix()
cmd.append('--install')
if kwargs.get('attributes', False):
cmd.extend(_zip_flatten('--attr', pkgs))
else:
cmd.extend(pkgs)
out = _run(cmd)
installs = list(itertools.chain.from_iterable(
[s.split()[1:] for s in out['stderr'].splitlines()
if s.startswith('installing')]
))
return [_strip_quotes(s) for s in installs]
|
Installs a single or multiple packages via nix
:type pkgs: list(str)
:param pkgs:
packages to install
:param bool attributes:
Pass the list of packages or single package as attributes, not package names.
default: False
:return: Installed packages. Example element: ``gcc-3.3.2``
:rtype: list(str)
.. code-block:: bash
salt '*' nix.install package [package2 ...]
salt '*' nix.install attributes=True attr.name [attr.name2 ...]
|
372,647
|
def inst(self, *instructions):
for instruction in instructions:
if isinstance(instruction, list):
self.inst(*instruction)
elif isinstance(instruction, types.GeneratorType):
self.inst(*instruction)
elif isinstance(instruction, tuple):
if len(instruction) == 0:
raise ValueError("tuple should have at least one element")
elif len(instruction) == 1:
self.inst(instruction[0])
else:
op = instruction[0]
if op == "MEASURE":
if len(instruction) == 2:
self.measure(instruction[1], None)
else:
self.measure(instruction[1], instruction[2])
else:
params = []
possible_params = instruction[1]
rest = instruction[2:]
if isinstance(possible_params, list):
params = possible_params
else:
rest = [possible_params] + list(rest)
self.gate(op, params, rest)
elif isinstance(instruction, string_types):
self.inst(run_parser(instruction.strip()))
elif isinstance(instruction, Program):
if id(self) == id(instruction):
raise ValueError("Nesting a program inside itself is not supported")
for defgate in instruction._defined_gates:
self.inst(defgate)
for instr in instruction._instructions:
self.inst(instr)
elif isinstance(instruction, DefGate):
defined_gate_names = [gate.name for gate in self._defined_gates]
if instruction.name in defined_gate_names:
warnings.warn("Gate {} has already been defined in this program"
.format(instruction.name))
self._defined_gates.append(instruction)
elif isinstance(instruction, AbstractInstruction):
self._instructions.append(instruction)
self._synthesized_instructions = None
else:
raise TypeError("Invalid instruction: {}".format(instruction))
return self
|
Mutates the Program object by appending new instructions.
This function accepts a number of different valid forms, e.g.
>>> p = Program()
>>> p.inst(H(0)) # A single instruction
>>> p.inst(H(0), H(1)) # Multiple instructions
>>> p.inst([H(0), H(1)]) # A list of instructions
>>> p.inst(H(i) for i in range(4)) # A generator of instructions
>>> p.inst(("H", 1)) # A tuple representing an instruction
>>> p.inst("H 0") # A string representing an instruction
>>> q = Program()
>>> p.inst(q) # Another program
It can also be chained:
>>> p = Program()
>>> p.inst(H(0)).inst(H(1))
:param instructions: A list of Instruction objects, e.g. Gates
:return: self for method chaining
|
372,648
|
def reload(self):
if not self.fd.closed: self.fd.close()
self.fd = open(self.fd.name, 'r')
self.load()
|
Automatically reloads the config file.
This is just an alias for self.load().
|
372,649
|
def cressman_point(sq_dist, values, radius):
weights = tools.cressman_weights(sq_dist, radius)
total_weights = np.sum(weights)
return sum(v * (w / total_weights) for (w, v) in zip(weights, values))
|
r"""Generate a Cressman interpolation value for a point.
The calculated value is based on the given distances and search radius.
Parameters
----------
sq_dist: (N, ) ndarray
Squared distance between observations and grid point
values: (N, ) ndarray
Observation values in same order as sq_dist
radius: float
Maximum distance to search for observations to use for
interpolation.
Returns
-------
value: float
Interpolation value for grid point.
|
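The weights come from tools.cressman_weights; a self-contained sketch assuming the standard Cressman (1959) form w = (R^2 - d^2) / (R^2 + d^2), which the actual helper may refine (e.g. cutoff handling beyond the radius):

```python
import numpy as np

def cressman_weights(sq_dist, radius):
    # Standard Cressman weights; no radius cutoff in this sketch.
    r2 = radius * radius
    return (r2 - sq_dist) / (r2 + sq_dist)

sq_dist = np.array([1.0, 4.0, 9.0])    # squared distances to the grid point
values = np.array([10.0, 20.0, 30.0])  # observations, in the same order
weights = cressman_weights(sq_dist, radius=5.0)
total_weights = np.sum(weights)
print(sum(v * (w / total_weights) for (w, v) in zip(weights, values)))
```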
372,650
|
def set_dependent_orders(
self,
accountID,
tradeSpecifier,
**kwargs
):
request = Request(
'PUT',
'/v3/accounts/{accountID}/trades/{tradeSpecifier}/orders'
)
request.set_path_param(
'accountID',
accountID
)
request.set_path_param(
'tradeSpecifier',
tradeSpecifier
)
body = EntityDict()
if 'takeProfit' in kwargs:
body.set('takeProfit', kwargs['takeProfit'])
if 'stopLoss' in kwargs:
body.set('stopLoss', kwargs['stopLoss'])
if 'trailingStopLoss' in kwargs:
body.set('trailingStopLoss', kwargs['trailingStopLoss'])
request.set_body_dict(body.dict)
response = self.ctx.request(request)
if response.content_type is None:
return response
if not response.content_type.startswith("application/json"):
return response
jbody = json.loads(response.raw_body)
parsed_body = {}
if str(response.status) == "200":
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.TakeProfitOrderTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderFillTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.StopLossOrderTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderFillTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.TrailingStopLossOrderTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "400":
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.TakeProfitOrderRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.StopLossOrderRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.OrderCancelRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
self.ctx.transaction.TrailingStopLossOrderRejectTransaction.from_dict(
jbody[],
self.ctx
)
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "401":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "404":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
elif str(response.status) == "405":
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
if jbody.get() is not None:
parsed_body[] = \
jbody.get()
else:
parsed_body = jbody
response.body = parsed_body
return response
|
Create, replace and cancel a Trade's dependent Orders (Take Profit,
Stop Loss and Trailing Stop Loss) through the Trade itself
Args:
accountID:
Account Identifier
tradeSpecifier:
Specifier for the Trade
takeProfit:
The specification of the Take Profit to create/modify/cancel.
If takeProfit is set to null, the Take Profit Order will be
cancelled if it exists. If takeProfit is not provided, the
existing Take Profit Order will not be modified. If a sub-
field of takeProfit is not specified, that field will be set to
a default value on create, and be inherited by the replacing
order on modify.
stopLoss:
The specification of the Stop Loss to create/modify/cancel. If
stopLoss is set to null, the Stop Loss Order will be cancelled
if it exists. If stopLoss is not provided, the existing Stop
Loss Order will not be modified. If a sub-field of stopLoss is
not specified, that field will be set to a default value on
create, and be inherited by the replacing order on modify.
trailingStopLoss:
The specification of the Trailing Stop Loss to
create/modify/cancel. If trailingStopLoss is set to null, the
Trailing Stop Loss Order will be cancelled if it exists. If
trailingStopLoss is not provided, the existing Trailing Stop
Loss Order will not be modified. If a sub-field of
trailingStopLoss is not specified, that field will be set to a
default value on create, and be inherited by the replacing
order on modify.
Returns:
v20.response.Response containing the results from submitting the
request
|
372,651
|
def _remove_unicode_encoding(xml_file):
with salt.utils.files.fopen(xml_file, 'rb') as f:
xml_content = f.read()
modified_xml = re.sub(r' encoding=[\'"]+unicode[\'"]+', '', xml_content.decode('utf-16'), count=1)
xmltree = lxml.etree.parse(six.StringIO(modified_xml))
return xmltree
|
Attempts to remove the "encoding='unicode'" attribute from an XML file,
as lxml does not currently support that on a Windows node.
See issue #38100.
|
372,652
|
def _kl_half_normal_half_normal(a, b, name=None):
with tf.name_scope(name or "kl_half_normal_half_normal"):
return (tf.math.log(b.scale) - tf.math.log(a.scale) +
(a.scale**2 - b.scale**2) / (2 * b.scale**2))
|
Calculate the batched KL divergence KL(a || b) with a and b `HalfNormal`.
Args:
a: Instance of a `HalfNormal` distribution object.
b: Instance of a `HalfNormal` distribution object.
name: (optional) Name to use for created operations.
default is "kl_half_normal_half_normal".
Returns:
Batchwise KL(a || b)
|
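A quick Monte Carlo check of the closed form (NumPy here; the snippet itself is TensorFlow): draw x ~ HalfNormal(sigma_a) as |N(0, sigma_a)| and average log p_a(x) - log p_b(x); the sqrt(2/pi) normalizer is common to both densities and cancels.

```python
import numpy as np

rng = np.random.default_rng(0)
sa, sb = 1.0, 2.0

# Closed form from the snippet above.
kl_closed = np.log(sb) - np.log(sa) + (sa**2 - sb**2) / (2 * sb**2)

x = np.abs(rng.normal(0.0, sa, size=1_000_000))
log_ratio = (-x**2 / (2 * sa**2) - np.log(sa)) - (-x**2 / (2 * sb**2) - np.log(sb))
print(kl_closed, log_ratio.mean())  # should agree to ~3 decimals
```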
372,653
|
def _evaluate_rhs(cls, funcs, nodes, problem):
evald_funcs = cls._evaluate_functions(funcs, nodes)
evald_rhs = problem.rhs(nodes, *evald_funcs, **problem.params)
return evald_rhs
|
Compute the value of the right-hand side of the system of ODEs.
Parameters
----------
funcs : list(function)
nodes : numpy.ndarray
problem : TwoPointBVPLike
Returns
-------
evaluated_rhs : list(float)
|
372,654
|
def transfer(self, user):
r = self._h._http_resource(
method='PUT',
resource=('apps', self.name),
data={'app[transfer_owner]': user}
)
return r.ok
|
Transfers app to given username's account.
|
372,655
|
def sensorupdate(self, data):
if not isinstance(data, dict):
raise TypeError()
msg = 'sensor-update '
for key in data.keys():
msg += '"%s" "%s" ' % (self._escape(str(key)),
self._escape(str(data[key])))
self._send(msg)
|
Given a dict of sensors and values, updates those sensors with the
values in Scratch.
|
372,656
|
def SkipAhead(self, file_object, number_of_characters):
lines_size = len(self.lines)
while number_of_characters >= lines_size:
number_of_characters -= lines_size
self.lines = ''
self.ReadLines(file_object)
lines_size = len(self.lines)
if lines_size == 0:
return
self.lines = self.lines[number_of_characters:]
|
Skips ahead a number of characters.
Args:
file_object (dfvfs.FileIO): file-like object.
number_of_characters (int): number of characters.
|
372,657
|
def seek_in_frame(self, pos, *args, **kwargs):
super().seek(self._total_offset + pos, *args, **kwargs)
|
Seeks relative to the total offset of the current contextual frames.
|
372,658
|
def get_thumbnail(original, size, **options):
engine = get_engine()
cache = get_cache_backend()
original = SourceFile(original)
crop = options.get('crop', None)
options = engine.evaluate_options(options)
thumbnail_name = generate_filename(original, size, crop)
if settings.THUMBNAIL_DUMMY:
engine = DummyEngine()
return engine.get_thumbnail(thumbnail_name, engine.parse_size(size), crop, options)
cached = cache.get(thumbnail_name)
force = options is not None and 'force' in options and options['force']
if not force and cached:
return cached
thumbnail = Thumbnail(thumbnail_name, engine.get_format(original, options))
if force or not thumbnail.exists:
size = engine.parse_size(size)
thumbnail.image = engine.get_thumbnail(original, size, crop, options)
thumbnail.save(options)
for resolution in settings.THUMBNAIL_ALTERNATIVE_RESOLUTIONS:
resolution_size = engine.calculate_alternative_resolution_size(resolution, size)
image = engine.get_thumbnail(original, resolution_size, crop, options)
thumbnail.save_alternative_resolution(resolution, image, options)
cache.set(thumbnail)
return thumbnail
|
Creates or gets an already created thumbnail for the given image with the given size and
options.
:param original: File-path, url or base64-encoded string of the image that you want a
thumbnail of.
:param size: String with the wanted thumbnail size. On the form: ``200x200``, ``200`` or
``x200``.
:param crop: Crop settings, should be ``center``, ``top``, ``right``, ``bottom``, ``left``.
:param force: If set to ``True`` the thumbnail will be created even if it exists before.
:param quality: Overrides ``THUMBNAIL_QUALITY``, will set the quality used by the backend while
saving the thumbnail.
:param scale_up: Overrides ``THUMBNAIL_SCALE_UP``, if set to ``True`` the image will be scaled
up if necessary.
:param colormode: Overrides ``THUMBNAIL_COLORMODE``, The default colormode for thumbnails.
Supports all values supported by pillow. In other engines there is a best
effort translation from pillow modes to the modes supported by the current
engine.
:param format: Overrides the format the thumbnail will be saved in. This will override both the
detected file type as well as the one specified in ``THUMBNAIL_FALLBACK_FORMAT``.
:return: A Thumbnail object
|
372,659
|
def new(self):
if self._initialized:
raise pycdlibexception.PyCdlibInternalError()
self.char_set = _unicodecharset()
self.log_vol_ident = _ostaunicode_zero_pad('', 128)
self.lv_info1 = b'\x00' * 36
self.lv_info2 = b'\x00' * 36
self.lv_info3 = b'\x00' * 36
self.impl_ident = UDFEntityID()
self.impl_ident.new(0, b'*pycdlib', b'')
self.impl_use = b'\x00' * 128
self._initialized = True
|
A method to create a new UDF Implementation Use Volume Descriptor Implementation Use field.
Parameters:
None:
Returns:
Nothing.
|
372,660
|
def plotlyviz(
scomplex,
colorscale=None,
title="Kepler Mapper",
graph_layout="kk",
color_function=None,
color_function_name=None,
dashboard=False,
graph_data=False,
factor_size=3,
edge_linewidth=1.5,
node_linecolor="rgb(200,200,200)",
width=600,
height=500,
bgcolor="rgba(240, 240, 240, 0.95)",
left=10,
bottom=35,
summary_height=300,
summary_width=600,
summary_left=20,
summary_right=20,
hist_left=25,
hist_right=25,
member_textbox_width=800,
filename=None,
):
if not colorscale:
colorscale = default_colorscale
kmgraph, mapper_summary, n_color_distribution = get_mapper_graph(
scomplex,
colorscale=colorscale,
color_function=color_function,
color_function_name=color_function_name,
)
annotation = get_kmgraph_meta(mapper_summary)
plgraph_data = plotly_graph(
kmgraph,
graph_layout=graph_layout,
colorscale=colorscale,
factor_size=factor_size,
edge_linewidth=edge_linewidth,
node_linecolor=node_linecolor,
)
layout = plot_layout(
title=title,
width=width,
height=height,
annotation_text=annotation,
bgcolor=bgcolor,
left=left,
bottom=bottom,
)
result = go.FigureWidget(data=plgraph_data, layout=layout)
if color_function_name:
with result.batch_update():
result.data[1].marker.colorbar.title = color_function_name
result.data[1].marker.colorbar.titlefont.size = 10
if dashboard or graph_data:
fw_hist = node_hist_fig(n_color_distribution, left=hist_left, right=hist_right)
fw_summary = summary_fig(
mapper_summary,
width=summary_width,
height=summary_height,
left=summary_left,
right=summary_right,
)
fw_graph = result
result = hovering_widgets(
kmgraph, fw_graph, member_textbox_width=member_textbox_width
)
if graph_data:
result = ipw.VBox([fw_graph, ipw.HBox([fw_summary, fw_hist])])
if filename:
pio.write_image(result, filename)
return result
|
Visualizations and dashboards for kmapper graphs using Plotly. This method is suitable for use in Jupyter notebooks.
The generated FigureWidget can be updated (by performing a restyle or relayout). For example, let us add a title
to the colorbar (the name of the color function, if any),
and set the title font size. To perform these updates faster, Plotly 3.+ provides a context manager that batches up all data and layout updates:
To display more info on the generated kmapper-graph, define two more FigureWidget(s):
the global node distribution figure, and a dummy figure
that displays info on the algorithms involved in getting the graph from data, as well as sklearn class instances.
A FigureWidget has event listeners for hovering, clicking or selecting. Using the first one for `fw_graph`
we define, via the function `hovering_widgets()`, widgets that display the node distribution, when the node is hovered over, and two textboxes for the cluster size and the member ids/labels of the hovered node members.
Parameters
-----------
scomplex: dict
Simplicial complex is the output from the KeplerMapper `map` method.
title: str
Title of output graphic
graph_layout: igraph layout;
recommended 'kk' (kamada-kawai) or 'fr' (fruchterman-reingold)
colorscale:
Plotly colorscale(colormap) to color graph nodes
dashboard: bool, default is False
If true, display complete dashboard of node information
graph_data: bool, default is False
If true, display graph metadata
factor_size: double, default is 3
a factor for the node size
edge_linewidth : double, default is 1.5
node_linecolor: color str, default is "rgb(200,200,200)"
width: int, default is 600,
height: int, default is 500,
bgcolor: color str, default is "rgba(240, 240, 240, 0.95)",
left: int, default is 10,
bottom: int, default is 35,
summary_height: int, default is 300,
summary_width: int, default is 600,
summary_left: int, default is 20,
summary_right: int, default is 20,
hist_left: int, default is 25,
hist_right: int, default is 25,
member_textbox_width: int, default is 800,
filename: str, default is None
if filename is given, the graphic will be saved to that file.
Returns
---------
result: plotly.FigureWidget
A FigureWidget that can be shown or edited. See the Plotly Demo notebook for examples of use.
|
372,661
|
def redefined_by_decorator(node):
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
|
return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
|
372,662
|
def nextLunarEclipse(date):
eclipse = swe.lunarEclipseGlobal(date.jd, backward=False)
return Datetime.fromJD(eclipse['maximum'], date.utcoffset)
|
Returns the Datetime of the maximum phase of the
next global lunar eclipse.
|
372,663
|
def recurse_taxonomy_map(tax_id_map, tax_id, parent=False):
if parent:
pass
else:
def _child_recurse(tax_id, visited):
try:
children = [tax_id] + list(tax_id_map[tax_id])
except KeyError:
children = [tax_id]
for child in children:
if child not in visited:
visited.append(child)
children.extend(_child_recurse(child, visited))
return children
return list(set(_child_recurse(tax_id, [])))
|
Takes the output dict from make_taxonomy_map and an input tax_id
and recurses either up or down through the tree to get /all/ children
(or parents) of the given tax_id.
|
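A usage sketch with a toy parent-to-children map (the shape a make_taxonomy_map-style helper would produce):

```python
tax_id_map = {1: {2, 3}, 2: {4}}
print(sorted(recurse_taxonomy_map(tax_id_map, 1)))  # -> [1, 2, 3, 4]
```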
372,664
|
def escape_latex(text):
text = unicode(text.decode())
# Standard LaTeX escapes for the characters listed in the docstring
# (the original literals were stripped in extraction).
CHARS = {
'&': r'\&',
'%': r'\%',
'$': r'\$',
'#': r'\#',
'_': r'\_',
'{': r'\{',
'}': r'\}',
'~': r'\textasciitilde{}',
'^': r'\^{}',
'\\': r'\textbackslash{}',
}
escaped = "".join([CHARS.get(char, char) for char in text])
return escaped.encode()
|
r"""Escape characters of given text.
This function takes the given text and escapes characters
that have a special meaning in LaTeX: # $ % ^ & _ { } ~ \
|
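A standalone Python 3 demonstration of the replacement table restored above (the replacement values are the standard LaTeX escapes, an assumption since the original literals were stripped):

```python
CHARS = {
    "&": r"\&", "%": r"\%", "$": r"\$", "#": r"\#", "_": r"\_",
    "{": r"\{", "}": r"\}",
    "~": r"\textasciitilde{}", "^": r"\^{}", "\\": r"\textbackslash{}",
}
text = "50% of $100 is #1_a"
print("".join(CHARS.get(c, c) for c in text))
# -> 50\% of \$100 is \#1\_a
```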
372,665
|
def import_sequence_flow_to_graph(diagram_graph, sequence_flows, process_id, flow_element):
flow_id = flow_element.getAttribute(consts.Consts.id)
name = flow_element.getAttribute(consts.Consts.name) if flow_element.hasAttribute(consts.Consts.name) else ""
source_ref = flow_element.getAttribute(consts.Consts.source_ref)
target_ref = flow_element.getAttribute(consts.Consts.target_ref)
sequence_flows[flow_id] = {consts.Consts.name: name, consts.Consts.source_ref: source_ref,
consts.Consts.target_ref: target_ref}
diagram_graph.add_edge(source_ref, target_ref)
diagram_graph[source_ref][target_ref][consts.Consts.id] = flow_id
diagram_graph[source_ref][target_ref][consts.Consts.process] = process_id
diagram_graph[source_ref][target_ref][consts.Consts.name] = name
diagram_graph[source_ref][target_ref][consts.Consts.source_ref] = source_ref
diagram_graph[source_ref][target_ref][consts.Consts.target_ref] = target_ref
for element in utils.BpmnImportUtils.iterate_elements(flow_element):
if element.nodeType != element.TEXT_NODE:
tag_name = utils.BpmnImportUtils.remove_namespace_from_tag_name(element.tagName)
if tag_name == consts.Consts.condition_expression:
condition_expression = element.firstChild.nodeValue
diagram_graph[source_ref][target_ref][consts.Consts.condition_expression] = {
consts.Consts.id: element.getAttribute(consts.Consts.id),
consts.Consts.condition_expression: condition_expression
}
if consts.Consts.outgoing_flow not in diagram_graph.node[source_ref]:
diagram_graph.node[source_ref][consts.Consts.outgoing_flow] = []
outgoing_list = diagram_graph.node[source_ref][consts.Consts.outgoing_flow]
if flow_id not in outgoing_list:
outgoing_list.append(flow_id)
if consts.Consts.incoming_flow not in diagram_graph.node[target_ref]:
diagram_graph.node[target_ref][consts.Consts.incoming_flow] = []
incoming_list = diagram_graph.node[target_ref][consts.Consts.incoming_flow]
if flow_id not in incoming_list:
incoming_list.append(flow_id)
|
Adds a new edge to graph and a record to sequence_flows dictionary.
Input parameter is object of class xml.dom.Element.
Edges are identified by pair of sourceRef and targetRef attributes of BPMNFlow element. We also
provide a dictionary, that maps sequenceFlow ID attribute with its sourceRef and targetRef.
Method adds basic attributes of sequenceFlow element to edge. Those elements are:
- id - added as edge attribute, we assume that this is a required value,
- name - optional attribute, empty string by default.
:param diagram_graph: NetworkX graph representing a BPMN process diagram,
:param sequence_flows: dictionary (associative list) of sequence flows existing in diagram.
Key attribute is sequenceFlow ID, value is a dictionary consisting of three key-value pairs: "name" (sequence
flow name), "sourceRef" (ID of node, that is a flow source) and "targetRef" (ID of node, that is a flow target),
:param process_id: string object, representing an ID of process element,
:param flow_element: object representing a BPMN XML 'sequenceFlow' element.
|
372,666
|
def collage(img_spec,
num_rows=2,
num_cols=6,
rescale_method='global',
cmap='gray',
annot=None,
padding=5,
bkground_thresh=None,
output_path=None,
figsize=None,
**kwargs):
"Produces a collage of various slices from different orientations in the given 3D image"
num_rows, num_cols, padding = check_params(num_rows, num_cols, padding)
img = read_image(img_spec, bkground_thresh=bkground_thresh)
img = crop_image(img, padding)
img, (min_value, max_value) = check_rescaling_collage(img, rescale_method,
return_extrema=True)
num_slices_per_view = num_rows * num_cols
slices = pick_slices(img, num_slices_per_view)
plt.style.use('dark_background')
num_axes = 3
if figsize is None:
figsize = [3 * num_axes * num_rows, 3 * num_cols]
fig, ax = plt.subplots(num_axes * num_rows, num_cols, figsize=figsize)
if annot is not None:
fig.suptitle(annot, backgroundcolor='black', color='g')  # assumed colors; original strings lost
display_params = dict(interpolation='none', cmap=cmap,
                      aspect='equal', origin='lower',  # assumed values for the elided strings
                      vmin=min_value, vmax=max_value)
ax = ax.flatten()
ax_counter = 0
for dim_index in range(3):
for slice_num in slices[dim_index]:
plt.sca(ax[ax_counter])
ax_counter = ax_counter + 1
slice1 = get_axis(img, dim_index, slice_num)
plt.imshow(slice1, **display_params)
plt.axis('off')
fig.tight_layout()
if output_path is not None:
output_path = output_path.replace(' ', '_')  # assumed: sanitize spaces in the output name
fig.savefig(output_path + '.png', bbox_inches='tight')  # assumed extension and bbox mode
return fig
|
Produces a collage of various slices from different orientations in the given 3D image
|
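A hypothetical call, where 'T1.nii' stands for any 3D image path that read_image accepts:

    fig = collage('T1.nii', num_rows=2, num_cols=6,
                  annot='subject-01', output_path='collage_subject01')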
372,667
|
def register(self, item):
if callable(item) and hasattr(item, '__orb__'):
item = item.__orb__
key = item.name()
model = self.__model
if isinstance(item, orb.Index):
self.__indexes[key] = item
item.setSchema(self)
if model and not hasattr(model, key):
setattr(model, key, classmethod(item))
elif isinstance(item, orb.Collector):
self.__collectors[key] = item
item.setSchema(self)
elif isinstance(item, orb.Column):
self.__columns[key] = item
item.setSchema(self)
|
Registers a new orb object to this schema. This could be a column, index, or collector -- including
a virtual object defined through the orb.virtual decorator.
:param item: <variant>
:return:
|
372,668
|
def selected(self, sel):
ParameterItem.selected(self, sel)
if self.widget is None:
return
if sel and self.param.writable():
self.showEditor()
elif self.hideWidget:
self.hideEditor()
|
Called when this item has been selected (sel=True) OR deselected (sel=False)
|
372,669
|
def sizeof_fmt(num, suffix='B'):
    # unit labels and precision keys reconstructed from the cited SO answer
    precision = {'': 0, 'Ki': 0, 'Mi': 0, 'Gi': 3, 'Ti': 6, 'Pi': 9, 'Ei': 12, 'Zi': 15}
    for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
        if abs(num) < 1024.0:
            format_string = "{number:.%df} {unit}{suffix}" % precision[unit]
            return format_string.format(number=num, unit=unit, suffix=suffix)
        num /= 1024.0
    return "%.18f %s%s" % (num, 'Yi', suffix)
|
Adapted from https://stackoverflow.com/a/1094933
Re: precision - display enough decimals to show progress on a slow (<5 MB/s) Internet connection
|
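A quick sanity check, assuming the binary-prefix unit labels reconstructed above:

    print(sizeof_fmt(0))            # '0 B'
    print(sizeof_fmt(5 * 1024**2))  # '5 MiB'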
372,670
|
def update_nginx_from_config(nginx_config):
    logging.info('Updating nginx configuration')  # assumed log message; original string lost
    temp_dir = tempfile.mkdtemp()
    os.mkdir(os.path.join(temp_dir, 'html'))  # assumed directory name, matching the 502 page below
    _write_nginx_config(constants.NGINX_BASE_CONFIG, os.path.join(temp_dir, constants.NGINX_PRIMARY_CONFIG_NAME))
    _write_nginx_config(nginx_config['http'], os.path.join(temp_dir, constants.NGINX_HTTP_CONFIG_NAME))  # key inferred from constant name
    _write_nginx_config(nginx_config['stream'], os.path.join(temp_dir, constants.NGINX_STREAM_CONFIG_NAME))  # key inferred from constant name
    _write_nginx_config(constants.NGINX_502_PAGE_HTML, os.path.join(temp_dir, 'html', constants.NGINX_502_PAGE_NAME))
    sync_local_path_to_vm(temp_dir, constants.NGINX_CONFIG_DIR_IN_VM)
|
Write the given config to disk as a Dusty sub-config
in the Nginx includes directory. Then, either start nginx
or tell it to reload its config to pick up what we've
just written.
|
372,671
|
def compute_edge_widths(self):
if type(self.edge_width) is str:
edges = self.graph.edges
self.edge_widths = [edges[n][self.edge_width] for n in self.edges]
else:
self.edge_widths = self.edge_width
|
Compute the edge widths.
|
372,672
|
def convertShape(shapeString):
cshape = []
for pointString in shapeString.split():
p = [float(e) for e in pointString.split(",")]
if len(p) == 2:
cshape.append((p[0], p[1], 0.))
elif len(p) == 3:
cshape.append(tuple(p))
else:
raise ValueError(
    'invalid shape point "%s"' % pointString)  # assumed message; original string lost
return cshape
|
Convert xml shape string into float tuples.
This method converts the 2d or 3d shape string from SUMO's xml file
into a list containing 3d float-tuples. Non-existent z coordinates default
to zero. If shapeString is empty, an empty list will be returned.
|
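Example:

    print(convertShape('12.3,4.5 6.7,8.9,1.0'))
    # [(12.3, 4.5, 0.0), (6.7, 8.9, 1.0)]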
372,673
|
def _get_hourly_data(self, day_date, p_p_id):
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesConsommationHoraires",
"p_p_cacheability": "cacheLevelPage",
"p_p_col_id": "column-2",
"p_p_col_count": 1,
"date": day_date,
}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get hourly data")
try:
json_output = yield from raw_res.json(content_type='application/json')  # assumed content type
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get hourly data")
hourly_consumption_data = json_output['results']['listeDonneesConsoEnergieHoraire']  # assumed API key names
hourly_power_data = json_output['results']['listeDonneesConsoPuissanceHoraire']  # assumed API key names
params = {"p_p_id": p_p_id,
"p_p_lifecycle": 2,
"p_p_state": "normal",
"p_p_mode": "view",
"p_p_resource_id": "resourceObtenirDonneesMeteoHoraires",
"p_p_cacheability": "cacheLevelPage",
"p_p_col_id": "column-2",
"p_p_col_count": 1,
"dateDebut": day_date,
"dateFin": day_date,
}
try:
raw_res = yield from self._session.get(PROFILE_URL,
params=params,
timeout=self._timeout)
except OSError:
raise PyHydroQuebecError("Can not get hourly data")
try:
json_output = yield from raw_res.json(content_type='application/json')  # assumed content type
except (OSError, json.decoder.JSONDecodeError):
raise PyHydroQuebecAnnualError("Could not get hourly data")
hourly_weather_data = []
if not json_output.get('results'):  # assumed key name
return hourly_data
|
Get Hourly Data.
|
372,674
|
def _is_chunk_markdown(source):
lines = source.splitlines()
if all(line.startswith('# ') for line in lines):
source = '\n'.join(line[2:] for line in lines
                   if not line[2:].startswith('!'))  # assumed: '!' skips shell-magic lines
if not source:
return True
return not _is_python(source)
return False
|
Return whether a chunk contains Markdown contents.
|
372,675
|
def cmd_notice(self, connection, sender, target, payload):
msg_target, topic, content = self.parse_payload(payload)
def callback(sender, payload):
logging.info("NOTICE ACK from %s: %s", sender, payload)
self.__herald.notice(msg_target, topic, content, callback)
|
Sends a message
|
372,676
|
def create_tasks(self, wfk_file, scr_input):
assert len(self) == 0
wfk_file = self.wfk_file = os.path.abspath(wfk_file)
shell_manager = self.manager.to_shell_manager(mpi_procs=1)
w = Work(workdir=self.tmpdir.path_join("_qptdm_run"), manager=shell_manager)
fake_input = scr_input.deepcopy()
fake_task = w.register(fake_input)
w.allocate()
w.build()
fake_task.inlink_file(wfk_file)
fake_task.set_vars({"nqptdm": -1})
fake_task.start_and_wait()
with NetcdfReader(fake_task.outdir.has_abiext("qptdms.nc")) as reader:
qpoints = reader.read_value("reduced_coordinates_of_kpoints")
for qpoint in qpoints:
qptdm_input = scr_input.deepcopy()
qptdm_input.set_vars(nqptdm=1, qptdm=qpoint)
new_task = self.register_scr_task(qptdm_input, manager=self.manager)
if self.flow.gc is not None:
new_task.set_gc(self.flow.gc)
self.allocate()
|
Create the SCR tasks and register them in self.
Args:
wfk_file: Path to the ABINIT WFK file to use for the computation of the screening.
scr_input: Input for the screening calculation.
|
372,677
|
def frame_iv(algorithm, sequence_number):
if sequence_number < 1 or sequence_number > MAX_FRAME_COUNT:
raise ActionNotAllowedError(
"Invalid frame sequence number: {actual}\nMust be between 1 and {max}".format(
actual=sequence_number, max=MAX_FRAME_COUNT
)
)
prefix_len = algorithm.iv_len - 4
prefix = b"\x00" * prefix_len
return prefix + struct.pack(">I", sequence_number)
|
Builds the deterministic IV for a body frame.
:param algorithm: Algorithm for which to build IV
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param int sequence_number: Frame sequence number
:returns: Generated IV
:rtype: bytes
:raises ActionNotAllowedError: if sequence number is out of bounds
|
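A minimal sketch of the resulting IV layout, using a stand-in object with the 12-byte IV length of AES-GCM (a real call would pass an aws_encryption_sdk Algorithm):

    class FakeAlg:
        iv_len = 12

    print(frame_iv(FakeAlg(), 1).hex())  # '000000000000000000000001' -> 8 zero bytes + 4-byte big-endian counter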
372,678
|
def _request_one_trial_job(self):
if not self.generated_hyper_configs:
ret = {
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
send(CommandType.NoMoreTrialJobs, json_tricks.dumps(ret))
return
assert self.generated_hyper_configs
params = self.generated_hyper_configs.pop()
ret = {
'parameter_id': params[0],
'parameter_source': 'algorithm',
'parameters': params[1]
}
self.parameters[params[0]] = params[1]
send(CommandType.NewTrialJob, json_tricks.dumps(ret))
self.credit -= 1
|
get one trial job, i.e., one hyperparameter configuration.
If this function is called, a command will be sent by BOHB:
a. If there is a parameter that needs to run, it will return "NewTrialJob" with a dict:
{
'parameter_id': id of new hyperparameter
'parameter_source': 'algorithm'
'parameters': value of new hyperparameter
}
b. If BOHB doesn't have a parameter waiting, it will return "NoMoreTrialJobs" with
{
'parameter_id': '-1_0_0',
'parameter_source': 'algorithm',
'parameters': ''
}
|
372,679
|
def atstart(callback, *args, **kwargs):
    global _atstart
    _atstart.append((callback, args, kwargs))
|
Schedule a callback to run before the main hook.
Callbacks are run in the order they were added.
This is useful for modules and classes to perform initialization
and inject behavior. In particular:
- Run common code before all of your hooks, such as logging
the hook name or interesting relation data.
- Defer object or module initialization that requires a hook
context until we know there actually is a hook context,
making testing easier.
- Rather than requiring charm authors to include boilerplate to
invoke your helper's behavior, have it run automatically if
your object is instantiated or module imported.
This is not at all useful after your hook framework has been launched.
|
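A minimal usage sketch:

    def log_start(hook_name):
        print('about to run hook:', hook_name)

    atstart(log_start, 'install')  # runs before the main hook, in registration order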
372,680
|
def delete_evpn_local(route_type, route_dist, **kwargs):
try:
tm = CORE_MANAGER.get_core_service().table_manager
tm.update_vrf_table(route_dist,
route_family=VRF_RF_L2_EVPN,
route_type=route_type, is_withdraw=True, **kwargs)
ret = {EVPN_ROUTE_TYPE: route_type,
       ROUTE_DISTINGUISHER: route_dist,
       VRF_RF: VRF_RF_L2_EVPN}
ret.update(kwargs)  # dict.update returns None, so it cannot be returned inline
return [ret]
except BgpCoreError as e:
raise PrefixError(desc=e)
|
Deletes/withdraws EVPN route from VRF identified by *route_dist*.
|
372,681
|
def fastqWrite(fileHandleOrFile, name, seq, qualValues, mode="w"):
fileHandle = _getFileHandle(fileHandleOrFile, mode)
assert seq.__class__ == "".__class__
for i in seq:
if not ((i >= 'A' and i <= 'Z') or (i >= 'a' and i <= 'z') or i == '-'):  # assumed: letters plus gap character
raise RuntimeError("Invalid FASTQ character, ASCII code = '%i', char = '%s', found in input sequence: %s" % (ord(i), i, name))
if qualValues != None and qualValues != '*':
if len(seq) != len(qualValues):
raise RuntimeError("Got a mismatch between the number of sequence characters (%s) and number of qual values (%s) for sequence: %s " % (len(seq), len(qualValues), name))
for i in qualValues:
if i < 33 or i > 126:
raise RuntimeError("Got a qual value out of range %s (range is 33 to 126)" % i)
fileHandle.write("@%s\n%s\n+\n%s\n" % (name, seq, "".join([ chr(i) for i in qualValues ])))
else:
fileHandle.write("@%s\n%s\n+\n*\n" % (name, seq))
if isinstance(fileHandleOrFile, "".__class__):
fileHandle.close()
|
Writes out fastq file. If qualValues is None or '*' then prints a '*' instead.
|
372,682
|
def filter_duplicate(self, url):
if self.filterDuplicate:
if url in self.historys:
raise Exception('duplicate url: %s' % url)  # assumed message; original string lost
else:
self.historys.add(url)
else:
pass
|
URL deduplication: raise if the URL was already seen, otherwise record it.
|
372,683
|
def delete_Variable(self,name):
self.message(1, 'deleting variable {}'.format(name))  # assumed message; original string lost
self.par_list=self.par_list[self.par_list != name]
return self.__dict__.pop(name)
|
pops a variable from the class and deletes it from the parameter list
:parameter name: name of the parameter to delete
|
372,684
|
def tempfilename(**kwargs):
kwargs.update(delete=False)
try:
f = NamedTemporaryFile(**kwargs)
f.close()
yield f.name
except Exception:
if os.path.exists(f.name):
os.unlink(f.name)
raise
|
Reserve a temporary file for future use.
This is useful if you want to get a temporary file name, write to it in the
future and ensure that if an exception is thrown the temporary file is removed.
|
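Usage sketch; the bare yield implies the original module wraps this function with contextlib.contextmanager:

    with tempfilename(suffix='.txt') as name:
        with open(name, 'w') as f:
            f.write('data to keep')
    # the file persists after the block unless an exception was raised inside it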
372,685
|
def delete_network(context, id):
LOG.info("delete_network %s for tenant %s" % (id, context.tenant_id))
with context.session.begin():
net = db_api.network_find(context=context, limit=None, sorts=[],
marker=None, page_reverse=False, id=id,
scope=db_api.ONE)
if not net:
raise n_exc.NetworkNotFound(net_id=id)
if not context.is_admin:
if STRATEGY.is_provider_network(net.id):
raise n_exc.NotAuthorized(net_id=id)
if net.ports:
raise n_exc.NetworkInUse(net_id=id)
net_driver = registry.DRIVER_REGISTRY.get_driver(net["network_plugin"])
net_driver.delete_network(context, id)
for subnet in net["subnets"]:
subnets._delete_subnet(context, subnet)
db_api.network_delete(context, net)
|
Delete a network.
:param context: neutron api request context
:param id: UUID representing the network to delete.
|
372,686
|
def draw(self):
devs = normal(size=self._sig.shape[1])
p = inner(self._sig, devs) + self._mu
self._set_stochastics(p)
|
N.draw()
Sets all N's stochastics to random values drawn from
the normal approximation to the posterior.
|
372,687
|
def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None):
r = self.request(
method="network.get_my_feed",
nid=nid,
data=dict(
limit=limit,
offset=offset,
sort=sort
)
)
return self._handle_error(r, "Could not retrieve your feed.")
|
Get my feed
:type limit: int
:param limit: Number of posts from feed to get, starting from ``offset``
:type offset: int
:param offset: Offset starting from bottom of feed
:type sort: str
:param sort: How to sort feed that will be retrieved; only current
known value is "updated"
:type nid: str
:param nid: This is the ID of the network to get the feed
from. This is optional and only to override the existing
`network_id` entered when creating the class
|
372,688
|
def load_mayaplugins():
    mpp = os.environ.get('MAYA_PLUG_IN_PATH')  # assumed env var name (Maya's plugin search path)
    if mpp is not None:
        os.environ['MAYA_PLUG_IN_PATH'] = os.pathsep.join([mpp, MAYA_PLUGIN_PATH])
    else:
        mpp = MAYA_PLUGIN_PATH
        os.environ['MAYA_PLUG_IN_PATH'] = MAYA_PLUGIN_PATH
    cmds.loadPlugin(allPlugins=True)
    os.environ['MAYA_PLUG_IN_PATH'] = mpp
|
Loads the maya plugins (not jukebox plugins) of the pipeline
:returns: None
:rtype: None
:raises: None
|
372,689
|
def insert_inexistence(self, table, kwargs, condition):
sql = "insert into " + table + " ({}) "
select = "select {} "
condition = "where not exists (select 1 from " + table + "{} limit 1);".format( self.parse_condition(condition) )
keys, values = [], []
[ (keys.append(k), values.append(v)) for k, v in kwargs.iteritems() ]
sql = sql.format(', '.join(keys)) + select.format(', '.join(['%s'] * len(values))) + condition  # separators inferred from the docstring example
super(PGWrapper, self).execute(sql, values, result=False)
|
.. :py:method::
Usage::
>>> insert('hospital', {'id': '12de3wrv', 'province': 'shanghai'}, {'id': '12de3wrv'})
insert into hospital (id, province) select '12de3wrv', 'shanghai' where not exists (select 1 from hospital where id='12de3wrv' limit 1);
|
372,690
|
def search(self, search_phrase, limit=None):
from ambry.identity import ObjectNumber
from ambry.orm.exc import NotFoundError
from ambry.library.search_backends.base import SearchTermParser
results = []
stp = SearchTermParser()
parsed_terms = stp.parse(search_phrase)
for r in self.search_datasets(search_phrase, limit):
vid = r.vid or ObjectNumber.parse(next(iter(r.partitions))).as_dataset
r.vid = vid
try:
r.bundle = self.library.bundle(r.vid)
if 'source' not in parsed_terms or parsed_terms['source'] in r.bundle.dataset.source:  # assumed key name
results.append(r)
except NotFoundError:
pass
return sorted(results, key=lambda r: r.score, reverse=True)
|
Search for datasets, and expand to database records
|
372,691
|
def dump_registers_peek(registers, data, separator = ' ', width = 16):
if None in (registers, data):
return
names = compat.keys(data)
names.sort()
result = ''
for reg_name in names:
tag = reg_name.lower()
dumped = HexDump.hexline(data[reg_name], separator, width)
result += '%s -> %s\n' % (tag, dumped)  # assumed format; original string lost
return result
|
Dump data pointed to by the given registers, if any.
@type registers: dict( str S{->} int )
@param registers: Dictionary mapping register names to their values.
This value is returned by L{Thread.get_context}.
@type data: dict( str S{->} str )
@param data: Dictionary mapping register names to the data they point to.
This value is returned by L{Thread.peek_pointers_in_registers}.
@rtype: str
@return: Text suitable for logging.
|
372,692
|
def _clean_tx(response_dict):
    # blockcypher key names below ('txrefs', 'confirmed', ...) are assumed reconstructions
    confirmed_txrefs = []
    for confirmed_txref in response_dict.get('txrefs', []):
        confirmed_txref['confirmed'] = parser.parse(confirmed_txref['confirmed'])
        confirmed_txrefs.append(confirmed_txref)
    response_dict['txrefs'] = confirmed_txrefs
    unconfirmed_txrefs = []
    for unconfirmed_txref in response_dict.get('unconfirmed_txrefs', []):
        unconfirmed_txref['received'] = parser.parse(unconfirmed_txref['received'])
        unconfirmed_txrefs.append(unconfirmed_txref)
    response_dict['unconfirmed_txrefs'] = unconfirmed_txrefs
    return response_dict
|
Pythonize a blockcypher API response
|
372,693
|
def setup(self, database):
self.db = database
self.hgnc_collection = database.hgnc_gene
self.user_collection = database.user
self.whitelist_collection = database.whitelist
self.institute_collection = database.institute
self.event_collection = database.event
self.case_collection = database.case
self.panel_collection = database.gene_panel
self.hpo_term_collection = database.hpo_term
self.disease_term_collection = database.disease_term
self.variant_collection = database.variant
self.acmg_collection = database.acmg
self.clinvar_collection = database.clinvar
self.clinvar_submission_collection = database.clinvar_submission
self.exon_collection = database.exon
self.transcript_collection = database.transcript
|
Setup connection to database.
|
372,694
|
def pack(o, stream, **kwargs):
msgpack_module = kwargs.pop('_msgpack_module', msgpack)  # name given in the docstring below
orig_enc_func = kwargs.pop('default', lambda x: x)  # inferred from the default= pass-through
def _enc_func(obj):
obj = ThreadLocalProxy.unproxy(obj)
return orig_enc_func(obj)
return msgpack_module.pack(o, stream, default=_enc_func, **kwargs)
|
.. versionadded:: 2018.3.4
Wraps msgpack.pack and ensures that the passed object is unwrapped if it is
a proxy.
By default, this function uses the msgpack module and falls back to
msgpack_pure, if the msgpack is not available. You can pass an alternate
msgpack module using the _msgpack_module argument.
|
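A minimal round-trip sketch (assumes the msgpack package is importable and pack above is in scope):

    import io
    import msgpack

    buf = io.BytesIO()
    pack({'key': 'value'}, buf)
    print(msgpack.unpackb(buf.getvalue()))  # {'key': 'value'}; str/bytes handling depends on msgpack version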
372,695
|
def delete_instance(self, instance_id):
    title = '%s.delete_instance' % self.__class__.__name__
    input_fields = {
        'instance_id': instance_id
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)
    self.iam.printer('Removing instance %s from region %s.' % (instance_id, self.iam.region_name))  # assumed message
    old_state = self.check_instance_state(instance_id)
    tag_list = []
    try:
        response = self.connection.describe_tags(
            Filters=[{'Name': 'resource-id', 'Values': [instance_id]}]
        )
        import re
        aws_tag_pattern = re.compile('aws:')  # assumed pattern: skip AWS-managed tags
        for i in range(0, len(response['Tags'])):
            if not aws_tag_pattern.findall(response['Tags'][i]['Key']):
                tag = {}
                tag['Key'] = response['Tags'][i]['Key']
                tag['Value'] = response['Tags'][i]['Value']
                tag_list.append(tag)
    except:
        raise AWSConnectionError(title)
    try:
        self.connection.delete_tags(
            Resources=[instance_id],
            Tags=tag_list
        )
        self.iam.printer('Tags removed from instance %s.' % instance_id)  # assumed message
    except:
        raise AWSConnectionError(title)
    try:
        self.connection.stop_instances(
            InstanceIds=[instance_id]
        )
    except:
        raise AWSConnectionError(title)
    try:
        response = self.connection.terminate_instances(
            InstanceIds=[instance_id]
        )
        new_state = response['TerminatingInstances'][0]['CurrentState']['Name']
    except:
        raise AWSConnectionError(title)
    self.iam.printer('Previous state of instance %s: %s' % (instance_id, old_state))  # assumed message
    self.iam.printer('Current state of instance %s: %s' % (instance_id, new_state))  # assumed message
    return new_state
|
method for removing an instance from AWS EC2
:param instance_id: string of instance id on AWS
:return: string reporting state of instance
|
372,696
|
def train(self, recall=0.95, index_predicates=True):
examples, y = flatten_training(self.training_pairs)
self.classifier.fit(self.data_model.distances(examples), y)
self.predicates = self.active_learner.learn_predicates(
recall, index_predicates)
self.blocker = blocking.Blocker(self.predicates)
self.blocker.resetIndices()
|
Keyword arguments:
maximum_comparisons -- The maximum number of comparisons a
blocking rule is allowed to make.
Defaults to 1000000
recall -- The proportion of true dupe pairs in our training
data that the learned blocks must cover. If
we lower the recall, there will be pairs of true
dupes that we will never directly compare.
recall should be a float between 0.0 and 1.0, the default
is 0.95
index_predicates -- Should dedupe consider predicates that
rely upon indexing the data. Index predicates can
be slower and take substantial memory.
Defaults to True.
|
372,697
|
def get_supported_filepaths(filepaths, supported_extensions, max_depth=float('inf')):
supported_filepaths = []
for path in filepaths:
if os.name == 'nt' and CYGPATH_RE.match(path):  # assumed: convert Cygwin-style paths on Windows
path = convert_cygwin_path(path)
if os.path.isdir(path):
for root, __, files in walk_depth(path, max_depth):
for f in files:
if f.lower().endswith(supported_extensions):
supported_filepaths.append(os.path.join(root, f))
elif os.path.isfile(path) and path.lower().endswith(supported_extensions):
supported_filepaths.append(path)
return supported_filepaths
|
Get filepaths with supported extensions from given filepaths.
Parameters:
filepaths (list or str): Filepath(s) to check.
supported_extensions (tuple or str): Supported file extensions or a single file extension.
max_depth (int): The depth in the directory tree to walk.
A depth of '0' limits the walk to the top directory.
Default: No limit.
Returns:
A list of supported filepaths.
|
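A hypothetical call (the paths are placeholders), collecting audio files at most one directory deep:

    paths = get_supported_filepaths(['/music', '/downloads/track.mp3'],
                                    ('.flac', '.mp3'), max_depth=1)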
372,698
|
def PC_PI_calc(P, TOP, POP):
try:
result = 0
for i in P.keys():
result += ((P[i] + TOP[i]) / (2 * POP[i]))**2
return result
except Exception:
return "None"
|
Calculate percent chance agreement for Scott's Pi.
:param P: condition positive
:type P : dict
:param TOP: test outcome positive
:type TOP: dict
:param POP: population
:type POP: dict
:return: percent chance agreement as float
|
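A worked example with made-up counts for two classes:

    P   = {'a': 30, 'b': 70}    # condition positive
    TOP = {'a': 40, 'b': 60}    # test outcome positive
    POP = {'a': 100, 'b': 100}  # population
    # ((30 + 40) / 200)**2 + ((70 + 60) / 200)**2 = 0.1225 + 0.4225
    print(PC_PI_calc(P, TOP, POP))  # 0.545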
372,699
|
def pylint_amnesty(pylint_output):
errors = defaultdict(lambda: defaultdict(set))
for pylint_error in parse_pylint_output(pylint_output):
errors[pylint_error.filename][pylint_error.linenum].add(pylint_error)
for file_with_errors in sorted(errors):
try:
opened_file = open(file_with_errors)
except IOError:
LOG.warning(u"Unable to open %s for edits", file_with_errors, exc_info=True)
else:
with opened_file as input_file:
output_lines = []
for line_num, line in enumerate(input_file, start=1):
output_lines.extend(
fix_pylint(
line,
errors[file_with_errors][line_num]
)
)
with open(file_with_errors, 'w') as output_file:
output_file.writelines(output_lines)
|
Add ``# pylint: disable`` clauses to add exceptions to all existing pylint errors in a codebase.
|