| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
def get_object(node):
""" Parse rebaron AtomTrailers node into Python object (taken from ongoing conversion object)
Works for object and local scope """
if len(node) > 1 and (node[0].value == 'self' or node[0].value == 'self_next'):
var_t = super_getattr(convert_obj, str(node))
else:
# get the SOURCE function (where call is going on) from datamodel
def_parent = node.parent
while not isinstance(def_parent, DefNode):
def_parent = def_parent.parent
source_func_name = f'self.{def_parent.name}'
source_func_obj = super_getattr(convert_obj, str(source_func_name))
func_locals = source_func_obj.get_local_types()
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
struct = Struct(**func_locals)
var_t = super_getattr(struct, str(node), is_local=True)
return var_t
|
9b09dfaae08768e1544e676e93bcf5ce718c67d5
| 3,645,772
|
import numpy as np
def read_label_from_txt(label_path):
"""Read label from txt file."""
    bounding_box = []
with open(label_path, "r") as f:
labels = f.read().split("\n")
for label in labels:
if not label:
continue
label = label.split(" ")
if (label[0] == "DontCare"):
continue
if label[0] == ("Car" or "Van"): # or "Truck"
bounding_box.append(label[8:15])
if bounding_box:
data = np.array(bounding_box, dtype=np.float32)
return data[:, 3:6], data[:, :3], data[:, 6]
else:
return None, None, None
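# Usage sketch for the function above (hypothetical file name). A KITTI-style label
# line has 15 space-separated fields; "DontCare" rows are skipped.
with open("000000.txt", "w") as f:
    f.write("Car 0.00 0 1.55 614.2 181.8 727.3 284.1 "
            "1.57 1.73 4.15 1.00 1.75 13.22 1.62\n")
location, size, rotation = read_label_from_txt("000000.txt")
# location -> [[1.00, 1.75, 13.22]], size -> [[1.57, 1.73, 4.15]], rotation -> [1.62]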
|
dd6158af3531ec003b57c4fa47282957c3cb72ea
| 3,645,773
|
from typing import Union
def _load_recipe(module, baked: bool = False) -> Union[BakedRecipe, Recipe]:
# load entry-point DAG
"""Load Queenbee plugin from Python package.
Usually you should not be using this function directly. Use ``load`` function
instead.
args:
module: Python module object for a Queenbee Recipe.
returns:
Recipe - A Queenbee recipe. It will be a baked recipe if baked is set to True.
"""
qb_info = module.__pollination__
package_name = module.__name__
main_dag_entry = qb_info.get('entry_point', None)
assert main_dag_entry, \
        f'{package_name} __pollination__ info is missing the entry_point key.'
main_dag = main_dag_entry()
# get metadata
metadata = _get_meta_data(module, 'recipe')
_dependencies = main_dag._dependencies
# create a queenbee Recipe object
# load dags
qb_dag = main_dag.queenbee
qb_dag.name = 'main'
dags = [qb_dag] + [dag.queenbee for dag in _dependencies['dag']]
# add dependencies
repo = _init_repo()
plugins = [
Dependency(
kind=DependencyKind.plugin, name=plugin['name'], tag=plugin['tag'],
source=repo.as_uri()
) for plugin in _dependencies['plugin']
]
recipes = [
Dependency(
kind=DependencyKind.recipe, name=recipe['name'], tag=recipe['tag'],
source=repo.as_uri()
) for recipe in _dependencies['recipe']
]
recipe = Recipe(metadata=metadata, dependencies=plugins + recipes, flow=dags)
if baked:
package_recipe_dependencies(recipe)
rf = RepositoryReference(
name='pollination-dsl', path='file:///' + repo.as_posix()
)
config = Config(repositories=[rf])
recipe = BakedRecipe.from_recipe(recipe=recipe, config=config)
return recipe
|
d0c400a234a777438418c4eb605723ea67509077
| 3,645,774
|
def compute_coeffs(shape, Aref, alfa):
"""Computes the lift and drag coefficients of the given shape at the given
angle of attack using the given reference area"""
alfa_vect = np.array([-np.sin(alfa),0,-np.cos(alfa)])
Fvect = np.array([0,0,0]) #Force coefficient vector
for panel in shape:
panel.alfa = np.arcsin(np.dot(alfa_vect,-panel.N)/ \
(np.linalg.norm(alfa_vect)*np.linalg.norm(panel.N)))
panel_Cpvect = (panel.A/Aref) * (2*np.sin(panel.alfa)**2) * (-panel.N/np.linalg.norm(panel.N))
Fvect = Fvect + panel_Cpvect
CN = -Fvect[0]#np.dot(Fvect,np.array([-1,0,0]))
CA = -Fvect[2]#np.dot(Fvect,np.array([0,0,-1]))
CL = CN * np.cos(alfa) - CA * np.sin(alfa)
CD = CA * np.cos(alfa) + CN * np.sin(alfa)
#return CA, CN
return CL, CD
|
48cd922a460c56961cf2ebc2a3ecfc121622fe26
| 3,645,775
|
def draw_pitch(axis, rotate=False):
"""
Plots the lines of a soccer pitch using matplotlib.
Arguments
---------
axis : matplotlib.axes._subplots.AxesSubplot
- matplotlib axis object on which to plot shot freeze frame
rotate : bool
- if set to True, pitch is horizontal,
default to False
    Returns
    -------
    axis : matplotlib.axes._subplots.AxesSubplot
        - the axis object with the pitch drawn on it
"""
line_width = 4
alpha = 0.5
r = 10
line_coords = [[[0, 0], [0, 120]], [[0, 80], [120, 120]],
[[80, 80], [120, 0]], [[0, 80], [0, 0]],
[[0, 80], [60, 60]], [[18, 18], [0, 18]],
[[18, 62], [18, 18]], [[62, 62], [0, 18]],
[[30, 30], [0, 6]], [[30, 50], [6, 6]], [[50, 50], [0, 6]],
[[18, 18], [120, 102]], [[18, 62], [102, 102]],
[[62, 62], [102, 120]], [[30, 30], [120, 114]],
[[30, 50], [114, 114]], [[50, 50], [120, 114]]]
if not rotate:
for lines in line_coords:
axis.plot(lines[0], lines[1], color='grey',
linewidth=line_width, alpha=alpha)
theta1 = np.linspace(0, 2*np.pi, 100)
theta2 = np.linspace(0.65, 2.47, 100)
theta3 = np.linspace(3.8, 5.6, 100)
x1 = r*np.cos(theta1) + 40
x2 = r*np.sin(theta1) + 60
x3 = r*np.cos(theta2) + 40
x4 = r*np.sin(theta2) + 12
x5 = r*np.cos(theta3) + 40
x6 = r*np.sin(theta3) + 108
axis.plot(x1, x2, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x3, x4, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x5, x6, color='grey', linewidth=line_width,
alpha=alpha)
else:
for lines in line_coords:
axis.plot([-(lines[1][0]-40) + 80, -(lines[1][1]-40) + 80],
[lines[0][0], lines[0][1]], color='grey',
linewidth=line_width, alpha=alpha)
theta1 = np.linspace(0, 2*np.pi, 100)
theta2 = np.linspace(5.4, 7.2, 100)
theta3 = np.linspace(2.2, 4, 100)
x1 = r*np.cos(theta1) + 60
x2 = r*np.sin(theta1) + 40
x3 = r*np.cos(theta2) + 12
x4 = r*np.sin(theta2) + 40
x5 = r*np.cos(theta3) + 108
x6 = r*np.sin(theta3) + 40
axis.plot(x1, x2, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x3, x4, color='grey', linewidth=line_width,
alpha=alpha)
axis.plot(x5, x6, color='grey', linewidth=line_width,
alpha=alpha)
return axis
|
24a5b0de75ed70f6ce37afab8e04cf03afaa4651
| 3,645,776
|
import click
def file(filename, searchspec=None, searchtype=None, list_images=False,
sort_by=None, fields=None):
"""Examine images from a local file."""
if not list_images:
if searchtype is None or searchspec is None:
raise click.BadParameter(
'SEARCHTYPE and SEARCHSPEC must be specified when not listing '
'images')
try:
images = sources.read_images_file(filename)
except Exception as e:
abort(e)
if list_images:
return _list_images(images)
_process_images(searchtype, images, searchspec, sort_by, fields)
|
6162ac7553f04225a6cad0fe262f2cc97dad39a2
| 3,645,777
|
from numbers import Real
import numpy as np
def clip_to_norm(array, clip):
"""Clips the examples of a 2-dimensional array to a given maximum norm.
Parameters
----------
array : np.ndarray
Array to be clipped. After clipping, all examples have a 2-norm of at most `clip`.
clip : float
Norm at which to clip each example
Returns
-------
array : np.ndarray
The clipped array.
"""
if not isinstance(array, np.ndarray):
raise TypeError(f"Input array must be a numpy array, got {type(array)}.")
if array.ndim != 2:
raise ValueError(f"input array must be 2-dimensional, got {array.ndim} dimensions.")
if not isinstance(clip, Real):
raise TypeError(f"Clip value must be numeric, got {type(clip)}.")
if clip <= 0:
raise ValueError(f"Clip value must be strictly positive, got {clip}.")
norms = np.linalg.norm(array, axis=1) / clip
norms[norms < 1] = 1
return array / norms[:, np.newaxis]
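# Minimal usage sketch for the function above: each row whose 2-norm exceeds `clip`
# is scaled down onto the clipping radius, while shorter rows are left untouched.
demo = np.array([[3.0, 4.0], [0.3, 0.4]])   # row norms: 5.0 and 0.5
clipped = clip_to_norm(demo, clip=1.0)      # rows become [0.6, 0.8] and [0.3, 0.4]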
|
e7dca2cf9f129736ebc5f7909cb4fed41a4c7996
| 3,645,778
|
def getlineno(frame):
"""Get the line number from a frame object, allowing for optimization."""
# FrameType.f_lineno is now a descriptor that grovels co_lnotab
return frame.f_lineno
|
b8c8d6fb3ebb8784d10250a42526b31e185e9b7a
| 3,645,779
|
def _batch_sum(F, loss, batch_axis):
"""Return sum on the specified batch axis, not keeping the axis"""
if is_np_array():
axes = list(range(loss.ndim))
del axes[batch_axis]
return F.np.sum(loss, axis=axes)
else:
return F.sum(loss, axis=batch_axis, exclude=True)
|
71bad3e21c1905e81b0e40b5be08ebc0426e1ca7
| 3,645,780
|
def tree_cons(a, tree: Pytree) -> Pytree:
"""
    Prepend ``a`` to every tuple in the given tree.
"""
return jax.tree_map(
lambda x: _OpaqueSequence((a,) + tuple(x)),
tree,
is_leaf=lambda x: isinstance(x, tuple),
)
|
1d82ca0a7b49d8e803fe8771f4f6b697826f5e27
| 3,645,781
|
from datetime import datetime
def add(moment: datetime) -> datetime:
"""Add one gigasecond to a given date and time."""
return moment + GIGASECOND
|
87e009e408088d6c91ceb409408608947c0b9fd3
| 3,645,782
|
def convert_to_short_log(log_level, message):
"""Convert a log message to its shorter format.
:param log_level: enum - 'LogLevel.<level>' e.g. 'LogLevel.Error'
:param message: str - log message
    :return: str - '<LogLevelInt value>:<message>' e.g. '5:Something went wrong'
"""
return f'{LogLevelInt[log_level.name].value}:{message}'
|
0d4c20ac4ec809dbb58494ef928586c95da88fbb
| 3,645,784
|
import zipfile
import io
import pathlib
import warnings
def load_map(path, callback=None, meta_override=None):
"""Load a set of zipped csv AFM workshop data
If you are recording quantitative force-maps (i.e. multiple
curves on an x-y-grid) with AFM workshop setups, then you
might have realized that you get *multiple* .csv files (one
file per indentation) instead of *one* file that contains all
the data (as you might be accustomed to from other
manufacturers). Since afmformats expects one file per
measurement, it would not be straight forward to obtain a
properly enumerated quantitative imaging group.
This function offers a workaround - it loads a zip archive
    created from the .csv files.
The files are structured like this::
Force-Distance Curve
File Format: 3
Date: Wednesday, August 1, 2018
Time: 1:07:47 PM
Mode: Mapping
Point: 16
X, um: 27.250000
Y, um: 27.250000
Extend Z-Sense(nm),Extend T-B(V),Retract Z-Sense(nm),Retract T-B(V)
13777.9288,0.6875,14167.9288,1.0917
13778.9288,0.6874,14166.9288,1.0722
13779.9288,0.6876,14165.9288,1.0693
13780.9288,0.6877,14164.9288,1.0824
13781.9288,0.6875,14163.9288,1.0989
...
Please make sure that the ``Point`` is enumerated from 1
onwards (and matches the alphanumerical order of the files in
the archive) and that ``Mode`` is ``Mapping``. The ``X`` and
``Y`` coordinates can be used by e.g. PyJibe to display QMap
data on a grid.
Parameters
----------
path: str or pathlib.Path
path to zip file containing AFM workshop .csv files
callback: callable
function for progress tracking; must accept a float in
[0, 1] as an argument.
meta_override: dict
if specified, contains key-value pairs of metadata that
are used when loading the files
(see :data:`afmformats.meta.META_FIELDS`)
"""
datasets = []
with zipfile.ZipFile(path) as arc:
names = sorted(arc.namelist())
for ii, name in enumerate(names):
with arc.open(name, "r") as fd:
tfd = io.TextIOWrapper(fd, encoding="utf-8")
dd = load_csv(
tfd,
# recurse into callback with None as default
callback=lambda x: callback((ii + x) / len(names))
if callback is not None else None,
meta_override=meta_override,
mode="mapping")
dd[0]["metadata"]["path"] = pathlib.Path(path)
cur_enum = dd[0]["metadata"]["enum"]
if cur_enum != ii + 1:
warnings.warn("Dataset 'Point' enumeration mismatch for "
f"'{name}' in '{path}' (expected {ii + 1}, "
f"got {cur_enum})!",
AFMWorkshopFormatWarning)
datasets += dd
# Populate missing grid metadata
xvals = list(set([ad["metadata"]["position x"] for ad in datasets]))
yvals = list(set([ad["metadata"]["position y"] for ad in datasets]))
mdgrid = {
"grid center x": np.mean(xvals),
"grid center y": np.mean(yvals),
"grid shape x": len(xvals),
"grid shape y": len(yvals),
# grid size in um includes boundaries of pixels
"grid size x": np.ptp(xvals)*(1 + 1/(len(xvals)-1)),
"grid size y": np.ptp(yvals)*(1 + 1/(len(yvals)-1)),
}
# Update with new metadata (note that grid index x/y is populated via
# MetaData._autocomplete_grid_metadata)
[ad["metadata"].update(mdgrid) for ad in datasets]
return datasets
|
bdf98f10a5decfc9a4dbd4b9cc90c75c7c95e76d
| 3,645,785
|
def format_len(x):
"""
>>> format_len('abc')
3
>>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
11
"""
if not isinstance(x, (list, tuple)): return len(x)
if len(x) > 3: sep_len = 2 * (len(x) - 3)
else: sep_len = 0
return sum(map(format_len, x)) + sep_len
|
723afb58bfed0cfb7fbd25a12b86b257bf8b40df
| 3,645,786
|
def set_doi_ark(page_number, records_per_page, sort_on, doi_ark_value):
"""
Retrieve all metadata records for admin view. Retrieval is done
via POST because we must pass a session id so that the user is
authenticated.
Access control is done here. A user can modify only their own records
because their session_id sent with the request.
"""
username = _authenticate_admin_from_session(request)
#pageNumber is 0 based index. Need first page to start at 0 for math for setting arrayLowerBound and arrayUpperBound.
try:
if username:
if request.method == 'POST':
#need to do input sanitization on all these values! Separating variables so outside does not have direct access to
#database query.
sort_by = validate_admin_sort_by(sort_on)
record_list = Metadata.objects(__raw__={'published':'pending'}).order_by(sort_by)
arrayLowerBound = int(page_number) * int(records_per_page)
arrayUpperBound = int(page_number) * int(records_per_page) + int(records_per_page)
                #Only return array elements between indices. Don't want to return all possible values
#and overload browser with too much data. This is a version of 'pagination.'
return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page))))
else:
return Response('Bad or missing session id.', status=401)
except:
return Response('Bad request for records', 400)
|
2b718232463a632dc07f73c1e6e5c3299ff57f18
| 3,645,788
|
def empiriline(x,p,L):
"""
Use the line L (which is an EmissionLine object) as a template.
The line is shifted, then interpolated, then rescaled, and allowed
to float.
"""
xnew = x - p[1]
yout = sp.zeros(len(xnew))
m = (xnew >= L.wv.min())*(xnew <= L.wv.max() )
ynew,znew = L.interp(xnew[m])
yout[m] = p[0]*ynew + p[2]
return yout
|
a0199a4623e834524711d0e78787d409da29d3ef
| 3,645,790
|
def _get_z_slice_fn(z, data_dir):
"""Get array slice map to be applied to z dimension
Args:
z: String or 1-based index selector for z indexes constructed as any of the following:
- "best": Indicates that z slices should be inferred based on focal quality
- "all": Indicates that a slice for all z-planes should be used
- str or int: A single value will be interpreted as a single index
- tuple: A 2-item or 3-item tuple forming the slice (start, stop[, step]); stop is inclusive
- list: A list of integers will be used as is
data_dir: Data directory necessary to infer 'best' z planes
Returns:
A function with signature (region_index, tile_x, tile_y) -> slice_for_array where slice_for_array
will either be a slice instance or a list of z-indexes (Note: all indexes are 0-based)
"""
if not z:
raise ValueError('Z slice cannot be defined as empty value (given = {})'.format(z))
# Look for keyword strings
if isinstance(z, str) and z == 'best':
map = function_data.get_best_focus_coord_map(data_dir)
return lambda ri, tx, ty: [map[(ri, tx, ty)]]
if isinstance(z, str) and z == 'all':
return lambda ri, tx, ty: slice(None)
# Parse argument as 1-based index list and then convert to 0-based
zi = cli.resolve_index_list_arg(z, zero_based=True)
return lambda ri, tx, ty: zi
|
1982258bb98205fe4552de4db8b4e049e09af4bf
| 3,645,791
|
def bar_data_wrapper(func):
"""Standardizes column names for any bar data"""
def wrapper(*args, **kwargs):
assert Ticker(args[0])
res: pd.DataFrame = func(*args, **kwargs)
return res.rename(columns=COL_NAMES).iterrows()
return wrapper
|
4cfa9614ec430ba53ac3e86dc68c6a84ee3cfbee
| 3,645,792
|
import torch
from torch import Tensor
def rgb_to_grayscale(
image: Tensor, rgb_weights: list[float] = [0.299, 0.587, 0.114]
) -> Tensor:
"""Convert an RGB image to grayscale version of image. Image data is
assumed to be in the range of [0.0, 1.0].
Args:
image (Tensor[B, 3, H, W]):
RGB image to be converted to grayscale.
rgb_weights (list[float]):
Weights that will be applied on each channel (RGB). Sum of the
weights should add up to one.
Returns:
grayscale (Tensor[B, 1, H, W]):
Grayscale version of the image.
"""
    if isinstance(rgb_weights, (list, tuple)):
        rgb_weights = torch.FloatTensor(rgb_weights)
    if not isinstance(rgb_weights, Tensor):
        raise TypeError(f"`rgb_weights` must be a `Tensor` or a list of floats. "
                        f"But got: {type(rgb_weights)}.")
if rgb_weights.shape[-1] != 3:
raise ValueError(f"`rgb_weights` must have a shape of [*, 3]. "
f"But got: {rgb_weights.shape}.")
r = image[..., 0:1, :, :]
g = image[..., 1:2, :, :]
b = image[..., 2:3, :, :]
if not torch.is_floating_point(image) and (image.dtype != rgb_weights.dtype):
raise ValueError(f"`image` and `rgb_weights` must have the same dtype. "
f"But got: {image.dtype} and {rgb_weights.dtype}.")
w_r, w_g, w_b = rgb_weights.to(image).unbind()
return w_r * r + w_g * g + w_b * b
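# Minimal usage sketch for the function above: a random batch of two 4x4 RGB images
# (values already in [0, 1]) is reduced to a single luminance channel.
img = torch.rand(2, 3, 4, 4)
gray = rgb_to_grayscale(img)   # default BT.601 weights [0.299, 0.587, 0.114]
assert gray.shape == (2, 1, 4, 4)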
|
d135a92e7189a745b2d9658b2b60a91c238fdbf8
| 3,645,793
|
def _aware_to_agnostic(fr: NDFrame) -> NDFrame:
"""Recalculate values in tz-aware series or dataframe, to get a tz-agnostic one.
(i.e., A to B)."""
if not fr.index.tz:
raise ValueError("``fr`` must be tz-aware.")
idx_out = _idx_after_conversion(fr, None)
# Convert daily or longer.
if stamps.freq_shortest(idx_out.freq, "D") == "D":
        # One-to-one correspondence between the timestamps in input and output frames.
# --> Simply replace the index.
return fr.set_axis(idx_out)
# Convert hourly or shorter.
# There are timestamps in the output that do not exist in the input. In that case,
# repeat the value of the previous hour.
partly = fr.tz_localize(None)
partly = partly[~partly.index.duplicated()] # remove duplicates
def value(ts): # Take value of prev hour if current time not found in the input.
try:
return partly.loc[ts]
except KeyError:
return partly.loc[ts - pd.Timedelta(hours=1)]
return fr.__class__([value(ts) for ts in idx_out], index=idx_out)
|
c97b190828d5364033336b2dcc48d352aafe1132
| 3,645,795
|
import math
def cumulative_prob_to_value(prob, hp):
"""Convert a value from [0, 1] to a hyperparameter value."""
if isinstance(hp, Fixed):
return hp.value
elif isinstance(hp, Boolean):
return bool(prob >= 0.5)
elif isinstance(hp, Choice):
ele_prob = 1 / len(hp.values)
index = math.floor(prob / ele_prob)
# Can happen when `prob` is very close to 1.
if index == len(hp.values):
index = index - 1
return hp.values[index]
elif isinstance(hp, (Int, Float)):
sampling = hp.sampling or 'linear'
if sampling == 'linear':
value = prob * (hp.max_value - hp.min_value) + hp.min_value
elif sampling == 'log':
value = hp.min_value * math.pow(hp.max_value / hp.min_value, prob)
elif sampling == 'reverse_log':
value = (hp.max_value + hp.min_value -
hp.min_value * math.pow(hp.max_value / hp.min_value, 1 - prob))
else:
raise ValueError('Unrecognized sampling value: {}'.format(sampling))
if hp.step is not None:
values = np.arange(hp.min_value, hp.max_value + 1e-7, step=hp.step)
closest_index = np.abs(values - value).argmin()
value = values[closest_index]
if isinstance(hp, Int):
return int(value)
return value
else:
raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))
|
d66e69f4cb580f8d3ce3004c082239717fd2854a
| 3,645,796
|
import podpac
import podpac.datalib # May not be imported by default
import inspect
def get_ui_node_spec(module=None, category="default"):
"""
Returns a dictionary describing the specifications for each Node in a module.
Parameters
-----------
module: module
The Python module for which the ui specs should be summarized. Only the top-level
classes will be included in the spec. (i.e. no recursive search through submodules)
category: str, optional
Default is "default". Top-level category name for the group of Nodes.
Returns
--------
dict
Dictionary of {category: {Node1: spec_1, Node2: spec2, ...}} describing the specs for each Node.
"""
spec = {}
def get_ui_spec(cls):
filter = []
spec = {"help": cls.__doc__, "module": cls.__module__ + "." + cls.__name__, "attrs": {}}
for attr in dir(cls):
if attr in filter:
continue
attrt = getattr(cls, attr)
if not isinstance(attrt, tl.TraitType):
continue
if "attr" not in attrt.metadata:
continue
type_ = attrt.__class__.__name__
type_extra = str(attrt)
if type_ == "Union":
type_ = [t.__class__.__name__ for t in attrt.trait_types]
type_extra = "Union"
elif type_ == "Instance":
type_ = attrt.klass.__name__
type_extra = attrt.klass
default_val = attrt.default()
if not isinstance(type_extra, str):
type_extra = str(type_extra)
try:
if np.isnan(default_val):
default_val = 'nan'
except:
pass
if default_val == tl.Undefined:
default_val = None
spec["attrs"][attr] = {
"type": type_,
"type_str": type_extra, # May remove this if not needed
"values": getattr(attrt, "values", None),
"default": default_val,
"help": attrt.help,
}
spec.update(getattr(cls, "_ui_spec", {}))
return spec
if module is None:
modcat = zip(
[podpac.data, podpac.algorithm, podpac.compositor, podpac.datalib],
["data", "algorithms", "compositors", "datalib"],
)
for mod, cat in modcat:
spec.update(get_ui_node_spec(mod, cat))
return spec
spec[category] = {}
for obj in dir(module):
ob = getattr(module, obj)
if not inspect.isclass(ob):
continue
if not issubclass(ob, podpac.Node):
continue
spec[category][obj] = get_ui_spec(ob)
return spec
|
17978a9ebb50696990f601f449a0539bcf67c3dd
| 3,645,797
|
def parse_branch_name(branch_name):
"""Split up a branch name of the form 'ocm-X.Y[-mce-M.N].
:param branch_name: A branch name. If of the form [remote/]ocm-X.Y[-mce-M.N] we will parse
it as noted below; otherwise the first return will be False.
:return parsed (bool): True if the branch_name was parseable; False otherwise.
:return remote (str): If parsed and the branch_name contained a remote/ prefix, it is
returned here; otherwise this is the empty string.
:return prefix (str): Two-digit semver prefix of the bundle to be generated. If the branch
name is of the form [remote/]ocm-X.Y, this will be X.Y; if of the form
[remote/]ocm-X.Y-mce-M.N it will be M.N. If not parseable, it will be the empty string.
:return channel (str): The name of the channel in which we'll include the bundle. If the
branch name is of the form [remote/]ocm-X.Y, this will be ocm-X.Y; if of the form
[remote/]ocm-X.Y-mce-M.N it will be mce-M.N. If not parseable, it will be the empty
string.
"""
m = MCE_BRANCH_RE.match(branch_name)
if m:
return True, m.group(1), m.group(2), m.group(3)
m = OCM_BRANCH_RE.match(branch_name)
if m:
return True, m.group(1), m.group(3), m.group(2)
return False, '', '', ''
|
2cb53aef0754c815dd61d4a1b640e12db64fbf83
| 3,645,798
|
import itertools
def symmetric_padding(
arr,
width):
"""
Pad an array using symmetric values.
This is equivalent to `np.pad(mode='symmetric')`, but should be faster.
Also, the `width` parameter is interpreted in a more general way.
Args:
arr (np.ndarray): The input array.
width (int|float|Iterable[int|float]): Size of the padding to use.
This is used with `flyingcircus.base.multi_scale_to_int()`.
The shape of the array is used for the scales.
Returns:
result (np.ndarray): The padded array.
Examples:
>>> arr = arange_nd((2, 3)) + 1
>>> print(arr)
[[1 2 3]
[4 5 6]]
>>> new_arr = symmetric_padding(arr, (1, 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((0, 1), 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((1, 0), 2))
>>> print(new_arr)
[[2 1 1 2 3 3 2]
[2 1 1 2 3 3 2]
[5 4 4 5 6 6 5]]
>>> new_arr = symmetric_padding(arr, ((0, 1.0),))
>>> print(new_arr)
[[1 2 3 3 2 1]
[4 5 6 6 5 4]
[4 5 6 6 5 4]
[1 2 3 3 2 1]]
>>> arr = arange_nd((5, 7, 11)) + 1
>>> np.all(symmetric_padding(arr, 17) == np.pad(arr, 17, 'symmetric'))
True
"""
width = fc.base.multi_scale_to_int(width, arr.shape)
if any(any(size for size in sizes) for sizes in width):
shape = tuple(
low + dim + up for dim, (low, up) in zip(arr.shape, width))
result = np.zeros(shape, dtype=arr.dtype)
target_slices = tuple(
tuple(
slice(
max((i - (1 if low % dim else 0)) * dim + low % dim, 0),
min((i + 1 - (1 if low % dim else 0)) * dim + low % dim,
low + dim + up))
for i in range(
fc.base.div_ceil(low, dim) + fc.base.div_ceil(up,
dim) + 1))
for dim, (low, up) in zip(arr.shape, width))
len_target_slices = tuple(len(items) for items in target_slices)
parities = tuple(
fc.base.div_ceil(low, dim) % 2
for dim, (low, up) in zip(arr.shape, width))
for i, target_slicing in enumerate(itertools.product(*target_slices)):
ij = np.unravel_index(i, len_target_slices)
source_slicing = []
for idx, target_slice, parity, dim in \
zip(ij, target_slicing, parities, arr.shape):
step = 1 if idx % 2 == parity else -1
start = stop = None
span = target_slice.stop - target_slice.start
if span != dim:
if target_slice.start == 0:
start = \
(dim - span) if idx % 2 == parity else (span - 1)
else:
stop = \
span if idx % 2 == parity else (dim - span - 1)
source_slicing.append(slice(start, stop, step))
source_slicing = tuple(source_slicing)
result[target_slicing] = arr[source_slicing]
else:
result = arr
return result
|
dd91b4f3641332ecfffa734584fd87293c7169ee
| 3,645,799
|
def _uid_or_str(node_or_entity):
""" Helper function to support the transition from `Entitie`s to `Node`s.
"""
return (
node_or_entity.uid
if hasattr(node_or_entity, "uid")
else str(node_or_entity)
)
|
82f5747e8c73e1c167d351e1926239f17ea37b98
| 3,645,800
|
def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):
"""
    Estimate power for a given allele frequency, effect size beta and sample size N.
    Assumption:
        the z-score beta_ML is distributed as p(0)   = N(0, 1.0/(2*maf*(1-maf)*N)) under the null hypothesis;
        the actual beta_ML is distributed as   p(alt) = N(beta, 1.0/(2*maf*(1-maf)*N)).
    Arguments:
        maf:    minor allele frequency of the SNP
        beta:   effect size of the SNP
        N:      sample size (number of individuals)
        cutoff: p-value threshold below which a SNP counts as detected (default: genome-wide 5e-8)
    Returns:
        power: probability to detect a SNP in that study with the given parameters
        pvals: p-values of the theoretical effect-size grid used to estimate the power
"""
"""
std(snp)=sqrt(2.0*maf*(1-maf))
power = \int
beta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp)
E[beta_ML] = (snp^T*snp)^{-1}*snp^T*E[Y]
= (snp^T*snp)^{-1}*snp^T*snp * beta
= beta
Var[beta_ML]= (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1}
= (snp^T*snp)^{-1}
                = 1/(N * var(snp))
                = 1/(N * 2*maf*(1-maf))
"""
assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
if beta<0.0:
beta=-beta
std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
non_centrality = beta
beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
n_grid = 100000
beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
power = (pvals<cutoff).mean()
return power, pvals
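# Minimal usage sketch (assumes numpy as np and scipy.stats as st are available, as
# referenced inside the function body above):
pwr, pvals = power(maf=0.3, beta=0.05, N=5000, cutoff=5e-8)
# `pwr` is the fraction of the theoretical effect-size grid whose chi-square p-value
# falls below the genome-wide significance cutoff.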
|
1806718cd0af5deb38a25a90864bb14f40e2c57a
| 3,645,801
|
def get_rotation_matrix(angle: float, direction: np.ndarray, point: np.ndarray = None) -> np.ndarray:
"""Compute rotation matrix relative to point and direction
Args:
angle (float): angle of rotation in radian
direction (np.ndarray): axis of rotation
point (np.ndarray, optional): center of rotation. Defaults to None.
Returns:
np.ndarray: rotation_matrix
"""
sina = np.sin(angle)
cosa = np.cos(angle)
direction = direction[:3] / np.linalg.norm(direction[:3])
M = np.diag([cosa, cosa, cosa, 1.0])
M[:3, :3] += np.outer(direction, direction) * (1.0 - cosa)
direction = direction * sina
M[:3, :3] += np.array([[0.0, -direction[2], direction[1]], [direction[2], 0.0, -direction[0]],
[-direction[1], direction[0], 0.0]])
# if point is specified, rotation is not around origin
if point is not None:
point = np.array(point[:3], dtype=np.float64, copy=False)
M[:3, 3] = point - np.dot(M[:3, :3], point)
return M
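# Minimal usage sketch for the function above (numpy assumed imported as np):
# rotate the point (1, 0, 0) by 90 degrees about the z-axis through the origin.
R = get_rotation_matrix(np.pi / 2, np.array([0.0, 0.0, 1.0]))
p = R[:3, :3] @ np.array([1.0, 0.0, 0.0])   # -> approximately (0, 1, 0)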
|
fd7c8d22368b51310a85453f6a9732f56a443803
| 3,645,802
|
def answer(panel_array):
""" Returns the maximum product of positive and (odd) negative numbers."""
print("panel_array=", panel_array)
# Edge case I: no panels :]
if (len(panel_array) == 0):
return str(0)
# Get zero panels.
zero_panels = list(filter(lambda x: x == 0 , panel_array))
print("zero_panels=", zero_panels)
# Edge case II: no positive nor negative panels.
if (len(zero_panels) == len(panel_array)):
return str(0)
# Get positive panels
positive_panels = list(filter(lambda x: x >0 , panel_array))
print("positive_panels=", positive_panels)
positive_product = 1
for x in positive_panels:
positive_product *= x
# Get negative panels.
negative_panels = sorted(list(filter(lambda x: x <0 , panel_array)))
print("negative_panels=", negative_panels)
# Edge case III: there is only one "negative panel".
if (len(negative_panels) == 1):
# If this is the only panel.
        if (len(panel_array) == 1):
            return str(negative_panels[0])
        # If there are no positive panels, but there is at least one zero panel.
        elif (len(positive_panels) == 0) and (len(zero_panels) > 0):
            return str(0)
# Check number of negative panels.
if len(negative_panels) % 2 != 0:
        # Drop the negative panel closest to zero (last after the ascending sort).
        negative_panels.pop()
print("final negative_panels=", negative_panels)
negative_product = 1
for x in negative_panels:
negative_product *= x
# Return product of those two.
return str(negative_product * positive_product)
|
7169fba8dcf6c0932722dcbc606d6d60fdaf3ed1
| 3,645,805
|
def load_fromh5(filepath, dir_structure, slice_num, strt_frm=0):
"""
load_fromh5 will extract the sinogram from the h5 file
Output: the sinogram
filepath: where the file is located in the system
dir_structure: the h5 file directory structure
    slice_num: the slice where the sinogram will be extracted
strt_frm (optional): where the sinogram should begin
"""
f = h5py.File(filepath, 'r')
#["entry/data/data"]
print(f[dir_structure].shape)
end_frm = f[dir_structure].shape[0]
sino = f[dir_structure][int(strt_frm):int(end_frm),int(slice_num),:] #For APS 2BM h5 file format
return sino
|
90aa278a7429cc832071a374df9de2d8dd2abb88
| 3,645,806
|
def lqr_6_2(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None):
"""Returns an LQR environment with 6 bodies of which first 2 are actuated."""
return _make_lqr(
n_bodies=6,
n_actuators=2,
control_cost_coef=_CONTROL_COST_COEF,
time_limit=time_limit,
random=random,
environment_kwargs=environment_kwargs,
)
|
b27a4fd55d67cfcbb9a651b7915fd2e3b4460af9
| 3,645,807
|
def machine_stop(request, tenant, machine):
"""
Stop (power off) the specified machine.
"""
with request.auth.scoped_session(tenant) as session:
serializer = serializers.MachineSerializer(
session.stop_machine(machine),
context = { "request": request, "tenant": tenant }
)
return response.Response(serializer.data)
|
b91a375c1aa8b62a6ed665d0045ff4b9eeae6a18
| 3,645,808
|
from typing import Tuple
from typing import Dict
from typing import Any
def _get_input_value(arg: Tuple[str, GraphQLArgument]) -> Dict[str, Any]:
"""Compute data for the InputValue fragment of the introspection query for a particular arg."""
return {
"name": __InputValue.fields["name"].resolve(arg, None),
"description": __InputValue.fields["description"].resolve(arg, None),
"type": _get_type_ref(__InputValue.fields["type"].resolve(arg, None)),
"defaultValue": __InputValue.fields["defaultValue"].resolve(arg, None),
}
|
7e82936b07b01531b0716c6904709c37e807d868
| 3,645,810
|
def wrapper(X_mixture,X_component):
""" Takes in 2 arrays containing the mixture and component data as
numpy arrays, and prints the estimate of kappastars using the two gradient
thresholds as detailed in the paper as KM1 and KM2"""
N=X_mixture.shape[0]
M=X_component.shape[0]
best_width,kernel=compute_best_rbf_kernel_width(X_mixture,X_component)
lambda_values=np.array([1.00,1.05])
dists=get_distance_curve(kernel,lambda_values,N=N,M=M)
begin_slope=(dists[1]-dists[0])/(lambda_values[1]-lambda_values[0])
dist_diff = np.concatenate((np.ones((N, 1)) / N, -1 * np.ones((M,1)) / M))
distribution_RKHS_dist = sqrt(np.dot(dist_diff.T, np.dot(kernel, dist_diff))[0,0])
thres_par=0.2
nu1=(1-thres_par)*begin_slope + thres_par*distribution_RKHS_dist
nu1=nu1/distribution_RKHS_dist
lambda_star_est_1=mpe(kernel,N,M,nu=nu1)
kappa_star_est_1=(lambda_star_est_1-1)/lambda_star_est_1
nu2=1/sqrt(np.min([M,N]))
nu2=nu2/distribution_RKHS_dist
if nu2>0.9:
nu2=nu1
lambda_star_est_2=mpe(kernel,N,M,nu=nu2)
kappa_star_est_2=(lambda_star_est_2-1)/lambda_star_est_2
return (kappa_star_est_2,kappa_star_est_1)
|
f5e093590897c363bbab2360a14d7c3a82fd6bcd
| 3,645,811
|
import torch
def iou(
outputs: torch.Tensor,
targets: torch.Tensor,
eps: float = 1e-7,
threshold: float = 0.5,
activation: str = "sigmoid"
):
"""
Args:
outputs (torch.Tensor): A list of predicted elements
targets (torch.Tensor): A list of elements that are to be predicted
eps (float): epsilon to avoid zero division
threshold (float): threshold for outputs binarization
activation (str): An torch.nn activation applied to the outputs.
Must be one of ['none', 'sigmoid', 'softmax2d']
Returns:
float: IoU (Jaccard) score
"""
activation_fn = get_activation_by_name(activation)
outputs = activation_fn(outputs)
if threshold is not None:
outputs = (outputs > threshold).float()
intersection = torch.sum(targets * outputs)
union = torch.sum(targets) + torch.sum(outputs) - intersection + eps
return (intersection + eps) / union
|
4c43832560126c19b8b9ebc01daf3920603b5f17
| 3,645,812
|
def readCoords(f):
"""Read XYZ file and return as MRChem JSON friendly string."""
with open(f) as file:
return '\n'.join([line.strip() for line in file.readlines()[2:]])
|
0cf1a9d07b4b3fe1836ce5c8a308ff67b5fe4c70
| 3,645,813
|
def api_update_note(note_id: int):
"""Update a note"""
db = get_db()
title = request.form["title"] if "title" in request.form.keys() else None
content = request.form["content"] if "content" in request.form.keys() else None
note = db.update_note(note_id, title, content)
return jsonify(note.__dict__)
|
d6668b89854e4aa6c248041a97a55c95cd568e9e
| 3,645,815
|
def padding_oracle(decrypt, cipher, *, bs, unknown=b"\x00", iv=None):
"""Padding Oracle Attack
Given a ciphersystem such that:
- The padding follows the format of PKCS7
- The mode of the block cipher is CBC
- We can check if the padding of a given cipher is correct
- We can try to decrypt ciphertexts without limit
we can break the ciphertext with Padding Oracle Attack.
Usage:
plain = padding_oracle(decrypt, cipher, bs, unknown)
The function decrypt must receive ciphertext and return True or False:
True when the given cipher could successfully be decrypted (No padding error)
False when the given cipher cannot be decrypted (Padding error detected)
"""
if len(cipher) % bs != 0:
raise ValueError("The length of `cipher` must be a multiple of `bs`")
# Split ciphertext into blocks
cipher_blocks = []
for i in range(0, len(cipher), bs):
cipher_blocks.append(cipher[i : i + bs])
plain_blocks = [None for i in range(len(cipher_blocks))]
# Break the cipher
for k in range(len(cipher_blocks) - 1, 0, -1):
plain_blocks[k] = padding_oracle_block(
decrypt, cipher_blocks[k - 1], cipher_blocks[k], bs
)
logger.info(
"decrypted a block {}/{}: {}".format(
len(cipher_blocks) - k + 1, len(cipher_blocks), plain_blocks[k]
) )
if isinstance(unknown, str):
unknown = str2bytes(unknown)
if iv:
plain_blocks[0] = padding_oracle_block(decrypt, iv, cipher_blocks[0], bs)
logger.info("decrypted an iv block: {}".format(plain_blocks[0]))
else:
plain_blocks[0] = unknown * bs
return b"".join(plain_blocks)
|
077eeed2f8f0f2e91aa482c93f36825bdbcef17a
| 3,645,816
|
def pixels():
"""
Raspberry Pi pixels
"""
return render_template("pixels.html")
|
d3af0be80b09096e05a29ef3e9209cef2dba8431
| 3,645,817
|
async def get_song_info(id: str):
"""
    Get song details.
"""
params = {'ids': id}
return get_json(base_url + '/song/detail', params=params)
|
2185c62db03bba3019d9d010fc5603c432a0048f
| 3,645,818
|
def _find_odf_idx(map, position):
"""Find odf_idx in the map from the position (col or row).
"""
odf_idx = bisect_left(map, position)
if odf_idx < len(map):
return odf_idx
return None
|
642398d72abe89aa63b7537372499655af5a5ded
| 3,645,819
|
def get_or_create(session, model, **kwargs):
"""
Creates and returns an instance of the model with given kwargs,
if it does not yet exist. Otherwise, get instance and return.
Parameters:
session: Current database session
model: The Class of the database model
        **kwargs: The attributes for the desired instance
Returns:
(object): An object instance of the model with given kwargs
"""
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance
else:
instance = model(**kwargs)
session.add(instance)
return instance
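# Minimal usage sketch with SQLAlchemy; the `User` model and in-memory engine below
# are hypothetical and serve only to illustrate the helper above.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
with Session(engine) as session:
    first = get_or_create(session, User, name="ada")   # not found, so created and added
    again = get_or_create(session, User, name="ada")   # found via autoflush and returned
    assert first is again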
|
4d3e4f0da5ca61789171db5d8d16a5fa06e975cc
| 3,645,820
|
def pack_asn1(tag_class, constructed, tag_number, b_data):
"""Pack the value into an ASN.1 data structure.
The structure for an ASN.1 element is
| Identifier Octet(s) | Length Octet(s) | Data Octet(s) |
"""
b_asn1_data = bytearray()
if tag_class < 0 or tag_class > 3:
raise ValueError("tag_class must be between 0 and 3 not %s" % tag_class)
# Bit 8 and 7 denotes the class.
identifier_octets = tag_class << 6
# Bit 6 denotes whether the value is primitive or constructed.
identifier_octets |= ((1 if constructed else 0) << 5)
# Bits 5-1 contain the tag number, if it cannot be encoded in these 5 bits
# then they are set and another octet(s) is used to denote the tag number.
if tag_number < 31:
identifier_octets |= tag_number
b_asn1_data.append(identifier_octets)
else:
identifier_octets |= 31
b_asn1_data.append(identifier_octets)
b_asn1_data.extend(_pack_octet_integer(tag_number))
length = len(b_data)
# If the length can be encoded in 7 bits only 1 octet is required.
if length < 128:
b_asn1_data.append(length)
else:
# Otherwise the length must be encoded across multiple octets
length_octets = bytearray()
while length:
length_octets.append(length & 0b11111111)
length >>= 8
length_octets.reverse() # Reverse to make the higher octets first.
# The first length octet must have the MSB set alongside the number of
# octets the length was encoded in.
b_asn1_data.append(len(length_octets) | 0b10000000)
b_asn1_data.extend(length_octets)
return bytes(b_asn1_data) + b_data
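# Minimal usage sketch for the function above: pack an ASN.1 BOOLEAN TRUE, i.e. a
# universal (class 0), primitive element with tag number 1 and a single 0xFF octet.
packed = pack_asn1(tag_class=0, constructed=False, tag_number=1, b_data=b"\xff")
assert packed == b"\x01\x01\xff"   # identifier 0x01, length 0x01, content 0xff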
|
14aad1709b5efa46edc5d7ac8659fe1de0615a57
| 3,645,821
|
from typing import Dict
def choose(text: str, prompt: str, options: Dict[str, str], suggestion: str, none_allowed: bool):
"""
Helper function to ask user to select from a list of options (with optional description).
Suggestion can be given. 'None' can be allowed as a valid input value.
"""
p = ColorPrint()
key_list = list(options.keys())
p.print('\n'.join(wrap(text + ':', 80)))
p.print('{!y}[')
for k in range(len(key_list)):
elem = key_list[k]
description = options[elem]
if description:
p.print(' {!m}#{k}{!} {!y}{elem}{!}:', k=k, elem=elem)
for line in description.split('\n'):
p.print(' {line}', line=line)
else:
p.print(' {!m}#{k}{!} {!y}{elem}{!}', k=k, elem=elem)
p.print('{!y}]')
p.print('Selection can be made by unique prefix or index.')
while True:
val = ask(prompt, suggestion, str, none_allowed)
if val is None:
return val
try:
index = int(val)
if index in range(len(key_list)):
return key_list[index]
else:
p.error('{!r}No match for given index.')
except:
matches = [key for key in options.keys() if key[:len(val)] == val]
if len(matches) == 0:
p.error('{!r}No match for given substring.')
elif len(matches) > 1:
p.error('{!r}Selection not unique for given substring.')
else:
return matches[0]
|
0b43452f00378ddc1345b85ca72b37ff1edfae05
| 3,645,822
|
def util_color(
graph: list[list[int]], max_color: int, colored_vertices: list[int], index: int
) -> bool:
"""
    Flow:
    1. Check whether the coloring is complete
       1.1 If complete, return True
           (meaning we successfully colored the graph)
    Recursive step:
    2. Iterate over each color:
       Check whether the current coloring is valid:
       2.1. Color the given vertex
       2.2. Do a recursive call to check whether this coloring
            leads to a solution
       2.4. If the current coloring leads to a solution, return it
       2.5. Uncolor the given vertex
>>> graph = [[0, 1, 0, 0, 0],
... [1, 0, 1, 0, 1],
... [0, 1, 0, 1, 0],
... [0, 1, 1, 0, 0],
... [0, 1, 0, 0, 0]]
>>> max_colors = 3
>>> colored_vertices = [0, 1, 0, 0, 0]
>>> index = 3
>>> util_color(graph, max_colors, colored_vertices, index)
True
>>> max_colors = 2
>>> util_color(graph, max_colors, colored_vertices, index)
False
"""
if index == len(graph):
return True
for i in range(max_color):
if coloring(graph[index], colored_vertices, i):
colored_vertices[index] = i
if util_color(graph, max_color, colored_vertices, index + 1):
return True
            colored_vertices[index] = -1
return False
|
081bf9e8b1e0dcc847fdd0bc78167819506e3f1c
| 3,645,824
|
def reverse_complement(sequence):
""" Return reverse complement of a sequence. """
complement_bases = {
'g':'c', 'c':'g', 'a':'t', 't':'a', 'n':'n',
'G':'C', 'C':'G', 'A':'T', 'T':'A', 'N':'N', "-":"-",
"R":"Y", "Y":"R", "S":"W", "W":"S", "K":"M", "M":"K",
"B":"V", "V":"B", "D": "H", "H": "D",
"r":"y", "y":"r", "s":"w", "w":"s", "k":"m", "m":"k",
"b":"v", "v":"b", "d": "h", "h": "d"
}
bases = list(sequence)
bases.reverse()
revcomp = []
for base in bases:
try:
revcomp.append(complement_bases[base])
except KeyError:
print("Unexpected base encountered: ", base, " returned as X!!!")
revcomp.append("X")
return "".join(revcomp)
|
d28e520a9159cb4812079b4a7a5f2f6eb5723403
| 3,645,825
|
def get_variable_ddi(
name, shape, value, init, initializer=None, dtype=tf.float32,
regularizer=None, trainable=True):
"""Wrapper for data-dependent initialization."""
kwargs = {"trainable": trainable}
if initializer:
kwargs["initializer"] = initializer
if regularizer:
kwargs["regularizer"] = regularizer
w = tf.get_variable(name, shape, dtype, **kwargs)
if isinstance(init, bool):
if init:
return assign(w, value)
return w
else:
return tf.cond(init, lambda: assign(w, value), lambda: w)
|
b941f110ee8efbdfb9e4d2a6b5ba0a5b3e5881ed
| 3,645,826
|
def conv3x3(in_planes, out_planes, Conv=nn.Conv2d, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return Conv(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
|
7741248a5af70e33abe803469c8a20eb2f4bcdb1
| 3,645,828
|
async def async_setup(hass, config):
"""Set up the AirVisual component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_CLIENT] = {}
hass.data[DOMAIN][DATA_LISTENER] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
|
e44beaaf7657848fa377700021671d6c27317696
| 3,645,829
|
def hasConnection(document):
"""
Check whether document has a child of :class:`Sea.adapter.connection.Connection`.
:param document: a :class:`FreeCAD.Document` instance
"""
return _hasObject(document, 'Connection')
|
d0999c488ea1af1d0117eb53a6e67d5ce876a142
| 3,645,830
|
from typing import List
def trsfrm_aggregeate_mulindex(df:pd.DataFrame,
grouped_cols:List[str],
agg_col:str,
operation:str,
k:int=5):
"""transform aggregate statistics for multiindex
Examples:
>>> df_agg = trsfrm_aggregeate_mulindex( df_train, ["store", "item"], 'sales', 'mean')
"""
cols = ["sum", "mean", "median", "std", "min", "max", "skew"]
lvl0,lvl1 = grouped_cols
df_agg = pd.DataFrame( df.groupby(grouped_cols)[agg_col].agg(cols) )[operation]
df_agg = df_agg.groupby(level=lvl0).nlargest(k).reset_index(level=1, drop=True)
df_agg = df_agg.reset_index()
    df_agg[lvl1] = df_agg[lvl1].astype('category')
return df_agg
|
f84ac88bb3f3474fe5611486031e746e4dc9954d
| 3,645,831
|
from typing import Optional
def get_hub_virtual_network_connection(connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
virtual_hub_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHubVirtualNetworkConnectionResult:
"""
HubVirtualNetworkConnection Resource.
:param str connection_name: The name of the vpn connection.
:param str resource_group_name: The resource group name of the VirtualHub.
:param str virtual_hub_name: The name of the VirtualHub.
"""
__args__ = dict()
__args__['connectionName'] = connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['virtualHubName'] = virtual_hub_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200501:getHubVirtualNetworkConnection', __args__, opts=opts, typ=GetHubVirtualNetworkConnectionResult).value
return AwaitableGetHubVirtualNetworkConnectionResult(
allow_hub_to_remote_vnet_transit=__ret__.allow_hub_to_remote_vnet_transit,
allow_remote_vnet_to_use_hub_vnet_gateways=__ret__.allow_remote_vnet_to_use_hub_vnet_gateways,
enable_internet_security=__ret__.enable_internet_security,
etag=__ret__.etag,
id=__ret__.id,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
remote_virtual_network=__ret__.remote_virtual_network,
routing_configuration=__ret__.routing_configuration)
|
3275fdf70d088df2f00bfe9e0148026caca16fcc
| 3,645,832
|
def newcombe_binomial_ratio_err(k1,n1, k2,n2, z=1.0):
""" Newcombe-Brice-Bonnett ratio confidence interval of two binomial proportions.
"""
RR = (k1/n1) / (k2/n2) # mean
logRR = np.log(RR)
seLogRR = np.sqrt(1/k1 + 1/k2 - 1/n1 - 1/n2)
ash = 2 * np.arcsinh(z/2 * seLogRR)
lower = np.exp(logRR - ash)
upper = np.exp(logRR + ash)
return np.array([lower, upper])
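# Minimal usage sketch for the function above (numpy assumed imported as np):
# an approximate 95% confidence interval for the risk ratio (12/100) / (6/100) = 2.0.
lower, upper = newcombe_binomial_ratio_err(12, 100, 6, 100, z=1.96)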
|
8ea31bcbbc1d6393e2d60d9ef6a1052b3b5347c5
| 3,645,833
|
from typing import Any
from typing import Optional
import json
def parse_metrics(rpcs: Any, detokenizer: Optional[detokenize.Detokenizer],
timeout_s: Optional[float]):
"""Detokenizes metric names and retrieves their values."""
# Creates a defaultdict that can infinitely have other defaultdicts
# without a specified type.
metrics: defaultdict = _tree()
if not detokenizer:
_LOG.error('No metrics token database set.')
return metrics
stream_response = rpcs.pw.metric.MetricService.Get(
pw_rpc_timeout_s=timeout_s)
if not stream_response.status.ok():
_LOG.error('Unexpected status %s', stream_response.status)
return metrics
for metric_response in stream_response.responses:
for metric in metric_response.metrics:
path_names = []
for path in metric.token_path:
path_name = str(
detokenize.DetokenizedString(path,
detokenizer.lookup(path), b'',
False)).strip('"')
path_names.append(path_name)
value = metric.as_float if metric.HasField(
'as_float') else metric.as_int
# inserting path_names into metrics.
_insert(metrics, path_names, value)
# Converts default dict objects into standard dictionaries.
return json.loads(json.dumps(metrics))
|
d169e9e247d8b969f6adf5161f0f7399a7b69da6
| 3,645,834
|
def cigarlist_to_cigarstring(cigar_list):
"""
Convert a list of tuples into a cigar string.
Example::
[ (0, 10), (1, 1), (0, 75), (2, 2), (0, 20) ]
=> 10M 1I 75M 2D 20M
=> 10M1I75M2D20M
:param cigar_list: a list of tuples (code, length)
:type cigar_list: list
:return: the cigar string
:rtype: string
:raises: :class:`.exceptions.G2GCigarFormatError` on invalid cigar string
"""
cigar = ''
if isinstance(cigar_list, Cigar):
try:
for i in cigar_list:
cigar += str(i.length) + i.code
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
else:
try:
for i in cigar_list:
cigar += str(i[1]) + CIGAR_N2C[i[0]]
except KeyError:
raise exceptions.G2GCigarFormatError("Invalid cigar code: " + str(i))
return cigar
|
4d3a039f60f8976893e5ad3775f61fbfa2656acc
| 3,645,835
|
def add(x, y):
"""Add two numbers"""
return x+y
|
7f18ee62d6cd75e44a9401d000d9bcada63f2c24
| 3,645,836
|
def clean_acl(name, value):
"""
Returns a cleaned ACL header value, validating that it meets the formatting
requirements for standard Swift ACL strings.
The ACL format is::
[item[,item...]]
Each item can be a group name to give access to or a referrer designation
to grant or deny based on the HTTP Referer header.
The referrer designation format is::
.r:[-]value
The ``.r`` can also be ``.ref``, ``.referer``, or ``.referrer``; though it
will be shortened to just ``.r`` for decreased character count usage.
The value can be ``*`` to specify any referrer host is allowed access, a
specific host name like ``www.example.com``, or if it has a leading period
``.`` or leading ``*.`` it is a domain name specification, like
``.example.com`` or ``*.example.com``. The leading minus sign ``-``
indicates referrer hosts that should be denied access.
Referrer access is applied in the order they are specified. For example,
.r:.example.com,.r:-thief.example.com would allow all hosts ending with
.example.com except for the specific host thief.example.com.
Example valid ACLs::
.r:*
.r:*,.r:-.thief.com
.r:*,.r:.example.com,.r:-thief.example.com
.r:*,.r:-.thief.com,bobs_account,sues_account:sue
bobs_account,sues_account:sue
Example invalid ACLs::
.r:
.r:-
By default, allowing read access via .r will not allow listing objects in
the container -- just retrieving objects from the container. To turn on
listings, use the .rlistings directive.
Also, .r designations aren't allowed in headers whose names include the
word 'write'.
ACLs that are "messy" will be cleaned up. Examples:
====================== ======================
Original Cleaned
---------------------- ----------------------
``bob, sue`` ``bob,sue``
``bob , sue`` ``bob,sue``
``bob,,,sue`` ``bob,sue``
``.referrer : *`` ``.r:*``
``.ref:*.example.com`` ``.r:.example.com``
``.r:*, .rlistings`` ``.r:*,.rlistings``
====================== ======================
:param name: The name of the header being cleaned, such as X-Container-Read
or X-Container-Write.
:param value: The value of the header being cleaned.
:returns: The value, cleaned of extraneous formatting.
:raises ValueError: If the value does not meet the ACL formatting
requirements; the error message will indicate why.
"""
name = name.lower()
values = []
for raw_value in value.split(','):
raw_value = raw_value.strip()
if not raw_value:
continue
if ':' not in raw_value:
values.append(raw_value)
continue
first, second = (v.strip() for v in raw_value.split(':', 1))
if not first or not first.startswith('.'):
values.append(raw_value)
elif first in ('.r', '.ref', '.referer', '.referrer'):
if 'write' in name:
raise ValueError('Referrers not allowed in write ACL: '
'%s' % repr(raw_value))
negate = False
if second and second.startswith('-'):
negate = True
second = second[1:].strip()
if second and second != '*' and second.startswith('*'):
second = second[1:].strip()
if not second or second == '.':
raise ValueError('No host/domain value after referrer '
'designation in ACL: %s' % repr(raw_value))
values.append('.r:%s%s' % ('-' if negate else '', second))
else:
raise ValueError('Unknown designator %s in ACL: %s' %
(repr(first), repr(raw_value)))
return ','.join(values)
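# Minimal usage sketch for the function above, normalizing one of the "messy"
# examples from the docstring:
assert clean_acl('X-Container-Read', '.referrer : *, bob , sue') == '.r:*,bob,sue'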
|
1cceb2af22d2f5bbf223a0eb381b4c6643d76f0e
| 3,645,839
|
def test(X, Y, perms=10000, method="pearson", tail="two-tail", ignore_nans=False):
"""
Takes two distance matrices (either redundant matrices or condensed vectors)
and performs a Mantel test. The Mantel test is a significance test of the
correlation between two distance matrices.
Parameters
----------
X : array_like
First distance matrix (condensed or redundant).
Y : array_like
Second distance matrix (condensed or redundant), where the order of
elements corresponds to the order of elements in the first matrix.
perms : int, optional
The number of permutations to perform (default: 10000). A larger
number gives more reliable results but takes longer to run. If the
number of possible permutations is smaller, all permutations will
be tested. This can be forced by setting perms to 0.
method : str, optional
Type of correlation coefficient to use; either 'pearson' or 'spearman'
(default: 'pearson').
tail : str, optional
Which tail to test in the calculation of the empirical p-value; either
'upper', 'lower', or 'two-tail' (default: 'two-tail').
ignore_nans : bool, optional
Ignore NaN values in the Y matrix (default: False). This can be
useful if you have missing values in one of the matrices.
Returns
-------
r : float
Veridical correlation
p : float
Empirical p-value
z : float
Standard score (z-score)
"""
# Ensure that X and Y are represented as Numpy arrays.
X = np.asarray(X)
Y = np.asarray(Y)
# Check that X and Y are valid distance matrices.
if (
spatial.distance.is_valid_dm(np.nan_to_num(X)) == False
and spatial.distance.is_valid_y(X) == False
):
raise ValueError("X is not a valid condensed or redundant distance matrix")
if (
spatial.distance.is_valid_dm(np.nan_to_num(Y)) == False
and spatial.distance.is_valid_y(Y) == False
):
raise ValueError("Y is not a valid condensed or redundant distance matrix")
# If X or Y is a redundant distance matrix, reduce it to a condensed distance matrix.
if len(X.shape) == 2:
X = spatial.distance.squareform(X, force="tovector", checks=False)
if len(Y.shape) == 2:
Y = spatial.distance.squareform(Y, force="tovector", checks=False)
# Check for size equality.
if len(X) != len(Y):
raise ValueError("X and Y are not of equal size")
# Check for minimum size.
if len(X) < 3:
raise ValueError("X and Y should represent at least 3 objects")
# Check finiteness of X and Y
if not np.isfinite(X).all():
raise ValueError(
"X cannot contain NaNs (but Y may contain NaNs, so consider reordering X and Y)"
)
finite_Y = np.isfinite(Y)
if not ignore_nans and not finite_Y.all():
raise ValueError('Y may contain NaNs, but "ignore_nans" must be set to True')
if ignore_nans and finite_Y.all():
ignore_nans = False # ignore_nans is True but Y contains no nans
# If Spearman correlation is requested, convert X and Y to ranks.
method = method.lower()
if method == "spearman":
X, Y = stats.rankdata(X), stats.rankdata(Y)
Y[~finite_Y] = np.nan # retain any nans, so that these can be ignored later
# Check for valid method parameter.
elif method != "pearson":
raise ValueError('The method should be set to "pearson" or "spearman"')
# Check for valid tail parameter.
tail = tail.lower()
if tail not in ["upper", "lower", "two-tail"]:
raise ValueError('The tail should be set to "upper", "lower", or "two-tail"')
# Now we're ready to start the Mantel test using a number of optimizations:
#
# 1. Rather than compute correlation coefficients, we'll just compute the
# covariances. This works because the denominator in the equation for the
# correlation coefficient will yield the same result however the objects
# are permuted, making it redundant. Removing the denominator leaves us
# with the covariance.
#
# 2. Rather than permute the Y distances and derive the residuals to calculate
# the covariance with the X distances, we'll represent the Y residuals in
# the matrix and shuffle those directly.
#
# 3. If the number of possible permutations is less than the number of
# permutations that were requested, we'll run a deterministic test where
# we try all possible permutations rather than sample the permutation
# space. This gives a faster, deterministic result.
# Calculate the X and Y residuals, which will be used to compute the
# covariance under each permutation.
X_residuals = X - np.mean(X[finite_Y])
Y_residuals = Y - np.mean(Y[finite_Y])
# Expand the Y residuals to a redundant matrix.
Y_residuals_as_matrix = spatial.distance.squareform(
Y_residuals, force="tomatrix", checks=False
)
m = len(Y_residuals_as_matrix) # number of objects
n = np.math.factorial(m) # number of possible matrix permutations
# If the number of requested permutations is greater than the number of
# possible permutations (m!) or the perms parameter is set to 0, then run a
# deterministic Mantel test
if perms >= n or perms == 0:
if ignore_nans:
correlations = deterministic_test_with_nans(m, n, X, Y_residuals_as_matrix)
else:
correlations = deterministic_test(m, n, X_residuals, Y_residuals_as_matrix)
# correlations[0] is the veridical correlation
else:
if ignore_nans:
correlations = stochastic_test_with_nans(m, perms, X, Y_residuals_as_matrix)
else:
correlations = stochastic_test(m, perms, X_residuals, Y_residuals_as_matrix)
correlations[0] = sum(X_residuals[finite_Y] * Y_residuals[finite_Y]) / np.sqrt(
sum(X_residuals[finite_Y] ** 2) * sum(Y_residuals[finite_Y] ** 2)
        )  # compute veridical correlation and place in position 0
r = correlations[0]
if tail == "upper":
p = sum(correlations >= r) / len(correlations)
elif tail == "lower":
p = sum(correlations <= r) / len(correlations)
elif tail == "two-tail":
p = sum(abs(correlations) >= abs(r)) / len(correlations)
z = (r - np.mean(correlations)) / np.std(correlations)
return r, p, z
|
7f0d7447ed475292f221e1dc6e4944f5cb2e8bd4
| 3,645,840
|
def get_format_datestr(date_str, to_format='%Y-%m-%d'):
"""
Args:
date_str (str): ''
to_format (str): '%Y-%m-%d'
Returns:
date string (str)
"""
date_obj = parser.parse(date_str).date()
return date_obj.strftime(to_format)
|
bf443aad3ca38eb35b647d26b38b1404cf82f387
| 3,645,841
|
def lor(*goalconsts):
""" Logical or for goal constructors
>>> from logpy.arith import lor, eq, gt
>>> gte = lor(eq, gt) # greater than or equal to is `eq or gt`
"""
def goal(*args):
return lany(*[gc(*args) for gc in goalconsts])
return goal
|
9726cc24f6d79214e652d42ff1b872f60b5a4594
| 3,645,842
|
import logging
from operator import gt
def score(input,
index,
output=None,
scoring="+U,+u,-s,-t,+1,-i,-a",
filter=None, # "1,2,25"
quality=None,
compress=False,
threads=1,
raw=False,
remove_existing=False):
"""Score the input. In addition, you can specify a tuple with (<score_strata_to_keep>,<max_strata_distance>,<max_alignments>) to
filter the result further.
"""
if compress and output is None:
logging.warning("Disabeling stream compression")
compress = False
if compress and not output.endswith(".gz"):
output += ".gz"
quality = _prepare_quality_parameter(quality)
if quality in ['none', 'ignore']:
quality = 'offset-33'
index = _prepare_index_parameter(index, gem_suffix=True)
score_p = [executables['gem-2-gem'],
'-I', index,
'-q', quality,
'-s', scoring,
'-T', str(threads)
]
if filter is not None:
score_p.append("-f")
ff = filter
if not isinstance(filter, basestring):
ff = ",".join([str(f) for f in filter])
score_p.append(ff)
if raw or isinstance(input, gt.InputFile):
raw = True
if isinstance(input, gt.InputFile) and remove_existing:
input.remove_scores = True
raw = False
#input = input.raw_stream()
tools = [score_p]
if compress:
gzip = _compressor(threads=threads)
tools.append(gzip)
process = utils.run_tools(tools, input=input, output=output, name="GEM-Score", write_map=True, raw=raw)
return _prepare_output(process, output=output)
|
c4b0fee2df964e65ee0aec12c84b0b1d7985a254
| 3,645,845
|
from django.contrib.postgres.search import SearchQuery, SearchRank
from django.db.models import F, QuerySet
def keyword_search(queryset: QuerySet, keywords: str) -> QuerySet:
"""
Performs a keyword search over a QuerySet
Uses PostgreSQL's full text search features
Args:
queryset (QuerySet): A QuerySet to be searched
keywords (str): A string of keywords to search the QuerySet
Returns:
QuerySet: A QuerySet filtered by keywords
"""
query = SearchQuery(keywords)
rank_annotation = SearchRank(F("search_vector"), query)
filtered_queryset = (
queryset.annotate(rank=rank_annotation)
.filter(search_vector=query)
.order_by("-rank")
)
return filtered_queryset
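# Usage sketch (assumption: a Django model, here called Article, whose table has a
# SearchVectorField named `search_vector`; the model name and query are illustrative).
# matches = keyword_search(Article.objects.all(), "solar power storage")
# for article in matches:
#     print(article.rank, article.pk)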
|
1fb38af2c3aa3bdf092196e8e12539e0a2cf9e58
| 3,645,846
|
def classification_loss(hidden, labels, n_class, initializer, name, reuse=None,
return_logits=False):
"""
Different classification tasks should use different scope names to ensure
different dense layers (parameters) are used to produce the logits.
An exception will be in transfer learning, where one hopes to transfer
the classification weights.
"""
logits = fluid.layers.fc(
input=hidden,
size=n_class,
param_attr=fluid.ParamAttr(name=name+'_logits', initializer=initializer))
one_hot_target = fluid.layers.one_hot(labels, depth=n_class, dtype=hidden.dtype)
    loss = -fluid.layers.reduce_sum(fluid.layers.log_softmax(logits) * one_hot_target, -1)
if return_logits:
return loss, logits
return loss
|
c89fd14fae7099b43f639bf0825600e26b60e417
| 3,645,847
|
def do_pdfimages(pdf_file, state, page_number=None, use_tmp_identifier=True):
"""Convert a PDF file to images in the TIFF format.
:param pdf_file: The input file.
:type pdf_file: jfscripts._utils.FilePath
:param state: The state object.
:type state: jfscripts.pdf_compress.State
:param int page_number: Extract only the page with a specific page number.
:return: The return value of `subprocess.run`.
:rtype: subprocess.CompletedProcess
"""
if use_tmp_identifier:
image_root = '{}_{}'.format(pdf_file.basename, tmp_identifier)
else:
image_root = pdf_file.basename
command = ['pdfimages', '-tiff', str(pdf_file), image_root]
if page_number:
page_number = str(page_number)
page_segments = ['-f', page_number, '-l', page_number]
command = command[:2] + page_segments + command[2:]
return run.run(command, cwd=state.common_path)
|
e5a48cdf2c93b037c4f983a56467e839920fa06c
| 3,645,848
|
def connect(transport=None, host='localhost', username='admin',
password='', port=None, key_file=None, cert_file=None,
ca_file=None, timeout=60, return_node=False, **kwargs):
""" Creates a connection using the supplied settings
This function will create a connection to an Arista EOS node using
the arguments. All arguments are optional with default values.
Args:
transport (str): Specifies the type of connection transport to use.
Valid values for the connection are socket, http_local, http, and
https. The default value is specified in DEFAULT_TRANSPORT
        host (str): The IP address or DNS host name of the connection device.
The default value is 'localhost'
username (str): The username to pass to the device to authenticate
the eAPI connection. The default value is 'admin'
password (str): The password to pass to the device to authenticate
the eAPI connection. The default value is ''
port (int): The TCP port of the endpoint for the eAPI connection. If
this keyword is not specified, the default value is automatically
determined by the transport type. (http=80, https=443)
key_file (str): Path to private key file for ssl validation
cert_file (str): Path to PEM formatted cert file for ssl validation
ca_file (str): Path to CA PEM formatted cert file for ssl validation
timeout (int): timeout
return_node (bool): Returns a Node object if True, otherwise
returns an EapiConnection object.
Returns:
An instance of an EapiConnection object for the specified transport.
"""
transport = transport or DEFAULT_TRANSPORT
connection = make_connection(transport, host=host, username=username,
password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file,
port=port, timeout=timeout)
if return_node:
return Node(connection, transport=transport, host=host,
username=username, password=password, key_file=key_file,
cert_file=cert_file, ca_file=ca_file, port=port, **kwargs)
return connection
|
09407d39e624f9a863a7633627d042b17b7a6158
| 3,645,850
|
def cov_hc2(results):
"""
See statsmodels.RegressionResults
"""
# probably could be optimized
h = np.diag(np.dot(results.model.exog,
np.dot(results.normalized_cov_params,
results.model.exog.T)))
het_scale = results.resid**2/(1-h)
cov_hc2_ = _HCCM(results, het_scale)
return cov_hc2_
|
328eeb88e37a2d78a6c0f0f9b3b81459230d87d5
| 3,645,851
|
def add_people():
"""
Show add form
"""
if request.method == 'POST':
#save data to database
db_conn = get_connection()
cur = db_conn.cursor()
print ('>'*10, request.form)
firstname = request.form['first-name']
lastname = request.form['last-name']
address = request.form['address']
country = request.form['country']
# if firstname is not empty, insert into table:
if firstname.strip():
_add_sql = '''
INSERT INTO peoples(firstname, lastname, address, country)
VALUES(?,?,?,?)
'''
cur.execute(_add_sql, (firstname.strip(),
lastname.strip(), address.strip(), country.strip()
))
db_conn.commit()
#redirect to list page
return redirect(url_for('list_people'))
else:
#redirect to add page with error
return redirect(url_for('add_people'))
return render_template('add.jinja2')
|
db2fcd7a2d9ed0073741d02a0bcafef37f714299
| 3,645,852
|
import inspect
def api_to_schema(api: "lightbus.Api") -> dict:
"""Produce a lightbus schema for the given API"""
schema = {"rpcs": {}, "events": {}}
if isinstance(api, type):
raise InvalidApiForSchemaCreation(
"An attempt was made to derive an API schema from a type/class, rather than "
"from an instance of an API. This is probably because you are passing an API "
"class to api_to_schema(), rather than an instance of the API class."
)
for member_name, member in inspect.getmembers(api):
if member_name.startswith("_"):
# Don't create schema from private methods
continue
if hasattr(Api, member_name):
# Don't create schema for methods defined on Api class
continue
if inspect.ismethod(member):
schema["rpcs"][member_name] = {
"parameters": make_rpc_parameter_schema(api.meta.name, member_name, method=member),
"response": make_response_schema(api.meta.name, member_name, method=member),
}
elif isinstance(member, Event):
schema["events"][member_name] = {
"parameters": make_event_parameter_schema(api.meta.name, member_name, event=member)
}
return schema
|
d07f6c6915967a1e61bc8f9bd1b72adb24207684
| 3,645,853
|
def sum2(u : SignalUserTemplate, initial_state=0):
"""Accumulative sum
Parameters
----------
u : SignalUserTemplate
the input signal
initial_state : float, SignalUserTemplate
the initial state
Returns
-------
SignalUserTemplate
the output signal of the filter
Details:
--------
The difference equation
y[k+1] = y[k] + u[k]
is evaluated. The return values are
y[k], y[k+1]
"""
y_k = dy.signal()
y_kp1 = y_k + u
y_k << dy.delay(y_kp1, initial_state=initial_state)
return y_k, y_kp1
|
3649942de13f698a92703747d8ea73be7ece4ddb
| 3,645,854
|
def approve_report(id):
"""
Function to approve a report
"""
# Approve the vulnerability_document record
resource = s3db.resource("vulnerability_document", id=id, unapproved=True)
resource.approve()
# Read the record details
vdoc_table = db.vulnerability_document
record = db(vdoc_table.id == id).select(vdoc_table.document_type,
vdoc_table.doc_id,
vdoc_table.source_id,
limitby=(0, 1)).first()
# Approve the linked records
document_type = record.document_type
if document_type == "indicator":
tablename = "vulnerability_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "vulnerability_update_aggregates"
elif document_type == "demographic":
tablename = "stats_demographic_data"
table = s3db[tablename]
query = (table.source_id == record.source_id)
agg_function = "stats_demographic_update_aggregates"
elif document_type in ("map", "image"):
tablename = "doc_image"
query = (s3db[tablename].doc_id == record.doc_id)
elif document_type in ("vca", "other"):
tablename = "doc_document"
query = (s3db[tablename].doc_id == record.doc_id)
else:
current.log.error("Report not Approved as unknown type", document_type)
return False
resource = s3db.resource(tablename, filter=query, unapproved=True)
resource.approve()
if document_type in ("indicator", "demographic"):
# Rebuild the relevant aggregates
rows = resource.select(fields=["data_id",
"parameter_id",
"date",
"location_id",
"value"],
as_rows=True)
s3task.run_async(agg_function, vars = {"records": rows.json()})
return True
|
ce1bdb00a5fb6958c51422543e62f289de5e96cb
| 3,645,855
|
def sparse_column_multiply(E, a):
"""
    Multiply each column of the sparse matrix E by the corresponding entry of the vector a
Parameters
----------
E: `np.array` or `sp.spmatrix`
a: `np.array`
        A vector of per-column scale factors.
Returns
-------
Rescaled sparse matrix
"""
ncol = E.shape[1]
if ncol != a.shape[0]:
logg.error("Dimension mismatch, multiplication failed")
return E
else:
w = ssp.lil_matrix((ncol, ncol))
w.setdiag(a)
return ssp.csr_matrix(E) * w
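# Usage sketch; the function above expects scipy.sparse to be available as `ssp`
# and a logger `logg` in its module, so the example only exercises the happy path.
import numpy as np
import scipy.sparse as ssp
E = ssp.csr_matrix(np.array([[1.0, 2.0], [3.0, 4.0]]))
a = np.array([10.0, 0.5])
sparse_column_multiply(E, a).toarray()
# -> [[10., 1.], [30., 2.]]   (column 0 scaled by 10, column 1 by 0.5)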
|
a215440e630aeb79758e8b0d324ae52ea87eba52
| 3,645,856
|
def soup_extract_enzymelinks(tabletag):
"""Extract all URLs for enzyme families from first table."""
return {link.string: link['href']
for link in tabletag.find_all("a", href=True)}
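# Usage sketch (assumes BeautifulSoup is installed; the HTML fragment is made up).
from bs4 import BeautifulSoup
html_doc = '<table><a href="/fam/GH1">GH1</a><a href="/fam/GH2">GH2</a></table>'
soup = BeautifulSoup(html_doc, 'html.parser')
soup_extract_enzymelinks(soup.find('table'))
# -> {'GH1': '/fam/GH1', 'GH2': '/fam/GH2'}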
|
7baabd98042ab59feb5d8527c18fe9fa4b6a50af
| 3,645,857
|
def loops_NumbaJit_parallelFast(csm, r0, rm, kj):
""" This method implements the prange over the Gridpoints, which is a direct
implementation of the currently used c++ methods created with scipy.wave.
Very strange: Just like with Cython, this implementation (prange over Gridpoints)
produces wrong results. If one doesn't parallelize -> everything is good
(just like with Cython). Maybe Cython and Numba.jit use the same interpreter
to generate OpenMP-parallelizable code.
BUT: If one uncomments the 'steerVec' declaration in the prange-loop over the
gridpoints an error occurs. After commenting the line again and executing
the script once more, THE BEAMFORMER-RESULTS ARE CORRECT (for repeated tries).
Funny enough the method is now twice as slow in comparison to the
'wrong version' (before invoking the error).
"""
# init
nFreqs = csm.shape[0]
nGridPoints = len(r0)
nMics = csm.shape[1]
beamformOutput = np.zeros((nFreqs, nGridPoints), np.float64)
steerVec = np.zeros((nMics), np.complex128)
for cntFreqs in xrange(nFreqs):
kjj = kj[cntFreqs].imag
for cntGrid in prange(nGridPoints):
# steerVec = np.zeros((nMics), np.complex128) # This is the line that has to be uncommented (see this methods documentation comment)
rs = 0
r01 = r0[cntGrid]
for cntMics in xrange(nMics):
rm1 = rm[cntGrid, cntMics]
rs += 1.0 / (rm1**2)
temp3 = np.float32(kjj * (rm1 - r01))
steerVec[cntMics] = (np.cos(temp3) - 1j * np.sin(temp3)) * rm1
rs = r01 ** 2
temp1 = 0.0
for cntMics in xrange(nMics):
temp2 = 0.0
for cntMics2 in xrange(cntMics):
temp2 = temp2 + csm[cntFreqs, cntMics2, cntMics] * steerVec[cntMics2]
temp1 = temp1 + 2 * (temp2 * steerVec[cntMics].conjugate()).real
temp1 = temp1 + (csm[cntFreqs, cntMics, cntMics] * np.conjugate(steerVec[cntMics]) * steerVec[cntMics]).real
beamformOutput[cntFreqs, cntGrid] = (temp1 / rs).real
return beamformOutput
|
82201310483b72c525d1488b5229e628d44a65ca
| 3,645,859
|
import numpy
import scipy.ndimage
def sobel_vertical_gradient(image: numpy.ndarray) -> numpy.ndarray:
"""
Computes the Sobel gradient in the vertical direction.
Args:
image: A two dimensional array, representing the image from which the vertical gradient will be calculated.
Returns:
A two dimensional array, representing the vertical gradient of the image.
"""
ky = numpy.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
return scipy.ndimage.convolve(image, ky)
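# Usage sketch: a toy image with a horizontal edge; the response is concentrated
# around the edge rows and is ~0 in the flat regions.
img = numpy.zeros((6, 6))
img[3:, :] = 1.0
grad_y = sobel_vertical_gradient(img)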
|
f8f9bf6fadbae962206255ab3de57edbab9d935e
| 3,645,860
|
def custom_field_sum(issues, custom_field):
"""Sums custom field values together.
Args:
issues: List The issue list from the JQL query
custom_field: String The custom field to sum.
Returns:
Integer of the sum of all the found values of the custom_field.
"""
custom_field_running_total = 0
for issue in issues:
if getattr(issue.fields, custom_field) is None:
custom_field_running_total = custom_field_running_total + 2
else:
custom_field_running_total = custom_field_running_total + \
getattr(issue.fields, custom_field)
return custom_field_running_total
|
32c1cce310c06f81036ee79d70a8d4bbe28c8417
| 3,645,861
|
def routingAreaUpdateReject():
"""ROUTING AREA UPDATE REJECT Section 9.4.17"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0xb) # 00001011
c = GmmCause()
d = ForceToStandbyAndSpareHalfOctets()
packet = a / b / c / d
return packet
|
b9bb0e498a768eb6b7875018c78ca23e54353620
| 3,645,862
|
def doRipsFiltration(X, maxHomDim, thresh = -1, coeff = 2, getCocycles = False):
"""
Run ripser assuming Euclidean distance of a point cloud X
:param X: An N x d dimensional point cloud
:param maxHomDim: The dimension up to which to compute persistent homology
:param thresh: Threshold up to which to add edges. If not specified, add all
edges up to the full clique
:param coeff: A prime to use as the field coefficients for the PH computation
:param getCocycles: True if cocycles should be computed and returned
:return: PDs (array of all persistence diagrams from 0D up to maxHomDim).
Each persistence diagram is a numpy array
OR
tuple (PDs, Cocycles) if returning cocycles
"""
D = getSSM(X)
return doRipsFiltrationDM(D, maxHomDim, thresh, coeff, getCocycles)
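# Usage sketch (assumption: getSSM and doRipsFiltrationDM are defined elsewhere in
# this module, as the wrapper above requires; the point cloud is illustrative).
# X = np.random.randn(100, 2)             # 100 points in the plane
# PDs = doRipsFiltration(X, maxHomDim=1)  # persistence diagrams for H0 and H1
# H1 = PDs[1]                             # birth/death pairs of the 1-cycles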
|
a0e4cabb613ac77659fda2d31867a7e9df32f288
| 3,645,863
|
def build_target_areas(entry):
"""Cleanup the raw target areas description string"""
target_areas = []
areas = str(entry['cap:areaDesc']).split(';')
for area in areas:
target_areas.append(area.strip())
return target_areas
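# Usage sketch: `entry` mirrors the shape of a parsed CAP alert; the values are illustrative.
entry = {'cap:areaDesc': 'Dane County; Rock County; Jefferson County'}
build_target_areas(entry)
# -> ['Dane County', 'Rock County', 'Jefferson County']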
|
48e76a5c1ed42aed696d441c71799b47f9193b29
| 3,645,864
|
def convert_to_celcius(scale, temp):
"""Convert the specified temperature to Celcius scale.
:param int scale: The scale to convert to Celcius.
:param float temp: The temperature value to convert.
:returns: The temperature in degrees Celcius.
:rtype: float
"""
if scale == temp_scale.FARENHEIT:
return convert_farenheit_to_celcius(temp)
elif scale == temp_scale.CELCIUS:
return temp
elif scale == temp_scale.KELVIN:
return convert_kelvin_to_celcius(temp)
elif scale == temp_scale.RANKINE:
return convert_rankine_to_celcius(temp)
else:
return 0.0
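# Usage sketch (assumption: temp_scale and the convert_*_to_celcius helpers are
# defined elsewhere in this module, as the dispatcher above expects).
# convert_to_celcius(temp_scale.FARENHEIT, 212.0)  # -> 100.0
# convert_to_celcius(temp_scale.KELVIN, 273.15)    # -> 0.0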
|
a7c2f0f7405eea96c1ebb4a7e103cf28a95b6f5a
| 3,645,865
|
def config_file_settings(request):
"""
Update file metadata settings
"""
if request.user.username != 'admin':
return redirect('project-admin:home')
if request.method == 'POST':
update_file_metadata(request.POST)
return redirect('project-admin:home')
files = FileMetaData.objects.all()
for file in files:
file.tags = file.get_tags()
return render(request, 'project_admin/config-file-settings.html',
context={"files": files})
|
c8e91ac49305e3aa7aa33961939c3add23fc5327
| 3,645,866
|
def roundtrip(sender, receiver):
"""
Send datagrams from `sender` to `receiver` and back.
"""
return transfer(sender, receiver), transfer(receiver, sender)
|
939d9fd861b89037322fcc7c851d291ab073b520
| 3,645,867
|
import json
def loadHashDictionaries():
"""
Load dictionaries containing id -> hash and hash -> id mappings
These dictionaries are essential due to some restrictive properties
of the anserini repository
Return both dictionaries
"""
with open(PATH + PATH_ID_TO_HASH, "r") as f:
id_to_hash_dict = json.load(f)
with open(PATH + PATH_HASH_TO_ID, "r") as f:
hash_to_id_dict = json.load(f)
return id_to_hash_dict, hash_to_id_dict
|
de8af6d5e5562869c992e08343aadb77c48933b0
| 3,645,868
|
def preprocess(tensor_dict, preprocess_options, func_arg_map=None):
"""Preprocess images and bounding boxes.
Various types of preprocessing (to be implemented) based on the
preprocess_options dictionary e.g. "crop image" (affects image and possibly
boxes), "white balance image" (affects only image), etc. If self._options
is None, no preprocessing is done.
Args:
tensor_dict: dictionary that contains images, boxes, and can contain other
things as well.
images-> rank 4 float32 tensor contains
1 image -> [1, height, width, 3].
with pixel values varying between [0, 1]
boxes-> rank 2 float32 tensor containing
the bounding boxes -> [N, 4].
Boxes are in normalized form meaning
their coordinates vary between [0, 1].
Each row is in the form
of [ymin, xmin, ymax, xmax].
preprocess_options: It is a list of tuples, where each tuple contains a
function and a dictionary that contains arguments and
their values.
func_arg_map: mapping from preprocessing functions to arguments that they
expect to receive and return.
Returns:
tensor_dict: which contains the preprocessed images, bounding boxes, etc.
Raises:
ValueError: (a) If the functions passed to Preprocess
are not in func_arg_map.
(b) If the arguments that a function needs
do not exist in tensor_dict.
(c) If image in tensor_dict is not rank 4
"""
if func_arg_map is None:
func_arg_map = get_default_func_arg_map()
# changes the images to image (rank 4 to rank 3) since the functions
# receive rank 3 tensor for image
if fields.InputDataFields.image in tensor_dict:
image = tensor_dict[fields.InputDataFields.image]
# if len(images.get_shape()) != 4:
# raise ValueError('images in tensor_dict should be rank 4')
# image = tf.squeeze(images, squeeze_dims=[0])
if len(image.get_shape()) != 3:
raise ValueError('images in tensor_dict should be rank 3')
tensor_dict[fields.InputDataFields.image] = image
# Preprocess inputs based on preprocess_options
for option in preprocess_options:
func, params = option
if func not in func_arg_map:
raise ValueError('The function %s does not exist in func_arg_map' %
(func.__name__))
arg_names = func_arg_map[func]
for a in arg_names:
if a is not None and a not in tensor_dict:
raise ValueError('The function %s requires argument %s' %
(func.__name__, a))
def get_arg(key):
return tensor_dict[key] if key is not None else None
args = [get_arg(a) for a in arg_names]
results = func(*args, **params)
if not isinstance(results, (list, tuple)):
results = (results,)
# Removes None args since the return values will not contain those.
arg_names = [arg_name for arg_name in arg_names if arg_name is not None]
for res, arg_name in zip(results, arg_names):
tensor_dict[arg_name] = res
# # changes the image to images (rank 3 to rank 4) to be compatible to what
# # we received in the first place
# if fields.InputDataFields.image in tensor_dict:
# image = tensor_dict[fields.InputDataFields.image]
# images = tf.expand_dims(image, 0)
# tensor_dict[fields.InputDataFields.image] = images
return tensor_dict
|
141b170e0d4c6447750e2ece967afec7a92a37ea
| 3,645,869
|
def update_comment(id):
"""修改单条评论"""
comment = Comment.query.get_or_404(id)
if g.current_user != comment.author and not g.current_user.can(Permission.COMMENT):
return error_response(403)
data = request.get_json()
if not data:
return bad_request('You must put JSON data.')
comment.from_dict(data)
db.session.commit()
return jsonify(comment.to_dict())
|
59db7122f9139f7fda744284e83045533d6361fb
| 3,645,870
|
def resnet18(num_classes, pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
encoder = ResNetEncoder(BasicBlock, [2, 2, 2, 2])
if pretrained:
encoder.load_state_dict(model_zoo.load_url(model_urls['resnet18'], model_dir='models'), strict=False)
model = RetinaNet(encoder=encoder, num_classes=num_classes)
return model
|
b342fa322cb26571b5df7e5e8f117ce016a7febf
| 3,645,871
|
import dash_html_components as html
import dash_bootstrap_components as dbc
def display_page(pathname):
"""displays dash page"""
if pathname == '/':
return main.layout
elif pathname == '/explore':
return explore.layout
elif pathname == '/eval':
return eval.layout
elif pathname == '/train':
return train.layout
else:
return html.Div(dbc.Col(dbc.Jumbotron(
[
html.H1("404: Not found", className="text-danger"),
html.Hr(),
html.P(f"The pathname {pathname} was not recognized..."),
]
), width = 9), style = CONTENT_STYLE
)
|
3aeb44ca1974b63f63b9ef97526aef20d2d92ddb
| 3,645,872
|
def getFilterDict(args):
"""
Function: An entire function just to notify the user of the arguments they've passed to the script? Seems reasonable.
Called from: main
"""
    ## Set variables for organization; this can probably be removed later
outText = {}
outAction = ""
userString = ""
ipString = ""
ctryString = ""
domainString = ""
evntString = ""
## Split the modifierData variable if user passed multiple values in a comma-separated list
if args.modifierData:
modData = args.modifierData.split(",")
## Set analysis type as one of the three main functions available
if args.lIPs:
outAction = "Analysis Type: IP dump"
elif args.topNum:
outAction = "Analysis Type: Log summary"
else:
outAction = "Analysis Type: Detailed Analysis"
## Determine if results will be filtered or excluded by user & create output string. Note
## that usernames passed in DOMAIN\USERNAME format will need to be converted back to a
## single backslash (\) where the user escaped command input with a double backslash (\\)
try:
if args.filterType.lower() == "user" or args.excludeType.lower() == "user":
for i in range(0,(len(modData))):
if userString == "":
userString = modData[i].replace("\\\\","\\")
else:
userString = userString + ", " + modData[i].replace("\\\\","\\")
if args.filterType:
userString = " Users - Only " + userString
else:
userString = " Users - All except " + userString
except:
pass
## Determine if results will be filtered or excluded by IP address & create output string
try:
if args.filterType.lower() == "ip" or args.excludeType.lower() == "ip":
for i in range(0,(len(modData))):
if ipString == "":
ipString = modData[i]
else:
ipString = ipString + ", " + modData[i]
if args.filterType:
ipString = " IPs - Only " + ipString
else:
ipString = " IPs - All except " + ipString
except:
pass
## If the user passed the -P argument to omit private IP addresses, add it to IP line
if args.privIP:
if ipString == "":
ipString = " IPs - All except internal addresses"
else:
ipString += ", and internal addresses"
## Determine if results will be filtered or excluded by country & create output string
try:
if args.filterType.lower() == "country" or args.excludeType.lower() == "country":
for i in range(0,(len(modData))):
if ctryString == "":
ctryString = modData[i]
else:
ctryString = ctryString + ", " + modData[i]
if args.filterType:
ctryString = " Countries - Only " + ctryString
else:
ctryString = " Countries - All except " + ctryString
except:
pass
## Determine if results will be filtered or excluded by domain & create output string
try:
if args.filterType.lower() == "domain" or args.excludeType.lower() == "domain":
for i in range(0,(len(modData))):
if domainString == "":
domainString = modData[i]
else:
domainString = domainString + ", " + modData[i]
if args.filterType:
domainString = " Domains - Only " + domainString
else:
domainString = " Domains - All except " + domainString
except:
pass
## Determine if benign 'garbage' events will be filtered out and update misc event filter string
if args.logGarbage:
evntString = "No garbage events"
## Determine if only known cities will be presented in the results and update misc event filter string
if args.kCity:
if evntString == "":
evntString = "No unknown cities"
else:
evntString = evntString + ", no unknown cities"
## Determine if events will only be filtered to IPs with foreign geolocation and update filter string
if args.warnIP:
if ipString == "":
ipString = " IPs - Only IPs foreign to current location"
else:
ipString = ipString + ", only IPs foreign to current location"
## If any filter strings are empty, replace them with notice that all events of the given type will be included in output
if userString == "":
userString = " Users - ALL"
if ipString == "":
ipString = " IPs - ALL"
if ctryString == "":
ctryString = " Countries - ALL"
if domainString == "":
domainString = " Domains - ALL"
if evntString == "":
evntString = " Events - ALL"
else:
evntString = " Events - " + evntString
## Arrange the outText dictionary to be passed back to main and ship it
outText["outAction"] = outAction
outText["userString"] = userString
outText["ipString"] = ipString
outText["ctryString"] = ctryString
outText["domainString"] = domainString
outText["evntString"] = evntString
return outText
|
ea175812465fa30866fe90c6461f416c4af1d6b2
| 3,645,873
|
def pointwise_multiply(A, B):
"""Pointwise multiply
Args:
-----------------------------
A: tvm.te.tensor.Tensor
shape [...]
B: tvm.te.tensor.Tensor
shape same as A
-----------------------------
Returns:
-----------------------------
tvm.te.tensor.Tensor
shape same as A
-----------------------------
"""
assert_print(len(A.shape) == len(B.shape))
for i in range(len(A.shape)):
assert_print(A.shape[i].value == B.shape[i].value)
def _mul(*args):
return A[args] * B[args]
return tvm.te.compute(A.shape, _mul)
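# Usage sketch (assumption: TVM is installed and `assert_print` is defined in this
# module, as the function above expects).
# A = tvm.te.placeholder((4, 4), name='A')
# B = tvm.te.placeholder((4, 4), name='B')
# C = pointwise_multiply(A, B)   # C[i, j] == A[i, j] * B[i, j]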
|
37c27cced9cc77f3a3aefef32d56f92f0ceb292f
| 3,645,874
|
import traceback
def create_website(self):
"""
:param self:
:return:
"""
try:
query = {}
show = {"_id": 0}
website_list = yield self.mongodb.website.find(query, show)
return website_list
except:
logger.error(traceback.format_exc())
return ""
|
b7b8faf55095288e5c2d693aeed85f6412449c08
| 3,645,875
|
def _get_sparsity(A, tolerance=0.01):
"""Returns ~% of zeros."""
positives = np.abs(A) > tolerance
non_zeros = np.count_nonzero(positives)
return (A.size - non_zeros) / float(A.size)
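# Usage sketch (assumes numpy imported as np, as the function above does).
A = np.array([[0.0, 5.0], [0.001, -3.0]])
_get_sparsity(A)
# -> 0.5  (the 0.0 and 0.001 entries fall below the 0.01 tolerance)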
|
44b7fb501a10551167ad37ffdafaef42c6c849b9
| 3,645,876
|
def findPeaks(hist):
"""
Take in histogram
Go through each bin in the histogram and:
Find local maximum and:
Fit a parabola around the two neighbor bins and local max bin
Calculate the critical point that produces the max of the parabola
(critical point represents orientation, max is the peak)
Add both to list of peaks
Return sorted list of peaks
"""
peaks = []
offsets = []
binRanges = np.arange(-175, 185, 10)
    hist_max = np.max(hist)
for i in range(len(hist)):
if i == 0:
left, right = -1, 1
elif i == len(hist) - 1:
left, right = -2, 0
else:
left, right = i-1, i+1
        if (hist[i] - hist[left]) >= (0.01*hist_max) \
        and (hist[i] - hist[right]) >= (0.01*hist_max):
a = (hist[right] - 2*hist[i] + hist[left]) / 2
b = (hist[right] - hist[left]) / 2
c = hist[i]
aDx = a*2
bDx = -1*b
#critical point
x = bDx/aDx
            # peak value of the fitted parabola
            peak_val = a*(x**2) + b*x + c
            offset = (x*10) + binRanges[i]
            peaks.append((peak_val, offset))
return sorted(peaks, reverse=True)
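# Usage sketch (assumes numpy imported as np): a 36-bin orientation histogram
# (10-degree bins centred from -175 to 175) with one dominant peak around bin 20.
hist = np.zeros(36)
hist[20] = 10.0
hist[19] = hist[21] = 4.0
findPeaks(hist)[0]
# -> (fitted peak value, fitted orientation in degrees) for the dominant bin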
|
99b89d4fd9f35deab141e178aaa107dabf35ccfe
| 3,645,877
|
def gradient_descent(x_0, a, eta, alpha, beta, it_max, *args, **kwargs):
"""Perform simple gradient descent with back-tracking line search.
"""
# Get a copy of x_0 so we don't modify it for other project parts.
x = x_0.copy()
# Get an initial gradient.
g = gradient(x, a)
# Compute the norm.
norm = np.linalg.norm(g)
# Initialize lists to track our objective values and step sizes.
obj_list = []
t_list = []
# Loop while the norm is less than eta.
i = 0
while (eta <= norm) and (i < it_max):
# Perform back-tracking line search to get our step size.
t = backtrack_line_search(x=x, a=a, g=g, dx=-g, alpha=alpha, beta=beta)
t_list.append(t)
# Perform the x update.
x = x - t * g
# Compute new gradient and norm.
g = gradient(x, a)
norm = np.linalg.norm(g)
# Compute new value of objective function, append to list.
obj_list.append(objective(x, a))
if np.isnan(obj_list[-1]):
raise ValueError(
'NaN objective value encountered in gradient_descent')
# Update iteration counter.
i += 1
if i >= it_max:
raise ValueError(f'Hit {i} iterations in gradient_descent.')
return x, np.array(obj_list), t_list
|
701097aaebbe15306818593daf501b0f7d622f49
| 3,645,879
|
from collections import Counter
import numpy as np
def knn_python(input_x, dataset, labels, k):
"""
    :param input_x: the input vector to classify
    :param dataset: the reference training samples used for the distance computation
    :param labels: the class labels of the training samples
    :param k: the number of nearest neighbours to consider
"""
    # 1. Compute the Euclidean distances between the query sample and the reference samples
dist = np.sum((input_x - dataset) ** 2, axis=1) ** 0.5
    # 2. Take the labels of the k nearest samples
k_labels = [labels[index] for index in dist.argsort()[0: k]]
    # 3. The most frequent of those labels is the final predicted class
label = Counter(k_labels).most_common(1)[0][0]
return label
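# Usage sketch with a toy 2-D dataset; the points and labels are illustrative.
dataset = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']
knn_python(np.array([0.1, 0.0]), dataset, labels, k=3)
# -> 'B'  (the three nearest neighbours are B, B, A)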
|
8deaec88369d2d0cb42ebdd3961caf891357335b
| 3,645,881
|
def all_logit_coverage_function(coverage_batches):
"""Computes coverage based on the sum of the absolute values of the logits.
Args:
coverage_batches: Numpy arrays containing coverage information pulled from
a call to sess.run. In this case, we assume that these correspond to a
batch of logits.
Returns:
    A list with one single-element numpy array per batch element, each holding
    the sum of the absolute values of that element's logits.
"""
coverage_batch = coverage_batches[0]
coverage_list = []
for idx in range(coverage_batch.shape[0]):
elt = coverage_batch[idx]
elt = np.expand_dims(np.sum(np.abs(elt)), 0)
coverage_list.append(elt)
return coverage_list
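# Usage sketch (assumes numpy imported as np, as the function above does):
# one batch of three 4-class logit vectors.
logits = np.array([[1.0, -2.0, 0.5, 0.0],
                   [3.0,  1.0, -1.0, 0.0],
                   [0.0,  0.0,  0.0, 0.0]])
all_logit_coverage_function([logits])
# -> [array([3.5]), array([5.]), array([0.])]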
|
32674a4528b69b756b3fc5f161dcbfd3ceaba01f
| 3,645,884
|
import asyncio
from concurrent.futures import ThreadPoolExecutor
async def create_audio(request):
"""Process the request from the 'asterisk_ws_monitor' and creates the audio file"""
try:
message = request.rel_url.query["message"]
except KeyError:
message = None
LOGGER.error(f"No 'message' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
try:
msg_chk_sum = request.rel_url.query["msg_chk_sum"]
except KeyError:
msg_chk_sum = None
LOGGER.error(f"No 'msg_chk_sum' parameter passed on: '{request.rel_url}'")
raise web.HTTPClientError(
reason=GENERATE_AUDIO_ERROR, body=None, text=None, content_type=None
)
inner_loop = asyncio.get_running_loop()
executor = ThreadPoolExecutor(max_workers=NUM_OF_CPUS)
futures = inner_loop.run_in_executor(
executor, create_audio_file, message, msg_chk_sum
)
try:
await asyncio.ensure_future(futures)
status_code = 200
except Exception as e:
status_code = 500
LOGGER.error(f"Unable to generate the audio file: '{e}'")
return web.json_response({"status": status_code})
|
6aa90764c167be9a1d980dea0e54243a9467c276
| 3,645,885
|
def reinterpret_axis(block, axis, label, scale=None, units=None):
""" Manually reinterpret the scale and/or units on an axis """
def header_transform(hdr, axis=axis, label=label, scale=scale, units=units):
tensor = hdr['_tensor']
if isinstance(axis, basestring):
axis = tensor['labels'].index(axis)
if label is not None:
tensor['labels'][axis] = label
if scale is not None:
tensor['scales'][axis] = scale
if units is not None:
tensor['units'][axis] = units
return hdr
return block_view(block, header_transform)
|
e19d1a5cf567f72ae261cc0fef69b03e2d8a9696
| 3,645,886
|
def duel(board_size, player_map):
"""
:param board_size: the board size (i.e. a 2-tuple)
:param player_map: a dict, where the key is an int, 0 or 1, representing the player, and the value is the policy
:return: the resulting game outcomes
"""
board_state = init_board_state(board_size)
results = {p: {"won": 0, "lost": 0, "tied": 0} for p in player_map}
for player in player_map:
for edge_index in range(len(board_state)):
players = [player, (1 - player)]
if edge_index % 2 == 0:
players = [x for x in reversed(players)]
game = Game(board_size, players)
current_player = game.get_current_player()
# select the first edge for the first player
current_player, _ = game.select_edge(edge_index, current_player)
while not game.is_finished():
state = game.get_board_state()
edge = player_map[current_player].select_edge(
state, game.get_score(current_player), game.get_score(1 - current_player))
current_player, _ = game.select_edge(edge, current_player)
p0_score = game.get_score(0)
p1_score = game.get_score(1)
if p0_score > p1_score:
results[0]["won"] += 1
results[1]["lost"] += 1
if p1_score > p0_score:
results[1]["won"] += 1
results[0]["lost"] += 1
if p0_score == p1_score:
results[0]["tied"] += 1
results[1]["tied"] += 1
return results
|
3fbcd1477fc90553cdc5371440083c9737b4bf5b
| 3,645,887
|
def set_processor_type(*args):
"""
set_processor_type(procname, level) -> bool
Set target processor type. Once a processor module is loaded, it
cannot be replaced until we close the idb.
@param procname: name of processor type (one of names present in
\ph{psnames}) (C++: const char *)
@param level: SETPROC_ (C++: setproc_level_t)
@return: success
"""
return _ida_idp.set_processor_type(*args)
|
32d827fe0c0d152af98e6bed5baa7a24d372c4f8
| 3,645,888
|
from onnx.helper import make_node
from onnx import TensorProto
def convert_repeat(node, **kwargs):
"""Map MXNet's repeat operator attributes to onnx's Tile operator.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
opset_version = kwargs['opset_version']
if opset_version < 11:
raise AttributeError('ONNX opset 11 or greater is required to export this operator')
repeats = int(attrs.get('repeats', 1))
axis = attrs.get('axis', 'None')
if repeats <= 0:
raise NotImplementedError('repeat operator does not support parameter repeats==0')
nodes = []
if axis == 'None':
create_tensor([-1], name+'_-1', kwargs['initializer'])
create_tensor([repeats], name+'_rep', kwargs['initializer'])
create_tensor([1, repeats], name+'_repeats', kwargs['initializer'])
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('ReduceProd', [name+'_shape'], [name+'_size']),
make_node('Reshape', [input_nodes[0], name+'_size'], [name+'_flat']),
make_node('Unsqueeze', [name+'_flat', name+'_-1'], [name+'_unsqueeze']),
make_node('Tile', [name+'_unsqueeze', name+'_repeats'], [name+'_tile']),
make_node('Mul', [name+'_size', name+'_rep'], [name+'_new_size']),
make_node('Reshape', [name+'_tile', name+'_new_size'], [name], name=name)
]
else:
axis = int(axis)
repeats -= 1
create_tensor([repeats], name+'_repeats', kwargs['initializer'])
create_tensor([1], name+'_1', kwargs['initializer'])
create_tensor([0], name+'_0', kwargs['initializer'])
create_tensor([axis], name+'_axis', kwargs['initializer'])
create_const_scalar_node(name+"_0_s", np.int64(0), kwargs)
create_const_scalar_node(name+"_1_s", np.int64(1), kwargs)
nodes += [
make_node('Shape', [input_nodes[0]], [name+'_shape']),
make_node('Shape', [name+'_shape'], [name+'_dim']),
make_node('Squeeze', [name+'_dim', name+'_0'], [name+'_dim_s']),
make_node('Range', [name+'_0_s', name+'_dim_s', name+'_1_s'], [name+'_range'])
]
if axis < 0:
nodes += [
make_node('Add', [name+'_axis', name+'_dim'], [name+'_true_axis']),
make_node('Equal', [name+'_range', name+'_true_axis'], [name+'_one_hot'])
]
else:
nodes += [
make_node('Equal', [name+'_range', name+'_axis'], [name+'_one_hot'])
]
nodes += [
make_node('Cast', [name+'_one_hot'], [name+'_one_hot_int'], to=int(TensorProto.INT64)),
make_node('Mul', [name+'_repeats', name+'_one_hot_int'], [name+'_mul']),
make_node('Add', [name+'_mul', name+'_1'], [name+'_add']),
make_node('Concat', [name+'_1', name+'_add'], [name+'_repeats_tensor'], axis=0)
]
if axis == -1:
nodes += [
make_node('Concat', [name+'_shape', name+'_1'], [name+'_unsqueeze_shape'], axis=0),
make_node('Reshape', [input_nodes[0], name+'_unsqueeze_shape'],
[name+'_unsqueeze'])
]
else:
create_tensor([axis+1], name+'_axis+1', kwargs['initializer'])
nodes += [
make_node('Unsqueeze', [input_nodes[0], name+'_axis+1'], [name+'_unsqueeze'])
]
nodes += [
make_node('Tile', [name+'_unsqueeze', name+'_repeats_tensor'], [name+'_tile']),
make_node('Mul', [name+'_shape', name+'_add'], [name+'_new_shape']),
make_node('Reshape', [name+'_tile', name+'_new_shape'], [name], name=name)
]
return nodes
|
120e1ee364bf64b00b504fcdc8d0769a6d02db7b
| 3,645,889
|
def my_quote(s, safe = '/'):
"""quote('abc def') -> 'abc%20def'
Each part of a URL, e.g. the path info, the query, etc., has a
different set of reserved characters that must be quoted.
RFC 2396 Uniform Resource Identifiers (URI): Generic Syntax lists
the following reserved characters.
reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
"$" | ","
Each of these characters is reserved in some component of a URL,
but not necessarily in all of them.
By default, the quote function is intended for quoting the path
section of a URL. Thus, it will not encode '/'. This character
is reserved, but in typical usage the quote function is being
called on a path where the existing slash characters are used as
reserved characters.
"""
cachekey = (safe, always_safe)
try:
safe_map = _safemaps[cachekey]
except KeyError:
safe += always_safe
safe_map = {}
for i in range(256):
c = chr(i)
safe_map[c] = (c in safe) and c or ('%%%02x' % i)
_safemaps[cachekey] = safe_map
res = map(safe_map.__getitem__, s)
return ''.join(res)
|
c5c28b7779e9cab2488696435832f9f7cbd03e57
| 3,645,890
|
from masci_tools.tools.cf_calculation import CFCalculation, plot_crystal_field_calculation
def test_plot_crystal_field_calculation():
"""
Test of the plot illustrating the potential and charge density going into the calculation
"""
cf = CFCalculation()
cf.readPot('files/cf_calculation/CFdata.hdf')
cf.readCDN('files/cf_calculation/CFdata.hdf')
plt.gcf().clear()
plot_crystal_field_calculation(cf, show=False)
return plt.gcf()
|
90488103d929929615dc1e5de1531102a9f7b96a
| 3,645,892
|