| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
import math
def tgamma(x):
    """'tgamma' function taking into account the uncertainties
    """
    fun = getattr(x, '__tgamma__', None)
    if fun:
        return fun()
    return math.gamma(x)
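A minimal usage sketch of the `__tgamma__` dispatch hook; the `Uncertain` class below is hypothetical, standing in for any object implementing `__tgamma__`:
class Uncertain:
    def __init__(self, value):
        self.value = value
    def __tgamma__(self):
        # hypothetical: a real implementation would also propagate the uncertainty
        return math.gamma(self.value)

tgamma(5)             # 24.0, plain numbers fall through to math.gamma
tgamma(Uncertain(5))  # 24.0, dispatched to Uncertain.__tgamma__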
|
35c73e2e0a9945cb38beffb6376dd7b7bc6443e9
| 3,637,150
|
import numpy as np
def detect_peaks_by_channel(traces, peak_sign, abs_thresholds, n_shifts):
"""Detect peaks using the 'by channel' method."""
traces_center = traces[n_shifts:-n_shifts, :]
length = traces_center.shape[0]
if peak_sign in ('pos', 'both'):
        peak_mask = traces_center > abs_thresholds[None, :]
for i in range(n_shifts):
peak_mask &= traces_center > traces[i:i + length, :]
peak_mask &= traces_center >= traces[n_shifts + i + 1:n_shifts + i + 1 + length, :]
if peak_sign in ('neg', 'both'):
if peak_sign == 'both':
peak_mask_pos = peak_mask.copy()
        peak_mask = traces_center < -abs_thresholds[None, :]
for i in range(n_shifts):
peak_mask &= traces_center < traces[i:i + length, :]
peak_mask &= traces_center <= traces[n_shifts + i + 1:n_shifts + i + 1 + length, :]
if peak_sign == 'both':
peak_mask = peak_mask | peak_mask_pos
# find peaks
peak_sample_ind, peak_chan_ind = np.nonzero(peak_mask)
# correct for time shift
peak_sample_ind += n_shifts
return peak_sample_ind, peak_chan_ind
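A small sketch on synthetic data, using the corrected signature above:
traces = np.zeros((100, 4), dtype='float32')
traces[50, 2] = 8.0  # a single positive peak on channel 2
thresholds = np.full(4, 5.0, dtype='float32')
sample_ind, chan_ind = detect_peaks_by_channel(traces, 'pos', thresholds, n_shifts=2)
# sample_ind -> array([50]), chan_ind -> array([2])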
|
c5024e73e103ba50c6d011067849eafb519d7ca7
| 3,637,151
|
import numpy as np
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
def multi_gauss_psf_kernel(psf_parameters, BINSZ=0.02, NEW_BINSZ=0.02, **kwargs):
"""Create multi-Gauss PSF kernel.
The Gaussian PSF components are specified via the
amplitude at the center and the FWHM.
See the example for the exact format.
Parameters
----------
psf_parameters : dict
PSF parameters
BINSZ : float (0.02)
Pixel size used for the given parameters in deg.
NEW_BINSZ : float (0.02)
        New pixel size in deg. Used to change the resolution of the PSF.
Returns
-------
psf_kernel : `astropy.convolution.Kernel2D`
PSF kernel
Examples
--------
>>> psf_pars = dict()
>>> psf_pars['psf1'] = dict(ampl=1, fwhm=2.5)
>>> psf_pars['psf2'] = dict(ampl=0.06, fwhm=11.14)
>>> psf_pars['psf3'] = dict(ampl=0.47, fwhm=5.16)
>>> psf_kernel = multi_gauss_psf_kernel(psf_pars, x_size=51)
"""
psf = None
for ii in range(1, 4):
# Convert sigma and amplitude
pars = psf_parameters["psf{}".format(ii)]
sigma = gaussian_fwhm_to_sigma * pars["fwhm"] * BINSZ / NEW_BINSZ
ampl = 2 * np.pi * sigma ** 2 * pars["ampl"]
if psf is None:
psf = float(ampl) * Gaussian2DKernel(sigma, **kwargs)
else:
psf += float(ampl) * Gaussian2DKernel(sigma, **kwargs)
psf.normalize()
return psf
|
07705bcebb02c622c8f1a4cddcad8781ebfa08fa
| 3,637,152
|
from typing import List
from typing import Optional
from typing import Union
def Wavefunction( # type: ignore # pylint: disable=function-redefined
param: List[List[int]],
broken: Optional[Union[List[str], str]] = None) -> 'Wavefunction':
"""Initialize a wavefunction through the fqe namespace
Args:
param (List[List[int]]): parameters for the sectors
broken (Union[List[str], str]): symmetry to be broken
Returns:
(Wavefunction): a wavefunction object meeting the \
criteria laid out in the calling argument
"""
return wavefunction.Wavefunction(param, broken=broken)
|
d5646e26c908c2c824095f20e82cf9418c6115a6
| 3,637,153
|
def extractFiles(comment):
"""Find all files in a comment.
@param comment: The C{unicode} comment text.
    @return: A C{list} of file names from the comment, with no duplicates,
in the order they appear in the comment.
"""
return uniqueList(findall(FILE_REGEX, comment))
|
af795598e9f5be973d0e7df771d11d064590881f
| 3,637,154
|
import akg
def rint_compute(input_x):
"""rint compute implementation"""
res = akg.lang.cce.round(input_x)
res = akg.lang.cce.cast_to(res, input_x.dtype)
return res
|
f1797518d6b4a7d117ee894c5c0ff26bb4eb09f9
| 3,637,156
|
import numpy as np
def _solequal(sol1, sol2, prec):
"""
Compare two different solutions with a given precision.
    Return True if they are equal.
"""
res = True
for sol_1, sol_2 in zip(sol1, sol2):
if np.ndim(sol_1) != 0 and np.ndim(sol_2) != 0:
res &= _dist(sol_1, sol_2) < prec
elif np.ndim(sol_1) != 0 and np.ndim(sol_2) == 0:
return False
elif np.ndim(sol_1) == 0 and np.ndim(sol_2) != 0:
return False
return res
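An illustrative call; `_dist` is defined in the surrounding module, so a plausible Euclidean stand-in is supplied here:
def _dist(a, b):  # hypothetical stand-in for the module's distance helper
    return np.linalg.norm(np.asarray(a) - np.asarray(b))

sol1 = [np.array([1.0, 2.0]), np.array([3.0])]
sol2 = [np.array([1.0, 2.0 + 1e-9]), np.array([3.0])]
_solequal(sol1, sol2, prec=1e-6)  # True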
|
29361d34cf1d1703fa60c8df77132d15e4e1e849
| 3,637,157
|
def clip_rows(data, ord=2, L=1):
"""
    Rescale every row whose norm exceeds L so that its norm becomes exactly L,
    leaving all other rows unchanged.
"""
max_norm = get_max_norm(data, ord=ord)
print("For order {0}, max norm is {1}".format(ord, max_norm))
normalized_data = data.copy()
modified = 0
for i in range(data.shape[0]):
norm = get_norm(data[i], ord)
if norm > L:
modified += 1
normalized_data[i] = L * normalized_data[i] / norm
print("For order {0}, final max norm is {1}"
.format(ord, get_max_norm(normalized_data, ord=ord)))
print("Had to modify {0} rows ({1}% of total)"
.format(modified, 100*modified / data.shape[0]))
return normalized_data
|
64ed166a88eee193f5b6c157bb2d0f37f02af150
| 3,637,159
|
import numpy as np
# note: Pattern here is the module's own xy-pattern class, not typing.Pattern
def extrapolate_to_zero_linear(pattern):
"""
Extrapolates a pattern to (0, 0) using a linear function from the most left point in the pattern
:param pattern: input Pattern
:return: extrapolated Pattern (includes the original one)
"""
x, y = pattern.data
step = x[1] - x[0]
low_x = np.sort(np.arange(min(x), 0, -step))
low_y = y[0] / x[0] * low_x
return Pattern(np.concatenate((low_x, x)),
np.concatenate((low_y, y)))
|
ca148be4a104a0eaff5b765de3a847bdf9c052be
| 3,637,160
|
import random
def findKthSmallest(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
def partition(left, right, pivot_index):
pivot = nums[pivot_index]
# 1. move pivot to end
nums[pivot_index], nums[right] = nums[right], nums[pivot_index]
# 2. move all smaller elements to the left
store_index = left
for i in range(left, right):
if nums[i] < pivot:
nums[store_index], nums[i] = nums[i], nums[store_index]
store_index += 1
# 3. move pivot to its final place
nums[right], nums[store_index] = nums[store_index], nums[right]
return store_index
def select(left, right, k_smallest):
"""
        Returns the element at index k_smallest of the sorted list within left..right
"""
if left == right: # If the list contains only one element,
return nums[left] # return that element
# select a random pivot_index between
pivot_index = random.randint(left, right)
# find the pivot position in a sorted list
pivot_index = partition(left, right, pivot_index)
# the pivot is in its final sorted position
if k_smallest == pivot_index:
return nums[k_smallest]
# go left
elif k_smallest < pivot_index:
return select(left, pivot_index - 1, k_smallest)
# go right
else:
return select(pivot_index + 1, right, k_smallest)
    return select(0, len(nums) - 1, k - 1)
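Usage sketch (the method never uses `self`, so passing None works for a standalone call; note the `k - 1` fix above, since `select` works on 0-based sorted indices):
findKthSmallest(None, [3, 2, 1, 5, 6, 4], 2)  # -> 2, the 2nd smallest element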
|
d82176bd9539cf36416c5dc3c7da53a99f2a8f62
| 3,637,161
|
import numpy as np
def racetrack_AP_RR_TF(
wavelength,
sw_angle=90,
radius=12,
couplerLength=4.5,
gap=0.2,
width=0.5,
thickness=0.2,
widthCoupler=0.5,
loss=[0.99],
coupling=[0],
):
"""This particular transfer function assumes that the coupling sides of the
ring resonator are straight, and the other two sides are curved. Therefore,
the roundtrip length of the RR is 2*pi*radius + 2*couplerLength. This model
also includes loss. (??? Need Verification on last line)
    We assume that the round parts of the ring have negligible coupling compared to
the straight sections.
Parameters
-----------
wavelength : ndarray (N,)
Wavelength points to evaluate
radius : float
Radius of the sides in microns
couplerLength : float
Length of the coupling region in microns
gap : float
Gap in the coupler region in microns
width : float
Width of the waveguides in microns
thickness : float
Thickness of the waveguides in microns
Returns
-------
E : ndarray
Complex array of size (N,)
alpha : ndarray
Array of size (N,)
t : ndarray
Array of size (N,)
alpha_s : ndarray
Array of size (N,)
phi : ndarray
Array of size (N,)
"""
# Sanitize the input
wavelength = np.squeeze(wavelength)
# N = wavelength.shape[0]
# calculate coupling
cTE0, cTE1 = evWGcoupler(
wavelength=wavelength,
width=widthCoupler,
thickness=thickness,
sw_angle=sw_angle,
gap=gap,
)
n1 = np.squeeze(cTE0) # Get the first mode of the coupler region
n2 = np.squeeze(cTE1) # Get the second mode of the coupler region
Beta1 = 2 * np.pi * n1 / wavelength
Beta2 = 2 * np.pi * n2 / wavelength
x = 0.5 * (np.exp(1j * Beta1 * couplerLength) + np.exp(1j * Beta2 * couplerLength))
y = 0.5 * (
np.exp(1j * Beta1 * couplerLength)
+ np.exp(1j * Beta2 * couplerLength - 1j * np.pi)
)
alpha_c = np.sqrt(np.abs(x) ** 2 + np.abs(y) ** 2)
t_c = x
# k_c = y
# Construct the coupling polynomial
# couplingPoly = np.poly1d(coupling)
# r = np.abs(x) - couplingPoly(wavelength)
# k = np.abs(y)
# calculate bent waveguide
TE0_B = np.squeeze(
bentWaveguide(
wavelength=wavelength,
width=width,
thickness=thickness,
sw_angle=sw_angle,
radius=radius,
)
)
# calculate straight waveguide
TE0 = np.squeeze(
straightWaveguide(
wavelength=wavelength, width=width, thickness=thickness, sw_angle=sw_angle
)
)
# Calculate round trip length
# L = 2 * np.pi * radius + 2 * couplerLength
# calculate total loss
# alpha = np.squeeze(np.exp(- np.imag(TE0) * 2*couplerLength - np.imag(TE0_B)*2*np.pi*radius - lossPoly(wavelength)*L))
alpha_t = np.exp(
-np.imag(TE0) * 2 * couplerLength - np.imag(TE0_B) * 2 * np.pi * radius
)
alpha_m = np.squeeze(alpha_c * alpha_t)
offset = np.mean(alpha_m)
lossTemp = loss.copy()
lossTemp[-1] = loss[-1] - (1 - offset)
    lossPoly = np.poly1d(lossTemp)
alpha = lossPoly(wavelength)
alpha_s = alpha - alpha_m
# calculate phase shifts
phi_c = np.unwrap(np.angle(t_c))
BetaStraight = np.unwrap(2 * np.pi * np.real(TE0) / wavelength)
BetaBent = np.unwrap(2 * np.pi * np.real(TE0_B) / wavelength)
phi_r = np.squeeze(BetaStraight * couplerLength + BetaBent * 2 * np.pi * radius)
phi = np.unwrap(phi_r + phi_c)
t = np.abs(t_c) / alpha_c
## Cascade final coupler
# E = np.exp(1j*(np.pi+phi)) * (alpha - r*np.exp(-1j*phi))/(1-r*alpha*np.exp(1j*phi))
E = (
(t - alpha * np.exp(1j * phi))
/ (1 - alpha * t * np.exp(1j * phi))
* (t_c / np.conj(t_c))
* alpha_c
* np.exp(-1j * phi_c)
)
# Output final s matrix
return E, alpha, t, alpha_s, phi
|
e6bc912970333b901bf70e573a8b9194f6255de5
| 3,637,162
|
from typing import Union
from typing import Iterator
import pandas as pd
from tqdm import tqdm
def consume_chunks(generator: Union[PandasTextFileReader, Iterator], progress: bool = True, total: int = None):
"""Transform the result of chained filters into a pandas DataFrame
:param generator: iterator to be transformed into a dataframe
:param progress: whether to show progress
:param total: total number of chunks the input is divided in
"""
data = []
if progress:
pbar = tqdm(generator, total=total)
else:
pbar = generator
for item in pbar:
if not isinstance(item, pd.DataFrame):
consumed = _consume_deeper_chunks(item)
data.extend(consumed)
else:
data.append(item)
if not len(data):
return pd.DataFrame()
return pd.concat(data, axis=0)
|
60198262341e9bd6dd5170cb98439c5b9975a238
| 3,637,164
|
def lang_not_found(s):
"""Is called when the language files aren't found"""
return s + "⚙"
|
064d73e10d6e2aa9436557b38941ed2eb020d7bb
| 3,637,165
|
def _get_corr_matrix(corr, rho):
"""Preprocessing of correlation matrix ``corr`` or
correlation values ``rho``.
Given either ``corr`` or ``rho`` (each may be an array,
callable or process instance), returns the corresponding,
possibly time-dependent correlation matrix,
with a ``shape`` attribute set to
its shape (may be set to None if attempts to
retrieve shape information fail).
If ``corr`` is not None, ``rho`` is ignored.
If both are None, returns None.
"""
# exit if no correlations specified
if corr is None and rho is None:
return None
elif corr is not None:
# if present, corr overrides rho
corr = _variable_param_setup(corr)
cshape = _get_param_shape(corr)
if cshape is not None:
if len(cshape) not in (2, 3) or cshape[0] != cshape[1] or \
(len(cshape) == 3 and cshape[2] != 1):
raise ValueError(
"the correlation matrix ``corr`` should be square, "
"possibly with a trailing 1-dimensional axis matching "
"the paths axis, not an array with shape {}"
.format(cshape))
else:
# corr is None: build correlation matrix from rho,
# either statically or dynamically
rho = _variable_param_setup(rho)
rho_shape = _get_param_shape(rho)
if rho_shape is not None:
if len(rho_shape) > 2 or \
(len(rho_shape) == 2 and rho_shape[1] != 1):
raise ValueError(
"correlation ``rho`` should be a vector, "
"possibly with a trailing 1-dimensional axis matching "
"the paths axis, not an array with shape {}"
.format(rho.shape))
if callable(rho):
def corr(t):
return _const_rho_to_corr(rho(t))
corr.shape = None if rho_shape is None else \
(2, 2) if rho_shape == () else \
(2*rho_shape[0], 2*rho_shape[0])
else:
corr = _const_rho_to_corr(rho)
return corr
|
8241c0245cbd4b8554c31deb28179556c9da8cd1
| 3,637,166
|
def sequence_exact_match(true_seq, pred_seq):
"""
    Return True if the two sequences match exactly (same start and end).
"""
true_seq = strip_whitespace(true_seq)
pred_seq = strip_whitespace(pred_seq)
return pred_seq["start"] == true_seq["start"] and pred_seq["end"] == true_seq["end"]
|
574ad0a7ad0a31875c298824fc1230bdf662f356
| 3,637,168
|
def same_variable(a, b):
"""
    Tells whether the two objects are in fact the same object (True)
    or not (False), even if they hold the same information.
    @param a any object
    @param b any object
    @return ``True`` or ``False``
    .. faqref::
        :tag: python
        :title: What is an immutable type?
        :lid: faq-py-immutable
        A variable of an *immutable* type cannot be modified. This mainly concerns:
        - ``int``, ``float``, ``str``, ``tuple``
        If a variable is of an *immutable* type, performing an operation on it
        implicitly creates a copy of the object.
        Dictionaries and lists are *modifiable* (or *mutable*). For a variable
        of such a type, writing ``a = b`` makes ``a`` and ``b`` refer to the same object,
        even though they are two different names. It is the same memory location,
        reachable through two means (two identifiers).
        For example ::
            a = (2,3)
            b = a
            a += (4,5)
            print( a == b ) # --> False
            print(a,b) # --> (2, 3, 4, 5) (2, 3)
            a = [2,3]
            b = a
            a += [4,5]
            print( a == b ) # --> True
            print(a,b) # --> [2, 3, 4, 5] [2, 3, 4, 5]
        In the first case, the type (``tuple``) is *immutable*: the ``+=`` operator implicitly hides a copy.
        In the second case, the type (``list``) is *mutable*: the ``+=`` operator avoids the copy
        because the variable can be modified in place. Even though ``b = a`` runs before the next statement,
        it does **not** preserve the state of ``a`` prior to the append.
        Another example ::
            a = [1, 2]
            b = a
            a [0] = -1
            print(a) # --> [-1, 2]
            print(b) # --> [-1, 2]
        To copy a list, the copy must be requested explicitly ::
            a = [1, 2]
            b = list(a)
            a [0] = -1
            print(a) # --> [-1, 2]
            print(b) # --> [1, 2]
        The page `Immutable Sequence Types <https://docs.python.org/3/library/stdtypes.html?highlight=immutable#immutable-sequence-types>`_
        gives more detail on which types are *mutable* and which are *immutable*. Among the standard types:
        * **immutable**
            * `bool <https://docs.python.org/3/library/functions.html#bool>`_
            * `int <https://docs.python.org/3/library/functions.html#int>`_,
              `float <https://docs.python.org/3/library/functions.html#float>`_,
              `complex <https://docs.python.org/3/library/functions.html#complex>`_
            * `str <https://docs.python.org/3/library/functions.html#func-str>`_,
              `bytes <https://docs.python.org/3/library/functions.html#bytes>`_
            * `None <https://docs.python.org/3/library/constants.html?highlight=none#None>`_
            * `tuple <https://docs.python.org/3/library/functions.html#func-tuple>`_,
              `frozenset <https://docs.python.org/3/library/functions.html#func-frozenset>`_
        * **mutable**, by default every other type, including:
            * `list <https://docs.python.org/3/library/functions.html#func-list>`_
            * `dict <https://docs.python.org/3/library/functions.html#func-dict>`_
            * `set <https://docs.python.org/3/library/functions.html#func-set>`_
            * `bytearray <https://docs.python.org/3/library/functions.html#bytearray>`_
        A class instance is mutable. It can be made
        immutable with a few tricks:
        * `__slots__ <https://docs.python.org/3/reference/datamodel.html?highlight=_slots__#object.__slots__>`_
        * `How to Create Immutable Classes in Python
          <http://www.blog.pythonlibrary.org/2014/01/17/how-to-create-immutable-classes-in-python/>`_
        * `Ways to make a class immutable in Python <http://stackoverflow.com/questions/4996815/ways-to-make-a-class-immutable-in-python>`_
        * `freeze <https://freeze.readthedocs.org/en/latest/>`_
        Finally, for objects nested inside one another (a list of lists, a class
        containing dictionaries and lists), a shallow copy is distinguished from a full copy (**deepcopy**).
        For a list of lists, the shallow copy only copies the outer list ::
            import copy
            l1 = [ [0,1], [2,3] ]
            l2 = copy.copy(l1)
            l1 [0][0] = '##'
            print(l1,l2) # --> [['##', 1], [2, 3]] [['##', 1], [2, 3]]
            l1 [0] = [10,10]
            print(l1,l2) # --> [[10, 10], [2, 3]] [['##', 1], [2, 3]]
        The deep copy also copies the nested objects ::
            import copy
            l1 = [ [0,1], [2,3] ]
            l2 = copy.deepcopy(l1)
            l1 [0][0] = '##'
            print(l1,l2) # --> [['##', 1], [2, 3]] [[0, 1], [2, 3]]
        Both functions work on any Python object: `copy module <https://docs.python.org/3/library/copy.html>`_.
"""
return id(a) == id(b)
|
0c33a33e01e5457c7216982df580abc90db47d2f
| 3,637,169
|
def format_level_2_memory(memory, header=None):
"""Format an experiment result memory object for measurement level 2.
Args:
memory (list): Memory from experiment with `meas_level==2` and `memory==True`.
header (dict): the experiment header dictionary containing
useful information for postprocessing.
Returns:
list[str]: List of bitstrings
"""
memory_list = []
for shot_memory in memory:
memory_list.append(format_counts_memory(shot_memory, header))
return memory_list
|
ebb8b0ca2e34ac93aaec01efe05a8a4d5de785d5
| 3,637,170
|
from .objectbased.conversion import to_polar
def convert_objects_to_polar(rendering_items):
"""Apply conversion to turn all Objects block formats into polar."""
return list(apply_to_object_blocks(rendering_items, to_polar))
|
df7206530e60d3765b1eaf7a3d6b45a41efc50c0
| 3,637,171
|
from typing import Tuple
import ast
def find_in_module(var_name: str, module, i: int = 0) -> Tuple[str, ast.AST]:
"""Find the piece of code that assigned a value to the variable with name *var_name* in the
module *module*.
:param var_name: Name of the variable to look for.
    :param module: Module to search.
    :param i: Index of the matching assignment to return when the variable is assigned more than once.
:returns: Tuple with source code segment and corresponding ast node.
"""
source = sourceget.get_module_source(module)
return find_in_source(var_name, source, i=i)
|
7cb6e6bd17018e72953273e53c2fe5f9ac73f2c2
| 3,637,172
|
def empty(shape,
dtype="f8",
order="C",
device=None,
usm_type="device",
sycl_queue=None):
"""Creates `dpnp_array` from uninitialized USM allocation."""
array_obj = dpt.empty(shape,
dtype=dtype,
order=order,
device=device,
usm_type=usm_type,
sycl_queue=sycl_queue)
return dpnp_array(array_obj.shape, buffer=array_obj, order=order)
|
3229a4a99a1073c9bee636d630a818d5c91a3c97
| 3,637,173
|
import numpy as np
from scipy.ndimage import label
def solve2(input_data):
"""use scipy.ndimage"""
data_array = np.array(parse(input_data))
# boundaries of objects must be 0 for scipy label
# convert 0 in data to -1 and 9 to 0
data_array[data_array == 0] = -1
data_array[data_array == 9] = 0
labels, _ = label(data_array)
_, counts = np.unique(labels, return_counts=True)
counts[1:].sort()
return counts[-3:].prod()
|
0ba8767020388c33a068b10f89b9cacd51f9e85d
| 3,637,174
|
import math
def yolox_semi_warm_cos_lr(
lr,
min_lr_ratio,
warmup_lr_start,
total_iters,
normal_iters,
no_aug_iters,
warmup_total_iters,
semi_iters,
iters_per_epoch,
iters_per_epoch_semi,
iters,
):
"""Cosine learning rate with warm up."""
min_lr = lr * min_lr_ratio
if iters <= warmup_total_iters:
# lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start
lr = (lr - warmup_lr_start) * pow(
iters / float(warmup_total_iters), 2
) + warmup_lr_start
elif iters >= normal_iters + semi_iters:
lr = min_lr
elif iters <= normal_iters:
lr = min_lr + 0.5 * (lr - min_lr) * (
1.0
+ math.cos(
math.pi
* (iters - warmup_total_iters)
/ (total_iters - warmup_total_iters - no_aug_iters)
)
)
else:
lr = min_lr + 0.5 * (lr - min_lr) * (
1.0
+ math.cos(
math.pi
* (
normal_iters
- warmup_total_iters
+ (iters - normal_iters)
* iters_per_epoch
* 1.0
/ iters_per_epoch_semi
)
/ (total_iters - warmup_total_iters - no_aug_iters)
)
)
return lr
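A quick sanity check with hypothetical schedule parameters, sampling the learning rate across training:
lrs = [
    yolox_semi_warm_cos_lr(
        lr=0.01, min_lr_ratio=0.05, warmup_lr_start=0.0,
        total_iters=1000, normal_iters=800, no_aug_iters=100,
        warmup_total_iters=50, semi_iters=150,
        iters_per_epoch=100, iters_per_epoch_semi=100, iters=i)
    for i in (1, 50, 400, 900, 951)
]
# quadratic warmup, cosine decay, the semi-supervised branch, then min_lr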
|
ac6b1850031a5c36f8de2c7597c374bc401aaee3
| 3,637,175
|
def builder(obj, dep, denominator=None):
""" A func that modifies its obj without explicit return. """
def decorate(func):
tasks.append(Builder(func, obj, dep, denominator))
return func
return decorate
|
8b9d9887324c6aa931efcf905db56ded606c6d84
| 3,637,176
|
def on_segment(p, r, q, epsilon):
"""
    Given three colinear points p, q, r, and a threshold epsilon, determine
    whether point q lies on line segment pr.
"""
# Taken from http://stackoverflow.com/questions/328107/how-can-you-determine-a-point-is-between-two-other-points-on-a-line-segment
crossproduct = (q.y - p.y) * (r.x - p.x) - (q.x - p.x) * (r.y - p.y)
if abs(crossproduct) > epsilon:
return False # (or != 0 if using integers)
dotproduct = (q.x - p.x) * (r.x - p.x) + (q.y - p.y)*(r.y - p.y)
if dotproduct < 0:
return False
squaredlengthba = (r.x - p.x)*(r.x - p.x) + (r.y - p.y)*(r.y - p.y)
if dotproduct > squaredlengthba:
return False
return True
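A sketch assuming any point type with `.x` and `.y` attributes:
from collections import namedtuple
Point = namedtuple('Point', 'x y')
on_segment(Point(0, 0), Point(4, 4), Point(2, 2), epsilon=1e-9)  # True
on_segment(Point(0, 0), Point(4, 4), Point(5, 5), epsilon=1e-9)  # False, q lies past r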
|
b8517fc9d3c6d916cac698913c35ba4e5d873697
| 3,637,178
|
def groupby_times(df, kind, unit=None):
"""Groupby specific times
Parameters
----------
df : pandas.DataFrame
DataFrame with `pandas.TimedeltaIndex` as index.
kind : {'monthly', 'weekly', 'daily', 'hourly', 'minutely', 'all'}
How to group `df`.
unit : str (optional)
What unit to use
Returns
-------
Grouped
"""
def tmp_since_last(freq):
if freq:
return since_last(df.index, freq, unit)
else:
return None
key_dict = {
'monthly': 'M',
'weekly': 'w',
'daily': 'd',
'hourly': 'h',
'minutely': 'm',
'secondly': 's',
'all': None
}
# key_dict.update({v:v for v in key_dict.values()})
if kind not in key_dict:
        raise NotImplementedError("kind must be one of {}".format(sorted(key_dict)))
# group_key = since_last(df.index, kind, unit)
else:
group_key = tmp_since_last(key_dict[kind])
grouped = df.groupby(group_key)
return grouped
|
81d5a17e3f89b36a0ce88867ce6d04cd1602a0b4
| 3,637,179
|
def pid_to_path(pid):
"""Returns the full path of the executable of a process given its pid."""
ps_command = "ps -o command " + pid
ps_output = execute(ps_command)
command = get_command(ps_output)
whereis_command = "whereis " + command
whereis_output = execute(whereis_command)
path = get_path(whereis_output)
if path == "":
return command
else:
return path
|
942a5756f9b4aecb51472efce558f86d0b9c8d67
| 3,637,180
|
def get_script_histogram(utext):
"""Return a map from script to character count + chars, excluding some common
whitespace, and inherited characters. utext is a unicode string."""
exclusions = {0x00, 0x0A, 0x0D, 0x20, 0xA0, 0xFEFF}
result = {}
for cp in utext:
if ord(cp) in exclusions:
continue
script = unicode_data.script(cp)
if script == "Zinh":
continue
if script not in result:
result[script] = [1, {cp}]
else:
r = result[script]
r[0] += 1
r[1].add(cp)
return result
|
657e60bc1a8d6c7b436cf4f8700041abe41721ea
| 3,637,181
|
def ja_nein_vielleicht(*args):
"""
    Without arguments, this function builds a yes/no/maybe selection. With
    one argument, it returns the value of the corresponding choice.
"""
values = {
True: "Vermutlich ja",
False: "Vermutlich nein",
None: "Kann ich noch nicht sagen"
}
if args:
return values[args[0]]
else:
return [
{True: values[True]},
{False: values[False]},
{None: values[None]}
]
|
a4e58ab3f2dc9662e1c054ddfd32ff1ae988b438
| 3,637,182
|
import numpy as np
from sklearn.utils.extmath import fast_logdet
def ebic(covariance, precision, n_samples, n_features, gamma=0):
"""
Extended Bayesian Information Criteria for model selection.
When using path mode, use this as an alternative to cross-validation for
finding lambda.
See:
"Extended Bayesian Information Criteria for Gaussian Graphical Models"
R. Foygel and M. Drton, NIPS 2010
Parameters
----------
covariance : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance (sample covariance)
precision : 2D ndarray (n_features, n_features)
The precision matrix of the model to be tested
n_samples : int
Number of examples.
n_features : int
Dimension of an example.
    gamma : float in [0, 1]
        Choice of gamma=0 leads to classical BIC;
        positive gamma leads to stronger penalization of large graphs.
Returns
-------
    ebic score (float). Caller should minimize this score.
"""
l_theta = -np.sum(covariance * precision) + fast_logdet(precision)
l_theta *= n_features / 2.
    # if something goes wrong with fast_logdet, return a large value
if np.isinf(l_theta) or np.isnan(l_theta):
return 1e10
mask = np.abs(precision.flat) > np.finfo(precision.dtype).eps
precision_nnz = (np.sum(mask) - n_features) / 2.0 # lower off diagonal tri
return (
-2.0 * l_theta
+ precision_nnz * np.log(n_samples)
+ 4.0 * precision_nnz * np.log(n_features) * gamma
)
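An illustrative call (with the imports added above; `fast_logdet` comes from sklearn):
cov = np.eye(3)    # sample covariance (MLE)
prec = np.eye(3)   # estimated precision matrix
ebic(cov, prec, n_samples=100, n_features=3, gamma=0.5)
# identity precision has no off-diagonal edges, so only the likelihood term contributes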
|
e5183ee7a4b0f4edc7509afb7217e4203a73919a
| 3,637,183
|
def _process_cli_plugin(bases, attrdict) -> dict:
"""Process a CLI plugin, generate its hook functions, and return a new
attrdict with all attributes set correctly.
"""
attrdict_copy = dict(attrdict) # copy to avoid mutating original
if cli.Command in bases and cli.CommandExtension in bases:
raise exceptions.PlugError(
"A plugin cannot be both a Command and a CommandExtension"
)
if cli.Command in bases:
settings = attrdict_copy.get("__settings__", cli.command_settings())
attrdict_copy["__settings__"] = settings
_check_base_parsers(settings.base_parsers or [], attrdict_copy)
elif cli.CommandExtension in bases:
if "__settings__" not in attrdict_copy:
raise exceptions.PlugError(
"CommandExtension must have a '__settings__' attribute"
)
handle_processed_args = _generate_handle_processed_args_func()
attrdict_copy[handle_processed_args.__name__] = handle_processed_args
attrdict_copy["attach_options"] = _attach_options
configurable_argnames = list(_get_configurable_arguments(attrdict))
if configurable_argnames:
def get_configurable_args(self) -> ConfigurableArguments:
return ConfigurableArguments(
config_section_name=self.__settings__.config_section_name
or self.__plugin_name__,
argnames=list(
_get_configurable_arguments(self.__class__.__dict__)
),
)
attrdict_copy[get_configurable_args.__name__] = get_configurable_args
return attrdict_copy
|
999f5011532ae67626ff5a7f416efcfad447c127
| 3,637,184
|
def get_group(yaml_dict):
"""
Return the attributes of the light group
    :param yaml_dict: parsed YAML configuration with a top-level ``groups`` mapping
    :return: dictionary of attributes of the first group
"""
group_name = list(yaml_dict["groups"].keys())[0]
group_dict = yaml_dict["groups"][group_name]
# Check group_dict has an id attribute
if 'id' not in group_dict.keys():
print("Error, expected to find an 'id' attribute in the group object")
return group_dict
|
db9e027594d3a9a9e0a1838da62316cfe6e0c380
| 3,637,185
|
import numpy as np
import matplotlib.pyplot as plt
def plot_time(
monitors,
labels,
savefile,
title="Average computation time per epoch",
ylabel="Seconds",
log=False,
directory=DEFAULT_DIRECTORY,
):
"""Plots the computation time required for each step as a horizontal bar
plot
:param monitors: a list of monitor sets: [(training, evaluation, inference)]
:param labels: a list of strings for the label of each monitor
:param savefile: name of the file to save. If none, then will not save
:param title: title of the figure
:param ylabel: label for the y-axis
:param log: whether to plot a log-plot. Can also be set to "symlog"
:param directory: directory to save the file in. Defaults to the results dir
:returns: the figure
"""
clean_labels = _correct_and_clean_labels(labels)
all_times = np.array(
[
[
np.mean(
[
np.sum(epoch["total"])
for epoch in training_monitor.timing
]
),
np.mean(
[
np.sum([iteration["total"] for iteration in epoch])
for epoch in projection_monitor.timing
]
),
]
for (
training_monitor,
evaluation_monitor,
projection_monitor,
) in monitors
]
)
# Using the recipe for a grouped bar plot
fig = plt.figure()
# set width of bars
bar_width = 1.0 / (1.0 + all_times.shape[1])
colors = list()
for i, times in enumerate(all_times):
positions = bar_width * np.arange(len(times)) + i
for j, (position, time, label) in enumerate(
zip(positions, times, ["Training", "Projection"])
):
if i == 0:
line2d = plt.bar(position, time, width=bar_width, label=label)
colors.append(line2d[0].get_facecolor())
else:
plt.bar(position, time, width=bar_width, color=colors[j])
# Add ticks on the middle of the group bars
xs = (
np.arange(len(all_times))
+ 0.5 * all_times.shape[1] * bar_width
- 0.5 * bar_width
)
plt.xticks(xs, clean_labels)
plt.legend()
# possibly make log plot
if log:
if log == "symlog":
plt.yscale("symlog")
else:
plt.yscale("log")
plt.ylabel(ylabel)
plt.title(title)
plt.tight_layout()
if savefile is not None:
filepath = f"{directory}/{savefile}.png"
print(f"Saving timing plot to {filepath}")
plt.savefig(filepath, dpi=300)
return fig
|
7723be1933bd9f2dd84e1ebec7364b4cbe942601
| 3,637,186
|
import tensorflow as tf
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
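A minimal sketch with one shared variable and two towers (the ops used here work in both graph and eager mode):
v = tf.Variable([1.0, 2.0])
tower_grads = [
    [(tf.constant([0.2, 0.4]), v)],  # gradients computed on tower 0
    [(tf.constant([0.6, 0.8]), v)],  # gradients computed on tower 1
]
average_gradients(tower_grads)  # -> [(tensor [0.4, 0.6], v)]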
|
fc2a8692046fe32884cb75d405d21fce6301a88d
| 3,637,187
|
import numpy as np
def recommend(model):
"""
Generate n recommendations.
:param model: recommendation model
:return: tuple(recommendations made by model, recommendations made by primitive model, recall, coverage)
"""
n = 10
hit = 0 # used for recall calculation
total_recommendations = 0
all_recommendations = [] # used for coverage calculation
recommendations = {}
primitive_recommendations = {}
    for user_id, user_profile in X_test_prepared.iterrows():  # iterate over test users; user_profile is a pandas Series
prediction = model.predictItemByUser(user_profile[1], user_profile[0], n)
primitive_prediction = primitive_model.predictItemByUser(None, user_profile[0], n, ratings_cleaned_df)
# primitive_predictions = primitive_model.test()
if prediction is None or prediction.ndim == 0:
continue
if user_profile[2] in prediction: # if prediction contains control item increase hit counter
hit += 1
recommendations[user_id] = prediction
primitive_recommendations[user_id] = primitive_prediction
all_recommendations.extend(list(prediction))
total_recommendations += 1
if total_recommendations > 0:
recall = hit / total_recommendations
else:
recall = 0
coverage = np.unique(all_recommendations).shape[0] / model.train_data.shape[1]
return recommendations, primitive_recommendations, recall, coverage
|
07d5e538cbfafd60bee7030fd31e6c9b5d178cfa
| 3,637,188
|
def drop_nondominant_term(latex_dict: dict) -> str:
"""
given
x = \\langle\\psi_{\\alpha}| \\hat{A} |\\psi_{\\beta}\\rangle
return
    x = \\langle\\psi_{\\alpha}| a_{\\beta} |\\psi_{\\beta}\\rangle
>>> latex_dict = {}
>>> latex_dict['input'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
>>> latex_dict['feed'] = [parse_latex('')]
>>> latex_dict['output'] = [{'LHS': parse_latex(''), 'RHS': parse_latex('')}]
>>> drop_nondominant_term(latex_dict)
    'no check performed'
"""
logger.info("[trace]")
return "no check performed"
|
6f53dcce5e17761d6ab9f7f0a772dcd81d91b33e
| 3,637,190
|
def template_introduce():
"""
This function constructs three image carousels for self introduction.
Check also: faq_bot/model/data.py
reference
- `Common Message Property <https://developers.worksmobile.com/kr/document/100500805?lang=en>`_
:return: image carousels type message content.
"""
fmt = _("See FAQs")
action0 = make_i18n_message_action("query_leave", "query", "See FAQs", fmt,
"See FAQs", fmt)
action1 = make_i18n_message_action("query_welfare", "query", "See FAQs",
fmt, "See FAQs", fmt)
action2 = make_i18n_message_action("query_security", "query", "See FAQs",
fmt, "See FAQs", fmt)
fmt_title0 = _("HR/Leave")
fmt_subtitle0 = _("See FAQs about HR and leave.")
element0 = make_i18n_list_template_element("query", "HR/Leave",
"See FAQs about HR and leave.",
image=CAROUSEL["leave"][0],
action=action0,
fmt_title=fmt_title0,
fmt_subtitle=fmt_subtitle0)
fmt_title1 = _("Welfare/Work support")
fmt_subtitle1 = _("See FAQs about welfare and work support.")
element1 = make_i18n_list_template_element("query", "Welfare/Work support",
"See FAQs about welfare "
"and work support.",
image=CAROUSEL["welfare"][0],
action=action1,
fmt_title=fmt_title1,
fmt_subtitle=fmt_subtitle1)
fmt_title2 = _("Security")
fmt_subtitle2 = _("See FAQs about security.")
element2 = make_i18n_list_template_element("query", "Security",
"See FAQs about security.",
image=CAROUSEL["security"][0],
action=action2,
                                              fmt_title=fmt_title2,
                                              fmt_subtitle=fmt_subtitle2)
return make_list_template([element0, element1, element2])
|
f0b6585512b8419932c1be38831057508a4454eb
| 3,637,191
|
def assign_distance_to_mesh_vertex(vkey, weight, target_LOW, target_HIGH):
"""
    Computes the 'get_distance' value for a single vertex with vkey.
Parameters
----------
vkey: int
The vertex key.
weight: float,
The weighting of the distances from the lower and the upper target, from 0 to 1.
target_LOW: :class: 'compas_slicer.pre_processing.CompoundTarget'
The lower compound target.
target_HIGH: :class: 'compas_slicer.pre_processing.CompoundTarget'
The upper compound target.
"""
if target_LOW and target_HIGH: # then interpolate targets
d = get_weighted_distance(vkey, weight, target_LOW, target_HIGH)
elif target_LOW: # then offset target
offset = weight * target_LOW.get_max_dist()
d = target_LOW.get_distance(vkey) - offset
else:
raise ValueError('You need to provide at least one target')
return d
|
5859ef6535d394d098a92603b2a3e6ac7c619e51
| 3,637,192
|
import hashlib
def _get_user_by_email_or_username(request):
"""
Finds a user object in the database based on the given request, ignores all fields except for email and username.
"""
if 'email_or_username' not in request.POST or 'password' not in request.POST:
raise AuthFailedError(_('There was an error receiving your login information. Please email us.'))
email_or_username = request.POST.get('email_or_username', None)
try:
return USER_MODEL.objects.get(
Q(username=email_or_username) | Q(email=email_or_username)
)
except USER_MODEL.DoesNotExist:
digest = hashlib.shake_128(email_or_username.encode('utf-8')).hexdigest(16) # pylint: disable=too-many-function-args
AUDIT_LOG.warning(f"Login failed - Unknown user username/email {digest}")
|
7bf8ced15acd226b647f0b2e272699c41c3432bc
| 3,637,193
|
import numpy as np
def get_minsize_assignment(N, min_comm_size):
"""Create membership vector where each community contains at least
as a certain number of nodes.
Parameters
----------
N : int
Desired length of membership vector
min_comm_size : int
Minimum number of nodes each community should have.
Returns
-------
np.array
Membership vector
"""
num_comms = int(N / min_comm_size)
membership = -np.ones(N, dtype='int') # -1 means non-assigned
for c in range(num_comms):
left_to_assign = np.flatnonzero(membership == -1)
assign = np.random.choice(left_to_assign, min_comm_size, replace=False)
membership[assign] = c
membership[membership == -1] = np.random.randint(num_comms, size=np.sum(membership == -1))
return membership
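Usage sketch:
membership = get_minsize_assignment(N=10, min_comm_size=3)
np.bincount(membership)  # 3 communities (10 // 3), each with at least 3 nodes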
|
e708b81a2b16d9885a0625d275fedcf001308c00
| 3,637,195
|
import numpy as np
def _combine_plots(
p1, p2, combine_rules=None,
sort_plot=False, sort_key=lambda x_y: x_y[0]
):
"""Combine two plots into one, following the given combine_rules to
determine how to merge the constants
:param p1: 1st plot to combine
:param p2: 2nd plot to combine
:param combine_rules: list of combine rules, which define how constants
in const_list and const_dict are merged. See definition above.
:param sort_plot: if true, sort the resulting plot according to the
sort_key. Default is to sort by x value.
    :param sort_key: function that, given an (x, y) point pair, returns a
        comparable key by which the plot is sorted.
:return: combined plot
"""
# Combine x arrays with each other and y arrays with each other
x1, y1 = p1[0:2]
x2, y2 = list(), list()
for x2i, y2i in zip(*p2[0:2]):
if x2i not in x1:
x2.append(x2i)
y2.append(y2i)
x = np.concatenate((x1, np.array(x2)))
y = np.concatenate((y1, np.array(y2)))
# Sort plot
if sort_plot:
next_x, next_y = list(), list()
for xi, yi in sorted(zip(x, y), key=sort_key):
next_x.append(xi)
next_y.append(yi)
x = np.array(next_x)
y = np.array(next_y)
# Combine constant lists
const_list = list()
for c1, c2 in zip(p1[2], p2[2]):
if c1 is not None and c2 is not None and _const_equals(c1, c2):
const_list.append(c1)
else:
const_list.append(None)
const_dict = dict()
# Combine constant dicts
d1, d2 = p1[3], p2[3]
    for k in set(d1) | set(d2):
if k in d1 and k in d2:
v1, v2 = d1[k], d2[k]
if v1 is not None and v2 is not None and _const_equals(v1, v2):
const_dict[k] = d1[k]
else:
const_dict[k] = None
else:
const_dict[k] = None
# Other combine rules
p = x, y, const_list, const_dict
if combine_rules is not None:
for rule in combine_rules:
p = rule(p, p1, p2)
return p
|
93665498ba30af51020300f774ba5f0cfc2684ce
| 3,637,196
|
from collections import deque
def shape_of(array, *, strict=False):
"""
Return the shape of array. (sizes of each dimension)
"""
shape = []
layer = array
while True:
if not isinstance(layer, (tuple, list)):
break
size = len(layer)
shape.append(size)
if not size:
break
layer = layer[0]
if strict:
layers = deque(
(str(i), sub)
for i, sub in enumerate(array)
)
for size in shape[1:]:
for _ in range(len(layers)):
indices, layer = layers.popleft()
if not isinstance(layer, (tuple, list)):
raise ValueError(
f"array is not uniform: "
f"not isinstance(array[{indices}], (tuple, list)) ({layer})"
)
if len(layer) != size:
raise ValueError(
f"array is not uniform: "
f"len(array[{indices}]) ({layer}) != {size}"
)
layers.extend(
(indices + f", {i}", sub)
for i, sub in enumerate(layer)
)
for _ in range(len(layers)):
indices, layer = layers.popleft()
if isinstance(layer, (tuple, list)):
raise ValueError(
f"array is not uniform: "
f"isinstance(array[{indices}], (tuple, list)) ({layer})"
)
return tuple(shape)
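Usage sketch contrasting the cheap first-element probe with the strict full check:
shape_of([[1, 2, 3], [4, 5, 6]])             # (2, 3)
shape_of([[1, 2, 3], [4, 5]])                # (2, 3) too: only the first element is probed
shape_of([[1, 2, 3], [4, 5]], strict=True)   # raises ValueError: array is not uniform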
|
c6e889338761897c1e036bef29cd73bd430608aa
| 3,637,197
|
def return_state_dict(network):
"""
save model to state_dict
"""
feat_model = {k: v.cpu() for k, v in network["feat_model"].state_dict().items()}
classifier = {k: v.cpu() for k, v in network["classifier"].state_dict().items()}
return {"feat_model": feat_model, "classifier": classifier}
|
c0bcd9bd84f7c722c7de5f52d12cf6762a86e1e0
| 3,637,198
|
def get_elevation_data(lonlat, dem_path):
"""
Get elevation data for a scene.
    :param lonlat:
        The longitude, latitude of the scene center.
    :type lonlat:
        float (2-tuple)
    :param dem_path:
        The directory in which the DEM can be found.
    :type dem_path:
        str
"""
datafile = pjoin(dem_path, "DEM_one_deg.tif")
url = urlparse(datafile, scheme='file').geturl()
try:
data = get_pixel(datafile, lonlat) * 0.001 # scale to correct units
except IndexError:
raise AncillaryError("No Elevation data")
metadata = {'data_source': 'Elevation',
'url': url}
# ancillary metadata tracking
md = extract_ancillary_metadata(datafile)
for key in md:
metadata[key] = md[key]
return data, metadata
|
b7876bbae41bb6fbadbeff414485a2edff2646bf
| 3,637,199
|
import datetime
def iso8601(dt=None, aware=False):
"""
Returns string datetime stamp in iso 8601 format from datetime object dt
If dt is missing and aware then use now(timezone.utc) else utcnow() naive
YYYY-MM-DDTHH:MM:SS.mmmmmm which is strftime '%Y-%m-%dT%H:%M:%S.%f'
Only TZ aware in python 3.2+
"""
if dt is None:
if aware and hasattr(datetime, "timezone"):
dt = datetime.datetime.now(datetime.timezone.utc) # make it aware
else: # naive
dt = datetime.datetime.utcnow() # naive
return(dt.isoformat())
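Usage sketch (with the corrected `import datetime` above):
iso8601(datetime.datetime(2000, 1, 2, 3, 4, 5, 6))  # '2000-01-02T03:04:05.000006'
iso8601()            # naive UTC timestamp, e.g. '...T12:34:56.789012'
iso8601(aware=True)  # offset-aware, e.g. '...T12:34:56.789012+00:00'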
|
181d2f38b39792cc0331ee7bd9a34f76691b5128
| 3,637,200
|
import re
import string
import lxml.html as lh
def parse_text(infile, xpath=None, filter_words=None, attributes=None):
"""Filter text using XPath, regex keywords, and tag attributes.
Keyword arguments:
infile -- HTML or text content to parse (list)
xpath -- an XPath expression (str)
filter_words -- regex keywords (list)
attributes -- HTML tag attributes (list)
Return a list of strings of text.
"""
infiles = []
text = []
if xpath is not None:
infile = parse_html(infile, xpath)
if isinstance(infile, list):
if isinstance(infile[0], lh.HtmlElement):
infiles = list(infile)
else:
text = [line + '\n' for line in infile]
elif isinstance(infile, lh.HtmlElement):
infiles = [infile]
else:
text = [infile]
else:
infiles = [infile]
if attributes is not None:
attributes = [clean_attr(x) for x in attributes]
attributes = [x for x in attributes if x]
else:
attributes = ['text()']
if not text:
text_xpath = '//*[not(self::script) and not(self::style)]'
for attr in attributes:
for infile in infiles:
if isinstance(infile, lh.HtmlElement):
new_text = infile.xpath('{0}/{1}'.format(text_xpath, attr))
else:
# re.split preserves delimiters place in the list
new_text = [x for x in re.split('(\n)', infile) if x]
text += new_text
if filter_words is not None:
text = re_filter(text, filter_words)
return [''.join(x for x in line if x in string.printable)
for line in remove_whitespace(text) if line]
|
7d2b04c477624db322721b785d95bffa16af1576
| 3,637,201
|
def _check_blacklist_members(rule_members=None, policy_members=None):
"""Blacklist: Check that policy members ARE NOT in rule members.
If a policy member is found in the rule members, add it to the
violating members.
Args:
rule_members (list): IamPolicyMembers allowed in the rule.
policy_members (list): IamPolicyMembers in the policy.
Return:
list: Policy members found in the blacklist (rule members).
"""
violating_members = [
policy_member
for policy_member in policy_members
for rule_member in rule_members
if rule_member.matches(policy_member)
]
return violating_members
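An illustrative call with a hypothetical member type exposing the `.matches()` interface the rule members rely on:
class Member:  # hypothetical stand-in for IamPolicyMember
    def __init__(self, name):
        self.name = name
    def matches(self, other):
        return self.name == other.name

rule = [Member('user:alice@example.com')]
policy = [Member('user:alice@example.com'), Member('user:bob@example.com')]
_check_blacklist_members(rule, policy)  # -> [the alice policy member]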
|
2fc41f4ff6c401de0976b04dd6a8cb858cef96e7
| 3,637,202
|
import numpy as np
def create_variable_weather(weather_data, original_epw_file, columns: list = ['drybulb'], variation: tuple = None):
"""
Create a new weather file adding gaussian noise to the original one.
Parameters
----------
weather_data : opyplus.WeatherData
Opyplus object with the weather for the simulation
original_epw_file : str
Path to the original EPW file
columns : list
List of columns to be affected
variation : tuple
(mean, std) of the Gaussian noise
    Returns
    -------
str
Name of the file created in the same location as the original one.
"""
if variation is None:
return None
else:
# Get dataframe with weather series
df = weather_data.get_weather_series()
# Generate random noise
shape = (df.shape[0], len(columns))
mu, std = variation
noise = np.random.normal(mu, std, shape)
df[columns] += noise
# Save new weather data
weather_data.set_weather_series(df)
filename = original_epw_file.split('.epw')[0]
filename += '_Random_%s_%s.epw' % (str(mu), str(std))
weather_data.to_epw(filename)
return filename
|
13674db675cb5c03c77047e78d0cf57b3bfab1ac
| 3,637,203
|
def transform(func, geom):
"""Applies `func` to all coordinates of `geom` and returns a new
geometry of the same type from the transformed coordinates.
`func` maps x, y, and optionally z to output xp, yp, zp. The input
    parameters may be iterable types like lists or arrays or single values.
The output shall be of the same type. Scalars in, scalars out.
Lists in, lists out.
For example, here is an identity function applicable to both types
of input.
def id_func(x, y, z=None):
return tuple(filter(None, [x, y, z]))
g2 = transform(id_func, g1)
Using pyproj >= 2.1, this example will accurately project Shapely geometries:
import pyproj
wgs84 = pyproj.CRS('EPSG:4326')
utm = pyproj.CRS('EPSG:32618')
project = pyproj.Transformer.from_crs(wgs84, utm, always_xy=True).transform
g2 = transform(project, g1)
Note that the always_xy kwarg is required here as Shapely geometries only support
X,Y coordinate ordering.
Lambda expressions such as the one in
g2 = transform(lambda x, y, z=None: (x+1.0, y+1.0), g1)
also satisfy the requirements for `func`.
"""
if geom.is_empty:
return geom
if geom.type in ('Point', 'LineString', 'LinearRing', 'Polygon'):
# First we try to apply func to x, y, z sequences. When func is
# optimized for sequences, this is the fastest, though zipping
# the results up to go back into the geometry constructors adds
# extra cost.
try:
if geom.type in ('Point', 'LineString', 'LinearRing'):
return type(geom)(zip(*func(*zip(*geom.coords))))
elif geom.type == 'Polygon':
shell = type(geom.exterior)(
zip(*func(*zip(*geom.exterior.coords))))
holes = list(type(ring)(zip(*func(*zip(*ring.coords))))
for ring in geom.interiors)
return type(geom)(shell, holes)
# A func that assumes x, y, z are single values will likely raise a
# TypeError, in which case we'll try again.
except TypeError:
if geom.type in ('Point', 'LineString', 'LinearRing'):
return type(geom)([func(*c) for c in geom.coords])
elif geom.type == 'Polygon':
shell = type(geom.exterior)(
[func(*c) for c in geom.exterior.coords])
holes = list(type(ring)([func(*c) for c in ring.coords])
for ring in geom.interiors)
return type(geom)(shell, holes)
elif geom.type.startswith('Multi') or geom.type == 'GeometryCollection':
return type(geom)([transform(func, part) for part in geom.geoms])
else:
raise ValueError('Type %r not recognized' % geom.type)
|
71bde1500ec8370a7718542ee26181d2aad6591f
| 3,637,204
|
def get_jit(policy_name, asc_location, resource_group_name):
"""Building query
Args:
policy_name: Policy name
asc_location: Machine location
resource_group_name: Resource name group
Returns:
dict: response body
"""
cmd_url = (
"/resourceGroups/{}/providers/Microsoft.Security/locations/{}/jitNetworkAccessPolicies/"
"{}?api-version={}".format(
resource_group_name, asc_location, policy_name, JIT_API_VERSION
)
)
response = http_request("GET", cmd_url)
return response
|
9e50eaf91fb2b2318f6b5334b848a6dce70ddf61
| 3,637,205
|
import pandas as pd
def rank_by_yield(df):
"""
Rank phenotypes by yield only.
Parameters
----------
df : pd.DataFrame
MAIZSIM yield output dataframe.
df_sims or df_mature
"""
# Prep data
groups = ['cvar', 'site']
how = 'mean'
sim = 'dm_ear'
mx_mean = agg_sims(df, groups, how, sim)
df_yield_means = pd.DataFrame(mx_mean)
# Sort data based on mean yield value
df_yield_means['mean'] = df_yield_means.mean(axis=1)
# Rank phenos by yield
phenos_ranked_by_yield = list(df_yield_means.sort_values(by=['mean'],
axis=0, ascending=False).index)
return phenos_ranked_by_yield
|
10dd1c9a8e3ffc94cf4580bc789d7cc19353d748
| 3,637,206
|
def k2lc(epic):
"""
load k2 light curve
"""
prefix = epic[:4]
id = epic[4:]
c = "01"
path = "data/c01/{0}00000/{1}".format(prefix, id)
end = "kepler_v1.0_lc.fits"
file = "{0}/hlsp_everest_k2_llc_{1}-c{2}_{3}".format(path, epic, c, end)
x, y = process_data(file)
return x, y
|
6cd5ffa387fa3d666c2f6561c06b458c7556509f
| 3,637,207
|
def multi_leave_topics(multileaver, user_id, time):
"""Multileaves a number of suggested topics for a user and returns
the results."""
topics = get_user_suggested_topics(user_id)
if not topics:
return None
ranking, credit = multileaver.team_draft_multileave(topics)
topic_recommendations = []
# prepare results for database insertion
for index, (topic, system) in enumerate(zip(ranking, credit)):
score = multileaver.ranking_length - index
rec = (score, time, user_id, topic, system)
topic_recommendations.append(rec)
return topic_recommendations
|
0b845a7a16419e4592ffd4f75d988728cef70727
| 3,637,208
|
import numpy as np
def generate_age(sex):
"""Generate the age of a person depending on its sex
Parameters
----------
sex : int
Sex should be either 0 (men) or 1 (women).
Raises
------
ValueError
If sex is not 0 or 1.
Returns
-------
age : int
Generated age of a person.
"""
randunif = np.random.rand(1)
if sex == 0:
age = menecdf.iloc[(menecdf-randunif).abs().argsort()[:1]].index.tolist()[0]
elif sex == 1:
age = womenecdf.iloc[(womenecdf-randunif).abs().argsort()[:1]].index.tolist()[0]
else:
raise ValueError("Sex should be either 0 (men) or 1 (women)")
return age
|
8f0ba4f215417035760fd1bbe1db5cc0974ed629
| 3,637,209
|
def _extract_text_Wikilink(node: mwparserfromhell.nodes.wikilink.Wikilink) -> str:
"""
Wikilinks come in 2 formats, thumbnails and actual links.
    In the case of thumbnails, if possible pull out the nested caption.
"""
if node.title.startswith('File:') or node.title.startswith('Image:'):
        if node.text is None:
return ''
else:
return ''.join(filter(lambda x: 'thumb|' not in x, map(_extract_text, node.text.nodes)))
else:
return ''.join(map(_extract_text, node.title.nodes))
|
bc6c16aff602cfeac9756d0c357e054731dc7ff8
| 3,637,210
|
def dict_zip(*dicts):
"""
    Take a series of dicts that share the same keys, and return a dict
    mapping each key to the list of its values across the input dicts.
"""
keyset = set(dicts[0])
for d in dicts:
if set(d) != keyset:
raise KeyError(f"Mismatched keysets in fold_dicts: {sorted(keyset)}, {sorted(set(d))}")
return { key: [d[key] for d in dicts] for key in keyset }
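Usage sketch:
dict_zip({'a': 1, 'b': 2}, {'a': 3, 'b': 4})  # {'a': [1, 3], 'b': [2, 4]}
dict_zip({'a': 1}, {'b': 2})                  # raises KeyError: mismatched keysets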
|
47416641a6451828b78ae6dfd81a48676fcea71f
| 3,637,211
|
import numpy as np
def Ustagger_to_mass(U):
"""
U are the data on the left and right of a grid box
A simple conversion of the U stagger grid to the mass points.
Calculates the average of the left and right value of a grid box. Looping
over all columns it reduces the staggered grid to the same dimensions as the
mass point.
Useful for converting U, XLAT_U, and XLONG_U to masspoints
    Difference between XLAT_U and XLAT is usually small, on the order of 10e-5
(column_j1+column_j2)/2 = masspoint_incolumn
Input:
Ugrid with size (##, ##+1)
Output:
U on mass points with size (##,##)
"""
# create the first column manually to initialize the array with correct dimensions
U_masspoint = (U[:,0]+U[:,1])/2. # average of first and second row
U_num_cols = int(U.shape[1])-1 # we want one less column than we have
# Loop through the rest of the columns
    # We want one less column than the staggered grid has.
    # Average each adjacent pair of columns and stack onto U_masspoint.
for col in range(1,U_num_cols):
col_avg = (U[:,col]+U[:,col+1])/2.
# Stack those onto the previous for the final array
U_masspoint = np.column_stack((U_masspoint,col_avg))
return U_masspoint
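Usage sketch:
U = np.arange(12.0).reshape(3, 4)  # staggered grid with one extra column
Ustagger_to_mass(U).shape          # (3, 3): adjacent columns averaged onto mass points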
|
d3dbae52d74aff40b83b0437eed9f0aafb5e37ee
| 3,637,213
|
import tensorflow as tf
from tensorflow.python.util import nest
from tensorflow.python.ops import array_ops, math_ops, nn_ops
def _linear(args,
output_size,
bias,
bias_initializer=tf.zeros_initializer(),
kernel_initializer=initializer(),
scope=None,
reuse=None):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_initializer: starting value to initialize the bias
(default is all zeros).
kernel_initializer: starting value to initialize the weight.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
ValueError: If some of the arguments has unspecified or wrong shape.
"""
if args is None or (nest.is_sequence(args) and not args):
raise ValueError("'args' must be specified")
if not nest.is_sequence(args):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape() for a in args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError('linear is expecting 2D arguments: %s' % shapes)
if shape[1].value is None:
raise ValueError('linear expects shape[1] to be provided for shape %s, '
'but saw %s' % (shape, shape[1]))
else:
total_arg_size += shape[1].value
dtype = [a.dtype for a in args][0]
    # Now do the computation.
with tf.variable_scope(scope, reuse=reuse) as outer_scope:
weights = tf.get_variable(
name='linear_kernel',
shape=[total_arg_size, output_size],
dtype=dtype,
regularizer=regularizer,
            initializer=kernel_initializer)
if len(args) == 1:
res = math_ops.matmul(args[0], weights)
else:
res = math_ops.matmul(array_ops.concat(args, axis=1), weights)
if not bias:
return res
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
name='linear_bias',
shape=[output_size],
dtype=dtype,
regularizer=regularizer,
            initializer=bias_initializer)
return nn_ops.bias_add(res, biases)
|
3d11e74e4e28aeb737f63046fc4e53b4e68aeb9b
| 3,637,214
|
def build_fpn_mask_graph(rois, feature_maps, image_size, num_classes,
pool_size, train_bn=True):
"""Builds the computation graph of the mask head of Feature Pyramid Network.
rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized
coordinates.
feature_maps: List of feature maps from different layers of the pyramid,
[P1, P2, P3, P4, P5]. Each has a different resolution.
    image_size: Size of the input image, passed to the ROIAlign layer.
pool_size: The width of the square feature map generated from ROI Pooling.
num_classes: number of classes, which determines the depth of the results
train_bn: Boolean. Train or freeze Batch Norm layers
Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]
"""
# ROI Pooling
# Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]
x = ROIAlignLayer([pool_size, pool_size],
name="roi_align_mask")((rois, image_size, feature_maps))
# x [1, num_rois, 14, 14, 64]
# Conv layers
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv1")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn1')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv2")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn2')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv3")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn3')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding="same"),
name="mrcnn_mask_conv4")(x)
x = KL.TimeDistributed(BatchNorm(),
name='mrcnn_mask_bn4')(x, training=train_bn)
x = KL.Activation('relu')(x)
x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation="relu"),
name="mrcnn_mask_deconv")(x)
x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation="sigmoid"),
name='mask')(x)
print("pool mask feature map size", x.shape)
return x
|
c6286955fb07d3feb20801a409d44e20382133ef
| 3,637,215
|
def calculate_perf_counter_counter(previous, current, property_name):
"""
PERF_COUNTER_COUNTER
https://technet.microsoft.com/en-us/library/cc740048(v=ws.10).aspx
"""
n0 = previous[property_name]
n1 = current[property_name]
d0 = previous["Timestamp_Sys100NS"]
d1 = current["Timestamp_Sys100NS"]
f = current["Frequency_Sys100NS"]
if n0 is None or n1 is None:
return
return (n1 - n0) / ((d1 - d0) / f)
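A worked example; the property name is hypothetical, and Frequency_Sys100NS ticks 10^7 times per second:
previous = {"BytesSentPersec": 1000, "Timestamp_Sys100NS": 0}
current = {"BytesSentPersec": 4000, "Timestamp_Sys100NS": 20_000_000,
           "Frequency_Sys100NS": 10_000_000}
calculate_perf_counter_counter(previous, current, "BytesSentPersec")
# (4000 - 1000) / ((20_000_000 - 0) / 10_000_000) = 1500.0 events per second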
|
f517f39ef20af5a4d23f1fd74a14fab93be4037b
| 3,637,216
|
from django import http
import datetime
def event_edit(request, id):
"""Edit form for a particular event."""
event = get_object_or_404(Event, id=id)
result = can_edit_event(event, request.user)
if isinstance(result, http.HttpResponse):
return result
if request.user.has_perm('main.change_event_others'):
form_class = forms.EventEditForm
elif request.user.has_perm('main.add_event_scheduled'):
form_class = forms.EventExperiencedRequestForm
else:
form_class = forms.EventRequestForm
curated_groups = (
CuratedGroup.objects.filter(event=event).order_by('created')
)
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=event)
if form.is_valid():
event = form.save(commit=False)
_event_process(request, form, event)
if not event.location:
event.start_time = event.start_time.replace(
tzinfo=timezone.utc
)
event.save()
form.save_m2m()
edit_url = reverse('manage:event_edit', args=(event.pk,))
if is_privacy_vidly_mismatch(event):
# We'll need to update the status of token protection
# on Vid.ly for this event.
try:
vidly.update_media_protection(
event.template_environment['tag'],
event.privacy != Event.PRIVACY_PUBLIC,
)
submissions = VidlySubmission.objects.filter(
event=event,
tag=event.template_environment['tag'],
).order_by('-submission_time')
for submission in submissions[:1]:
submission.token_protection = (
event.privacy != Event.PRIVACY_PUBLIC
)
submission.save()
break
except vidly.VidlyUpdateError as x:
messages.error(
request,
'Video protect status could not be updated on '
'Vid.ly\n<code>%s</code>' % x
)
messages.info(
request,
'Event "<a href=\"%s\">%s</a>" saved. [Edit again](%s)' % (
reverse('main:event', args=(event.slug,)),
event.title,
edit_url
)
)
return redirect('manage:events')
else:
initial = {}
initial['curated_groups'] = curated_groups.values_list(
'name',
flat=True
)
curated_groups_choices = [
(x, x) for x in initial['curated_groups']
]
form = form_class(
instance=event,
initial=initial,
curated_groups_choices=curated_groups_choices,
)
context = {
'form': form,
'event': event,
'suggested_event': None,
'suggested_event_comments': None,
'tweets': EventTweet.objects.filter(event=event).order_by('id'),
}
try:
suggested_event = SuggestedEvent.objects.get(accepted=event)
context['suggested_event'] = suggested_event
context['suggested_event_comments'] = (
SuggestedEventComment.objects
.filter(suggested_event=suggested_event)
.select_related('user')
.order_by('created')
)
except SuggestedEvent.DoesNotExist:
pass
context['is_vidly_event'] = False
if event.template and 'Vid.ly' in event.template.name:
context['is_vidly_event'] = True
context['vidly_submissions'] = (
VidlySubmission.objects
.filter(event=event)
.order_by('-submission_time')
)
# Is it stuck and won't auto-archive?
context['stuck_pending'] = False
now = timezone.now()
time_ago = now - datetime.timedelta(minutes=15)
if (
event.status == Event.STATUS_PENDING and
event.template and
'Vid.ly' in event.template.name and
event.template_environment and # can be None
event.template_environment.get('tag') and
not VidlySubmission.objects.filter(
event=event,
submission_time__gte=time_ago
)
):
tag = event.template_environment['tag']
results = vidly.query(tag)
status = results.get(tag, {}).get('Status')
if status == 'Finished':
context['stuck_pending'] = True
try:
discussion = Discussion.objects.get(event=event)
context['discussion'] = discussion
context['comments_count'] = Comment.objects.filter(event=event).count()
except Discussion.DoesNotExist:
context['discussion'] = None
context['approvals'] = (
Approval.objects
.filter(event=event)
.select_related('group')
)
context['chapters_count'] = Chapter.objects.filter(event=event).count()
context['closed_captions'] = ClosedCaptions.objects.filter(event=event)
try:
context['assignment'] = EventAssignment.objects.get(event=event)
except EventAssignment.DoesNotExist:
context['assignment'] = None
try:
context['survey'] = Survey.objects.get(events=event)
except Survey.DoesNotExist:
context['survey'] = None
context['archived_hits'] = 0
context['live_hits'] = 0
for each in EventHitStats.objects.filter(event=event).values('total_hits'):
context['archived_hits'] += each['total_hits']
for each in EventLiveHits.objects.filter(event=event).values('total_hits'):
context['live_hits'] += each['total_hits']
context['count_event_uploads'] = Upload.objects.filter(event=event).count()
context['vidly_tag_domains'] = None
if (
event.template and
'Vid.ly' in event.template.name and
event.template_environment and
event.template_environment.get('tag')
):
context['vidly_tag_domains'] = VidlyTagDomain.objects.filter(
tag=event.template_environment['tag']
)
return render(request, 'manage/event_edit.html', context)
|
0ea47e7b1772c3fa0529f6cc7675a83108ad0018
| 3,637,217
|
from typing import Callable
def migrator(from_: str, to_: str) -> Callable[[MigratorF], MigratorF]:
"""Decorate function as migrating settings from v `from_` to v `to_`.
A migrator should mutate a `NapariSettings` model from schema version
`from_` to schema version `to_` (in place).
Parameters
----------
from_ : str
NapariSettings.schema_version version that this migrator expects as
input
to_ : str
NapariSettings.schema_version version after this migrator has been
executed.
Returns
-------
    Callable[[MigratorF], MigratorF]
        The decorator that registers the migration function.
"""
def decorator(migrate_func: MigratorF) -> MigratorF:
_from, _to = Version.parse(from_), Version.parse(to_)
        assert _to >= _from, 'Migrator must not decrease the version.'
_MIGRATORS.append(Migrator(_from, _to, migrate_func))
return migrate_func
return decorator
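# Hypothetical registration sketch (not from the original source): bump the
# schema version and rename a settings field in place. The field names below
# are illustrative assumptions, not real NapariSettings attributes.
@migrator('0.3.0', '0.4.0')
def _rename_theme_field(model):
    """Example only: move appearance.theme_name to appearance.theme."""
    appearance = getattr(model, 'appearance', None)
    if appearance is not None and hasattr(appearance, 'theme_name'):
        appearance.theme = appearance.theme_name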
|
20bf5c7c8e693fc880ed9d31e610b2d939f8c020
| 3,637,218
|
import json
def rawChipByLocation_query():
"""
Get chips images by parcel id.
    Generates a series of extracted Sentinel-2 LEVEL2A segments of 128x128 (10m
    resolution bands) or 64x64 (20m) pixels as a list of full-resolution GeoTIFFs.
---
tags:
- rawChipByLocation
responses:
200:
description: A JSON dictionary with date labels and
relative URLs to cached GeoTIFFs.
"""
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Start by getting the request IP address
if request.environ.get('HTTP_X_FORWARDED_FOR') is None:
rip = request.environ['REMOTE_ADDR']
else:
rip = request.environ['HTTP_X_FORWARDED_FOR']
lon = request.args.get('lon')
lat = request.args.get('lat')
start_date = request.args.get('start_date')
end_date = request.args.get('end_date')
band = request.args.get('band')
if 'plevel' in request.args.keys():
plevel = request.args.get('plevel')
else:
plevel = 'LEVEL2A'
if 'chipsize' in request.args.keys():
chipsize = request.args.get('chipsize')
else:
chipsize = '1280'
unique_id = f"dump/{rip}E{lon}N{lat}_{plevel}_{chipsize}_{band}".replace(
'.', '_')
data = image_requests.getRawChipByLocation(
lon, lat, start_date, end_date, unique_id, band, chipsize, plevel)
if data:
return send_from_directory(f"files/{unique_id}", 'dump.json')
else:
return json.dumps({})
|
bfb2b32a17d5b1b8a05efcf710d70fc4179996c5
| 3,637,219
|
from pathlib import Path
from typing import Optional
from typing import List
import json
def build_settings(
tmp_path: Path,
template: str,
*,
oidc_clients: Optional[List[OIDCClient]] = None,
**settings: str,
) -> Path:
"""Generate a test Gafaelfawr settings file with secrets.
Parameters
----------
tmp_path : `pathlib.Path`
The root of the temporary area.
template : `str`
Settings template to use.
oidc_clients : List[`gafaelfawr.config.OIDCClient`] or `None`
Configuration information for clients of the OpenID Connect server.
**settings : `str`
Any additional settings to add to the settings file.
Returns
-------
settings_path : `pathlib.Path`
The path of the settings file.
"""
bootstrap_token = str(Token()).encode()
bootstrap_token_file = store_secret(tmp_path, "bootstrap", bootstrap_token)
session_secret = Fernet.generate_key()
session_secret_file = store_secret(tmp_path, "session", session_secret)
issuer_key = _ISSUER_KEY.private_key_as_pem()
issuer_key_file = store_secret(tmp_path, "issuer", issuer_key)
influxdb_secret_file = store_secret(tmp_path, "influxdb", b"influx-secret")
github_secret_file = store_secret(tmp_path, "github", b"github-secret")
oidc_secret_file = store_secret(tmp_path, "oidc", b"oidc-secret")
oidc_path = tmp_path / "oidc.json"
if oidc_clients:
clients_data = [
{"id": c.client_id, "secret": c.client_secret}
for c in oidc_clients
]
oidc_path.write_text(json.dumps(clients_data))
settings_path = _build_settings_file(
tmp_path,
template,
database_url=TEST_DATABASE_URL,
bootstrap_token_file=bootstrap_token_file,
session_secret_file=session_secret_file,
issuer_key_file=issuer_key_file,
github_secret_file=github_secret_file,
oidc_secret_file=oidc_secret_file,
influxdb_secret_file=influxdb_secret_file,
oidc_server_secrets_file=oidc_path if oidc_clients else "",
)
if settings:
with settings_path.open("a") as f:
for key, value in settings.items():
f.write(f"{key}: {value}\n")
return settings_path
|
aaba1048c96cd07b42492d11ca34a87365350a20
| 3,637,221
|
def get_actions_matching_arn(arn):
"""
Given a user-supplied ARN, get a list of all actions that correspond to that ARN.
Arguments:
arn: A user-supplied arn
Returns:
List: A list of all actions that can match it.
"""
raw_arns = get_matching_raw_arns(arn)
results = []
for raw_arn in raw_arns:
resource_type_name = get_resource_type_name_with_raw_arn(raw_arn)
service_prefix = get_service_from_arn(raw_arn)
service_prefix_data = get_service_prefix_data(service_prefix)
for action_name, action_data in service_prefix_data["privileges"].items():
for resource_name, resource_data in action_data["resource_types"].items():
this_resource_type = resource_data["resource_type"].strip("*")
if this_resource_type.lower() == resource_type_name.lower():
results.append(f"{service_prefix}:{action_data['privilege']}")
results = list(dict.fromkeys(results))
results.sort()
return results
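# Hedged usage sketch (depends on the policy-data helpers referenced above);
# the ARN and the listed actions are illustrative, not verified output.
actions = get_actions_matching_arn("arn:aws:s3:::mybucket")
# might include entries such as "s3:CreateBucket", "s3:DeleteBucket", "s3:ListBucket", ...
print(actions[:5])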
|
595e985829df5035c81928a4441c64b136818e8d
| 3,637,223
|
import time
def api_retry(func, task_id):
"""
添加api重试机制
:param func: 调用的api函数
:param task_id: 任务id
:return: 重试结果
"""
retry_flag, status_result = False, ""
for i in range(TRANSPORT_RETRY_TIMES):
time.sleep(TRANSPORT_RETRY_INTERVAL)
retry_flag, status_result = func(task_id)
if retry_flag:
break
return retry_flag, status_result
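# Usage sketch (assumes the module-level TRANSPORT_RETRY_TIMES and
# TRANSPORT_RETRY_INTERVAL constants exist); the status function below is a
# stand-in for a real API call, not part of the original module.
def _query_status(task_id):
    result = {"task_id": task_id, "state": "PENDING"}  # pretend API response
    return result["state"] == "SUCCESS", result["state"]

ok, status = api_retry(_query_status, task_id=42)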
|
8f3ad5d6c9865ec8405c7504e9a6af851f5e4916
| 3,637,226
|
def test_bound_callables():
"""Test that we can use a callable as a bound value."""
@magicgui(x={"bind": lambda x: 10})
def f(x: int = 5):
return x
assert f() == 10
f.x.unbind()
assert f() == 5
|
baf0cafcef7160e23c1b66be9734245adbe9d219
| 3,637,227
|
def delete_role(user_id: str, role_id: str):
""" Removes a role from a user """
print(user_id)
print(role_id)
return jsonify(), HTTPStatus.NO_CONTENT
|
aa97868d54f0f3b887d80a7f4f8ef258fb050001
| 3,637,228
|
import multiprocessing as mp
from functools import partial
from jinfo.utils.percentage_identity import percentage_identity
def remove_degenerate_seqs(
alignment_obj: BaseAlignment, identity_limit: int, show_id_array: bool = False
) -> BaseAlignment:
"""
Filter high similarity sequences from a list of Seq objects
Returns: BaseAlignment
"""
seq_list = alignment_obj.seqs
identity_array = []
filtered_seqs = []
pool = mp.Pool(mp.cpu_count()) # Set up cpu pool for parallel calculation
    for seq_obj in seq_list:
        id_partial = partial(percentage_identity, seq2=seq_obj)
        identity_array_row = pool.map(id_partial, seq_list)
        identity_array.append(identity_array_row)
    pool.close()  # release the worker processes once all rows are computed
    pool.join()
if show_id_array:
print("Calculated alignment identity array:")
for i, row in enumerate(identity_array):
print(f"{seq_list[i].label}\t{row}")
for i, row in enumerate(identity_array):
row.remove(100) # remove seq 100% match with itself
if max(row) < float(identity_limit):
filtered_seqs.append(seq_list[i])
return BaseAlignment(filtered_seqs)
|
fcada477a01290fb54a83d074c31de31c9be17e1
| 3,637,229
|
def get_domain(url):
""" Get the domain from a URL.
Parameters
----------
url : string
HTTP URL
Returns
-------
    domain : string
        scheme and network location of the URL
"""
o = urlparse(url)
scheme = o.scheme
if not o.scheme:
scheme = "http"
link = scheme + "://" + o.netloc
return link
|
e47d2fdedab66d356887a94db5c22770f5e21823
| 3,637,230
|
def push_activations(activations, from_layer, to_layer):
"""Push activations from one model to another using prerecorded correlations"""
inverse_covariance_matrix = layer_inverse_covariance(from_layer)
activations_decorrelated = np.dot(inverse_covariance_matrix, activations.T).T
covariance_matrix = layer_covariance(from_layer, to_layer)
activation_recorrelated = np.dot(activations_decorrelated, covariance_matrix)
return activation_recorrelated
|
ddbacdbbfb30156204df27b00c79a28a4895810e
| 3,637,231
|
def cross_val_score(estimator, X, y=None, groups=None, scoring=None, cv=None,
n_jobs=1, verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score using cross-validation
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be for example a list, or an array.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : numpy.array, shape=(len(list(cv)), 2)
Array of scores of the estimator for each run of the cross validation
with their corresponding uncertainty.
See Also
---------
:func:`skpro.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
# To ensure multimetric format is not supported
scorer = check_scoring(estimator, scoring=scoring)
if n_jobs == 1:
# If we are not multiprocessing it's possible to
# use a wrapper function to retrieve the std values
test_scores = []
def scoring_task(estimator, X, y):
score, std = scorer(estimator, X, y, return_std=True)
test_scores.append([score, std])
return score
else:
# We allow multiprocessing by passing in two scoring functions.
# That is far from ideal since we call the scorer twice,
# so any improvement is welcome
score_scorer = RetrievesScores(scorer, score=True, std=False)
std_scorer = RetrievesScores(scorer, score=False, std=True)
scoring_task = {'score': score_scorer, 'std': std_scorer}
cv_results = cross_validate(estimator=estimator, X=X, y=y, groups=groups,
scoring=scoring_task, cv=cv,
return_train_score=False,
n_jobs=n_jobs, verbose=verbose,
fit_params=fit_params,
pre_dispatch=pre_dispatch)
if n_jobs == 1:
return np.array(test_scores)
else:
return np.column_stack((cv_results['test_score'], cv_results['test_std']))
|
8c4fe69cb2043adf5541b188d99da23beb4d9874
| 3,637,232
|
def load_graph(N, M):
"""
Builds an adjacency list representation of a graph with N vertices. Each
graph[i][j] is the minimum length of an edge between vertice i and j.
    :rtype: List[Dict[int, int]]
"""
graph = [dict() for i in range(0, N)]
for i in range(0, M):
(x, y, r) = read(int)
x -= 1
y -= 1
# Ignore all edges except minimum length edge.
r = r if y not in graph[x] else min(r, graph[x][y])
graph[x][y] = r
graph[y][x] = r
return graph
|
acee5cb79eb5bbc04eada9d54344700bff3ffaa4
| 3,637,233
|
def quartile_range(arr):
    """
    Find the interquartile range of a sorted array.
    Returns (Q1, IQR, Q3).
    """
    # if the length is odd, exclude the median element from the upper half
    if len(arr) % 2 != 0:
        left = median(arr[:len(arr) // 2])
        right = median(arr[len(arr) // 2 + 1:])
    else:
        # if the length is even, split the array into two halves
        left = median(arr[:len(arr) // 2])
        right = median(arr[len(arr) // 2:])
    return left, abs(right - left), right
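# Usage sketch, assuming `median` is a module-level helper returning the median
# of a list; the input should be sorted for the quartiles to be meaningful.
q1, iqr, q3 = quartile_range(sorted([1, 3, 5, 7, 9, 11, 13, 15]))
# halves are [1, 3, 5, 7] and [9, 11, 13, 15], so q1 = 4, q3 = 12, iqr = 8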
|
c6fafd63e3e64b893a4632bfb2027e330e8a3c32
| 3,637,234
|
def check_syntax(filename, raise_error=False):
"""Return True if syntax is okay."""
with autopep8.open_with_encoding(filename) as input_file:
try:
compile(input_file.read(), '<string>', 'exec', dont_inherit=True)
return True
except (SyntaxError, TypeError, UnicodeDecodeError):
if raise_error:
raise
else:
return False
|
d401e292ddb20d66c65a7ffa8988ddab4a7962ec
| 3,637,235
|
import numpy as np
from numpy.testing import assert_allclose
from astropy.convolution import convolve as astropy_convolve
from ..utils import process_image_pixels
def test_process_image_pixels():
"""Check the example how to implement convolution given in the docstring"""
def convolve(image, kernel):
'''Convolve image with kernel'''
images = dict(image=np.asanyarray(image))
kernel = np.asanyarray(kernel)
out = dict(image=np.empty_like(image))
def convolve_function(images, kernel):
value = np.sum(images['image'] * kernel)
return dict(image=value)
process_image_pixels(images, kernel, out, convolve_function)
return out['image']
np.random.seed(0)
image = np.random.random((7, 10))
kernel = np.random.random((3, 5))
actual = convolve(image, kernel)
desired = astropy_convolve(image, kernel, boundary='fill')
assert_allclose(actual, desired)
|
439e45a7fd403de4df8dd9dfc662be6405d69dc0
| 3,637,236
|
import math
import numpy as np
def im2vec(im, bsize, padsize=0):
"""
Converts image to vector.
Args:
im: Input image to be converted to a vector.
bsize: Size of block of im to be converted to vec. Must be 1x2 non-negative int array.
        padsize (optional, default=0): Must be non-negative integers in a 1x2 array. Amount of zero padding applied along each dimension.
Returns:
v: Output vector.
rows: Number of rows of im after bsize and padsize are applied (before final flattening to vector).
cols: Number of cols of im after bsize and padsize are applied (before final flattening to vector).
"""
bsize = bsize+np.zeros((1, 2), dtype=int)[0]
padsize = padsize+np.zeros((1, 2), dtype=int)[0]
    if (padsize < 0).any():
        raise Exception("Pad size must not be negative")
imsize = np.shape(im)
y = bsize[0]+padsize[0]
x = bsize[1]+padsize[1]
rows = math.floor((imsize[0]+padsize[0])/y)
cols = math.floor((imsize[1]+padsize[1])/x)
t = np.zeros((y*rows, x*cols))
imy = y*rows-padsize[0]
imx = x*cols-padsize[1]
t[:imy, :imx] = im[:imy, :imx]
t = np.reshape(t, (y, rows, x, cols), order='F')
t = np.reshape(np.transpose(t, [0, 2, 1, 3]), (y, x, rows*cols), order='F')
v = t[:bsize[0], :bsize[1], :rows*cols]
v = np.reshape(v, (y*x, rows*cols), order='F')
return [v, rows, cols]
|
0a88cf02e37fdaeb24103cc0a7027067ea703c82
| 3,637,237
|
from tensorflow import keras
from tensorflow.keras import backend as K  # assumed tf.keras API (the snippet uses keras.layers and K.int_shape)
def mobilenetV2_block(
input_layer,
filters: int = 32,
dropout_ratio: float = DEFAULT_DROPOUT_RATIO,
use_batchnorm: bool = False,
prefix: str = "mobilenetV2_",
initializer=DEFAULT_KERNEL_INITIALIZER,
regularizer=DEFAULT_KERNEL_REGULARIZER,
channels_index: int = DEFAULT_CHANNEL_INDEX):
"""
Build a mobilenet V2 bottleneck with residual block
:param input_layer:
:param filters:
:param initializer:
:param regularizer:
:param prefix:
:param channels_index:
:param use_batchnorm:
:param dropout_ratio:
:return: mobilenet V2 bottleneck with residual block
"""
# --- argument checking
if input_layer is None:
raise ValueError("input_layer cannot be empty")
if filters <= 0:
raise ValueError("Filters should be > 0")
if dropout_ratio is not None:
if dropout_ratio > 1.0 or dropout_ratio < 0.0:
raise ValueError("Dropout ration must be [0, 1]")
# --- build block
previous_no_filters = K.int_shape(input_layer)[channels_index]
x = keras.layers.Conv2D(
filters=filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
activation="linear",
name=prefix + "conv0",
kernel_regularizer=regularizer,
kernel_initializer=initializer)(input_layer)
x = keras.layers.DepthwiseConv2D(
depth_multiplier=1,
kernel_size=(3, 3),
strides=(1, 1),
padding="same",
activation="relu",
name=prefix + "conv1",
kernel_regularizer=regularizer,
kernel_initializer=initializer)(x)
if use_batchnorm:
x = keras.layers.BatchNormalization(
name=prefix + "batchnorm0")(x)
x = keras.layers.Conv2D(
filters=previous_no_filters,
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
activation="relu",
name=prefix + "conv2",
kernel_regularizer=regularizer,
kernel_initializer=initializer)(x)
if use_batchnorm:
x = keras.layers.BatchNormalization(
name=prefix + "batchnorm1")(x)
# --- build skip layer and main
x = keras.layers.Add(name=prefix + "add")([
x,
input_layer
])
if dropout_ratio is not None and dropout_ratio > 0.0:
x = keras.layers.Dropout(
name=prefix + "dropout",
rate=dropout_ratio)(x)
return x
|
0fd2e38d32d192412de4928c7ef92b577235581f
| 3,637,238
|
def _grid_archive():
"""Deterministically created GridArchive."""
# The archive must be low-res enough that we can tell if the number of cells
# is correct, yet high-res enough that we can see different colors.
archive = GridArchive([10, 10], [(-1, 1), (-1, 1)], seed=42)
archive.initialize(solution_dim=2)
_add_uniform_sphere(archive, (-1, 1), (-1, 1))
return archive
|
540ea0270bbe06830ab096c79590c6ffcad487a2
| 3,637,239
|
def check_flush(hand):
"""Check whether the hand has a flush; returns a boolean."""
    return len(hand) == len(hand.by_suit(hand[0].suit))
|
de11f50f11b477e61f284063c7f0da0dda2dd87e
| 3,637,240
|
import torch
def binary_accuracy(preds, y):
"""
Returns accuracy per batch
:param preds: prediction logits
:param y: target labels
:return: accuracy = percentage of correct predictions
"""
# round predictions to the closest integer
rounded_predictions = torch.round(torch.sigmoid(preds))
correct = (rounded_predictions == y).float()
acc = correct.sum() / len(correct)
return acc
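# Quick usage check (illustrative tensors): three of the four logits round to
# the correct label after the sigmoid, giving an accuracy of 0.75.
logits = torch.tensor([2.0, -1.0, 0.5, -3.0])
labels = torch.tensor([1.0, 0.0, 0.0, 0.0])
acc = binary_accuracy(logits, labels)  # tensor(0.75)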
|
2a321bb9e60a937a879619c2fa3baf1cbe968a33
| 3,637,241
|
import csv
def load_taxondump(idpath):
"""Importing the Acidobacteria taxon IDs"""
taxons = {}
with open(idpath) as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
taxons[row[1]] = row[0]
return taxons
|
b20c973f97d609b646e5c15be7cc320019f21236
| 3,637,242
|
import re
def _to_numeric_range(cell):
"""
Translate an Excel cell (eg 'A1') into a (col, row) tuple indexed from zero.
e.g. 'A1' returns (0, 0)
"""
    match = re.match(r"^\$?([A-Z]+)\$?(\d+)$", cell.upper())
if not match:
raise RuntimeError("'%s' is not a valid excel cell address" % cell)
col, row = match.groups()
# A = 1
col_digits = map(lambda c: ord(c) - ord("A") + 1, col)
col = 0
for digit in col_digits:
col = (col * 26) + digit
row = int(row) - 1
col = col - 1
return col, row
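# Examples (zero-indexed output): column "B" is index 1, row "3" is index 2;
# absolute references with "$" are accepted as well.
assert _to_numeric_range("B3") == (1, 2)
assert _to_numeric_range("$AA$10") == (26, 9)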
|
468f452a7e4d4b045ecbb1a1fc261712fb25f3fc
| 3,637,243
|
def protocol(recarr, design_type, *hrfs):
""" Create an object that can evaluate the FIAC
Subclass of formulae.Formula, but not necessary.
Parameters
----------
recarr : (N,) structured array
with fields 'time' and 'event'
design_type : str
one of ['event', 'block']. Handles how the 'begin' term is
handled. For 'block', the first event of each block is put in
this group. For the 'event', only the first event is put in this
group. The 'begin' events are convolved with hrf.glover.
    hrfs: symbolic HRFs
Each event type ('SSt_SSp','SSt_DSp','DSt_SSp','DSt_DSp') is
convolved with each of these HRFs in order.
Returns
-------
f: Formula
Formula for constructing design matrices.
contrasts : dict
Dictionary of the contrasts of the experiment.
"""
event_types = np.unique(recarr['event'])
N = recarr.size
if design_type == 'block':
keep = np.not_equal((np.arange(N)) % 6, 0)
else:
keep = np.greater(np.arange(N), 0)
# This first frame was used to model out a potentially
# 'bad' first frame....
_begin = recarr['time'][~keep]
termdict = {}
termdict['begin'] = utils.define('begin', utils.events(_begin, f=hrf.glover))
drift = formulae.natural_spline(utils.T,
knots=[N_ROWS/2.+1.25],
intercept=True)
for i, t in enumerate(drift.terms):
termdict['drift%d' % i] = t
# After removing the first frame, keep the remaining
# events and times
times = recarr['time'][keep]
events = recarr['event'][keep]
# Now, specify the experimental conditions. This creates expressions named
# SSt_SSp0, SSt_SSp1, etc. with one expression for each (eventtype, hrf)
# pair
for v in event_types:
k = np.array([events[i] == v for i in range(times.shape[0])])
for l, h in enumerate(hrfs):
# Make sure event type is a string (not byte string)
term_name = '%s%d' % (to_str(v), l)
termdict[term_name] = utils.define(term_name,
utils.events(times[k], f=h))
f = formulae.Formula(termdict.values())
Tcontrasts = {}
Tcontrasts['average'] = (termdict['SSt_SSp0'] + termdict['SSt_DSp0'] +
termdict['DSt_SSp0'] + termdict['DSt_DSp0']) / 4.
Tcontrasts['speaker'] = (termdict['SSt_DSp0'] - termdict['SSt_SSp0'] +
termdict['DSt_DSp0'] - termdict['DSt_SSp0']) * 0.5
Tcontrasts['sentence'] = (termdict['DSt_DSp0'] + termdict['DSt_SSp0'] -
termdict['SSt_DSp0'] - termdict['SSt_SSp0']) * 0.5
Tcontrasts['interaction'] = (termdict['SSt_SSp0'] - termdict['SSt_DSp0'] -
termdict['DSt_SSp0'] + termdict['DSt_DSp0'])
# Ftest
Fcontrasts = {}
Fcontrasts['overall1'] = formulae.Formula(Tcontrasts.values())
return f, Tcontrasts, Fcontrasts
|
d2ce4b35614ca692226133ec72b4f1d46baf065c
| 3,637,245
|
def iter_children(param, childlist=None):
    """
    | Iterator over all sub children of a given parameter.
    | Returns all children's names.
    =============== ================================= ====================================
    **Parameters**  **Type**                          **Description**
    *param*         instance of pyqtgraph parameter   the root node to be traversed
    *childlist*     list                              the list collecting the child names
    =============== ================================= ====================================
Returns
-------
childlist : parameter list
The list of the children from the given node.
Examples
--------
>>> import custom_parameter_tree as cpt
>>> from pyqtgraph.parametertree import Parameter
>>> #Creating the example tree
>>> settings=Parameter(name='settings')
>>> child1=Parameter(name='child1', value=10)
>>> child2=Parameter(name='child2',value=10,visible=True,type='group')
>>> child2_1=Parameter(name='child2_1', value=10)
>>> child2_2=Parameter(name='child2_2', value=10)
>>> child2.addChildren([child2_1,child2_2])
>>> settings.addChildren([child1,child2])
>>> #Get the child list from the param argument
>>> childlist=cpt.iter_children(settings)
>>> #Verify the integrity of result
>>> print(childlist)
['child1', 'child2', 'child2_1', 'child2_2']
"""
    if childlist is None:
        childlist = []
    for child in param.children():
        childlist.append(child.name())
        if child.type() == 'group':
            childlist.extend(iter_children(child))
    return childlist
|
2edbdccc5957cbe6131da70d6dfc24ea67a19e69
| 3,637,246
|
def depth_first_graph_search(problem):
"""
[Figure 3.7]
Search the deepest nodes in the search tree first.
Search through the successors of a problem to find a goal.
The argument frontier should be an empty queue.
Does not get trapped by loops.
If two paths reach a state, only use the first one.
"""
frontier = [(Node(problem.initial))] # Stack
explored = set()
while frontier:
node = frontier.pop()
if problem.goal_test(node.state):
return node
explored.add(node.state)
frontier.extend(child for child in node.expand(problem)
if child.state not in explored and child not in frontier)
return None
|
d610752a99a8c4e7f1b5eee2d520d88f868279eb
| 3,637,248
|
def check_matrix_equality(A, B, tol=None):
"""
Checks the equality of two matrices.
:param A: The first matrix
:param B: The second matrix
:param tol: The decimal place tolerance of the check
:return: The boolean result of the equality check
"""
if len(A) != len(B) or len(A[0]) != len(B[0]):
return False
for i in range(len(A)):
for j in range(len(A[0])):
            if tol is None:
if A[i][j] != B[i][j]:
return False
else:
if round(A[i][j], tol) != round(B[i][j], tol):
return False
return True
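# Example: the matrices agree to two decimal places but are not exactly equal.
A = [[1.0004, 2.0], [3.0, 4.0]]
B = [[1.0, 2.0], [3.0, 4.0]]
assert check_matrix_equality(A, B) is False
assert check_matrix_equality(A, B, tol=2) is True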
|
afc89de848597c6325b6eceb109f7f2311c9be7d
| 3,637,249
|
def about(topic):
"""Return a select function that returns whether
a paragraph contains one of the words in TOPIC.
Arguments:
topic: a list of words related to a subject
>>> about_dogs = about(['dog', 'dogs', 'pup', 'puppy'])
>>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup!'], about_dogs, 0)
'Cute Dog!'
>>> choose(['Cute Dog!', 'That is a cat.', 'Nice pup.'], about_dogs, 1)
'Nice pup.'
"""
assert all([lower(x) == x for x in topic]), 'topics should be lowercase.'
# BEGIN PROBLEM 2
def func(p):
p = [remove_punctuation(lower(x)) for x in split(p)]
for x in topic:
for y in p:
if x == y:
return True
return False
return func
# END PROBLEM 2
|
b73512058675ac9a17a8d5cd36ab544080a2acbe
| 3,637,250
|
import numpy as np
def calcBarycentricCoords(pt, verts):
    """calculate the Barycentric coordinates of pt with respect to verts"""
    verts = np.array(verts)  # vertices formed by N+1 nearest voxels
    pt = np.array(pt)  # voxel of interest
    # solve sum(w_i * vert_i) = pt together with sum(w_i) = 1, in a least-squares sense
    A = np.transpose(np.column_stack((verts, np.ones(verts.shape[0]))))
    b = np.append(pt, 1)
    return np.linalg.lstsq(A, b, rcond=None)[0]
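# Example: the centroid of a triangle has barycentric coordinates (1/3, 1/3, 1/3);
# the weights sum to 1 and reproduce pt as a combination of the vertices.
coords = calcBarycentricCoords(pt=[1 / 3, 1 / 3], verts=[[0, 0], [1, 0], [0, 1]])
# coords ~= array([0.3333, 0.3333, 0.3333])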
|
5869c40d9b95280d3db77dd7eb3a42fab46c45a8
| 3,637,251
|
import re
def parse_regex(ctx, param, values):
"""Compile a regex if given.
:param click.Context ctx: click command context.
    :param click.Parameter param: click command parameter (in this case,
        ``ignore_regex`` from ``-r|--ignore-regex``).
:param list(str) values: list of regular expressions to be compiled.
:return: a list of compiled regular expressions.
.. versionchanged:: 1.1.3 parameter value (``values``) must be a
``list`` of ``str``s.
"""
if not values:
return
return [re.compile(v) for v in values]
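# Direct usage sketch (ctx and param are unused by the body, so None is fine here):
patterns = parse_regex(None, None, ["^tests/", r"\.bak$"])
assert patterns[0].match("tests/foo.py")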
|
b920d5a406ac3b7a8f28bb9125313c90eec5e212
| 3,637,253
|
def get_query_string(**kwargs):
"""
Concatenates the non-None keyword arguments to create a query string for ElasticSearch.
:return: concatenated query string or None if not arguments were given
"""
q = ['%s:%s' % (key, value) for key, value in kwargs.items() if value not in (None, '')]
return ' AND '.join(q) or None
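# Example: None/empty values are dropped, the rest are ANDed together.
assert get_query_string(status="open", owner="alice", label=None) == "status:open AND owner:alice"
assert get_query_string(label=None) is None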
|
cc73c157a8975e5df9c98efcd5b10396e5175486
| 3,637,256
|
import numpy as np
import skimage.util
def check_bin(img):
"""Checks whether image has been properly binarized. NB: works on the assumption that there should be more
background pixels than element pixels.
Parameters
----------
    img : np.ndarray
        Image array whose binarization is to be checked.
Returns
-------
np.ndarray
A binary array of the image.
"""
    img_bool = np.asarray(img, dtype=bool)  # np.bool is removed in modern NumPy
# Gets the unique values in the image matrix. Since it is binary, there should only be 2.
unique, counts = np.unique(img_bool, return_counts=True)
print(unique)
print("Found this many counts:")
print(len(counts))
print(counts)
# If the length of unique is not 2 then print that the image isn't a binary.
if len(unique) != 2:
print("Image is not binarized!")
hair_pixels = len(counts)
print("There is/are {} value(s) present, but there should be 2!\n".format(hair_pixels))
# If it is binarized, print out that is is and then get the amount of hair pixels to background pixels.
if counts[0] < counts[1]:
print("{} is not reversed".format(str(img)))
img = skimage.util.invert(img_bool)
print("Now {} is reversed =)".format(str(img)))
return img
else:
print("{} is already reversed".format(str(img)))
img = img_bool
print(type(img))
return img
|
808e4635befa5848d7683e6e12ead5b5ee297339
| 3,637,257
|
def add_quotes(path):
"""Return quotes if needed for spaces on path."""
quotes = '"' if ' ' in path and '"' not in path else ''
return '{quotes}{path}{quotes}'.format(quotes=quotes, path=path)
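# Example: quoting is applied only when the path contains a space.
assert add_quotes("/usr/bin/python") == "/usr/bin/python"
assert add_quotes("C:\\Program Files\\app") == '"C:\\Program Files\\app"'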
|
6e65da4512183ef62a0ac22b4c3c74f9e5273fbd
| 3,637,258
|
def terminal(board):
"""
Returns True if game is over, False otherwise.
"""
if len(actions(board)) == 0:
return True
if winner(board) is not None:
return True
return False
|
6776ad6a261dd8dd90abbb6abb5fa428f8149bba
| 3,637,259
|
def login():
"""LogIn Page"""
if request.method == "GET":
return render_template("login.html")
email = request.form.get("email")
password = request.form.get("password")
remember = bool(request.form.get("remember"))
user = User.query.filter_by(email=email).first()
if not user or not check_password_hash(user.password, password):
flash("Please check your login details and try again.")
return redirect(url_for("auth.login"))
login_user(user, remember=remember)
return redirect(url_for("main.games"))
|
3db9447298ca149037cdac89e850e893d5f9ac37
| 3,637,260
|
from typing import List
import numpy.typing as npt
def apply_modifiers(membership: npt.ArrayLike, modifiers: List[str]) -> npt.ArrayLike:
"""
Apply a list of modifiers or hedges to a numpy array.
:param membership: Membership values to be modified.
:param modifiers: List of modifiers or hedges.
>>> from fuzzy_expert.operators import apply_modifiers
>>> x = [0.0, 0.25, 0.5, 0.75, 1]
>>> apply_modifiers(x, ('not', 'very'))
array([1. , 0.9375, 0.75 , 0.4375, 0. ])
"""
if modifiers is None:
return membership
fn = {
"EXTREMELY": extremely,
"INTENSIFY": intensify,
"MORE_OR_LESS": more_or_less,
"NORM": norm,
"NOT": not_,
"PLUS": plus,
"SLIGHTLY": slightly,
"SOMEWHAT": somewhat,
"VERY": very,
}
membership = membership.copy()
modifiers = list(modifiers)
modifiers.reverse()
for modifier in modifiers:
membership = fn[modifier.upper()](membership)
return membership
|
6140646bc5943ba7c7b6ce597e033c9797ba5ab4
| 3,637,261
|
import numpy as np
def unitY(m=1.0):
    """Return the Y unit vector scaled by m"""
return np.array((0, m, 0))
|
fda046e085e9ab00d263ec7f5569bcd719113c5d
| 3,637,262
|
import numpy as np
def create_suction_model(radius):
    """Create a circular (disc) suction model with the given pixel radius"""
hm1 = np.tile(np.arange(-radius, radius + 1), (2 * radius + 1, 1))
hm2 = hm1.T
d = np.sqrt(hm1**2 + hm2**2)
return np.where(d < radius, 1, 0).astype(np.float64)
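# Example: radius 2 yields a 5x5 array with a filled disc of ones at the center.
model = create_suction_model(2)
assert model.shape == (5, 5)
assert model[2, 2] == 1.0 and model[0, 0] == 0.0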
|
df8e34b0b8957169099740dc74d07c813056dfc4
| 3,637,263
|
def model_entrypoint(model_name):
"""Fetch a model entrypoint for specified model name
"""
return _model_entrypoints[model_name]
|
8c1658f07db87e99ffbde428bc55281b6b185639
| 3,637,264
|
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
def encrypt(data, password):
    """Encrypt data and return bytes: a 16-byte IV, b':', then the ciphertext.
    The encoded password must be a valid AES key length (16/24/32 bytes)."""
    try:
        cipher = AES.new(password.encode(), AES.MODE_CBC)
        cypher_text_bytes = cipher.encrypt(pad(data.encode(), AES.block_size))
        return cipher.iv + b':' + cypher_text_bytes
    except ValueError:
        print("There was an error")
        raise
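# Usage sketch (PyCryptodome): the password below is an assumed 16-character
# string so that password.encode() is a valid AES-128 key. Since the random IV
# may itself contain b':', split on position rather than on the separator byte.
blob = encrypt("hello world", "0123456789abcdef")
iv, ciphertext = blob[:16], blob[17:]  # 16-byte IV, b':' at index 16, then the ciphertext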
|
2e4719cc48ded4f8c5400bfb5ab583a229034261
| 3,637,265
|
from datetime import datetime
def change_datetime_to_str(input_time=None, str_format="%Y-%m-%d"):
    """
    :param input_time: the datetime to convert; defaults to the current time
    :param str_format: format of the output string; defaults to "%Y-%m-%d"
    :return: the formatted time string
    """
    spec_time = input_time or datetime.now()
    return spec_time.strftime(str_format)
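# Example: format an explicit datetime; omit input_time to use "now".
assert change_datetime_to_str(datetime(2020, 5, 17)) == "2020-05-17"
stamp = change_datetime_to_str(str_format="%Y-%m-%d %H:%M:%S")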
|
f0f3a72ee05b41dbeec12b05a89a26542fcefb21
| 3,637,266
|