content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def ns_diff(newstr, oldstr):
    """Return the formatted, non-negative difference of two numeric strings.

    If the new value is unavailable (STATUS_NA) it is propagated as-is;
    if only the old value is unavailable, it is treated as zero.
    """
    if newstr == STATUS_NA:
        return STATUS_NA
    if oldstr == STATUS_NA:
        # a missing baseline counts as zero
        oldstr = '0'
    delta = int(newstr) - int(oldstr)
    if delta < 0:
        delta = 0
    return '{:,}'.format(delta)
|
bbf58a649ef71524e7413fb47501d0054b828919
| 3,643,084
|
def get_crab(registry):
    """
    Get the Crab Gateway
    :rtype: :class:`crabpy.gateway.crab.CrabGateway`
    """
    # The argument may be either a Pyramid request (which carries a
    # ``registry`` attribute) or the registry itself.
    inner = getattr(registry, 'registry', None)
    target = registry if inner is None else inner
    return target.queryUtility(ICrab)
|
6f8f02ac4bf7e82c8f4828fb4fdab4b78451ae49
| 3,643,085
|
def create_meal():
    """Create a new meal.
    ---
    tags:
      - meals
    parameters:
      - in: body
        name: body
        schema:
          id: Meal
          properties:
            name:
              type: string
              description: the name of the meal
            description:
              type: string
              description: the description of the meal
            price:
              type: number
              format: float
              description: the cost of the meal
            scheduled_for:
              type: string
              format: date-time
              description: the date time that the meal is scheduled for
    responses:
      201:
        description: Meal was successfully created
        schema:
          id: Meal
      401:
        description: The user is not authenticated
      422:
        description: The data failed validation
      428:
        description: The current user has not added their address
    """
    # A meal cannot exist without a delivery location for its owner.
    if current_user.location is None:
        raise PreconditionRequired(Errors.LOCATION_NOT_CREATED_YET)
    meal_data = MEAL_SCHEMA.load(request.json).data
    # Pull tags out before creation (Meal.create does not accept them),
    # using a sentinel so an explicit `tags` key is honoured even if falsy.
    _missing = object()
    tags = meal_data.pop('tags', _missing)
    meal = Meal.create(location_id=current_user.location.id, **meal_data)
    if tags is not _missing:
        meal.tags = tags
    return jsonify(data=MEAL_SCHEMA.dump(meal).data,
                   message=Success.MEAL_CREATED), 201
|
ee1c235410d7d6ca9f3661ea9f7a1f9fb434a730
| 3,643,086
|
def buscaBinariaIterativa(alvo, array):
    """Return the index at which *alvo* is stored in *array*, or -1.

    Performs an iterative binary search over the (sorted) collection.
    If the element is not present, -1 is returned.

    Parameters
    ----------
    alvo : comparable
        Element whose index is being searched for.
    array : list
        Sorted list in which the element is searched.

    Returns
    -------
    int
        The index where *alvo* is stored, or -1 when absent.
    """
    # `low`/`high` replace the original `min`/`max`, which shadowed builtins.
    low = 0
    high = len(array) - 1
    while low <= high:
        mid = (low + high) // 2
        if array[mid] == alvo:
            return mid
        if array[mid] < alvo:
            low = mid + 1
        else:
            high = mid - 1
    return -1
|
e74fed0781b3c1bed7f5f57713a06c58bcbde107
| 3,643,087
|
def empiricalcdf(data, method='Hazen'):
    """Return the empirical cdf.

    Methods available:
        Hazen:       (i-0.5)/N
        Weibull:     i/(N+1)
        Chegodayev:  (i-.3)/(N+.4)
        Cunnane:     (i-.4)/(N+.2)
        Gringorten:  (i-.44)/(N+.12)
        California:  (i-1)/N
    Where i goes from 1 to N.

    :param data: array-like sample.
    :param method: plotting-position formula name (case-insensitive).
    :raises ValueError: if *method* is not one of the formulas above.
    """
    # Double argsort yields the rank of each element (1-based).
    i = np.argsort(np.argsort(data)) + 1.
    N = len(data)
    formulas = {
        'hazen': lambda: (i - 0.5) / N,
        'weibull': lambda: i / (N + 1.),
        'california': lambda: (i - 1.) / N,
        'chegodayev': lambda: (i - .3) / (N + .4),
        'cunnane': lambda: (i - .4) / (N + .2),
        'gringorten': lambda: (i - .44) / (N + .12),
    }
    try:
        return formulas[method.lower()]()
    except KeyError:
        # Original message was missing a space between 'Hazen,' and 'Chegodayev'.
        raise ValueError('Unknown method. Choose among Weibull, Hazen, '
                         'Chegodayev, Cunnane, Gringorten and California.') from None
|
6150361002d3f008185e5deafabfdc74b3189bd8
| 3,643,088
|
def CCT_to_xy_Kang2002(CCT):
    """
    Returns the *CIE xy* chromaticity coordinates from given correlated
    colour temperature :math:`T_{cp}` using *Kang et al. (2002)* method.
    Parameters
    ----------
    CCT : numeric or array_like
        Correlated colour temperature :math:`T_{cp}`.
    Returns
    -------
    ndarray
        *CIE xy* chromaticity coordinates.
    Raises
    ------
    ValueError
        If the correlated colour temperature is not in appropriate domain.
    References
    ----------
    :cite:`Kang2002a`
    Examples
    --------
    >>> CCT_to_xy_Kang2002(6504.38938305)  # doctest: +ELLIPSIS
    array([ 0.313426 ...,  0.3235959...])
    """
    CCT = as_float_array(CCT)
    # Warn (but keep computing) for temperatures outside Kang's fitted
    # domain [1667 K, 25000 K] -- the polynomials are extrapolated there.
    if np.any(CCT[np.asarray(np.logical_or(CCT < 1667, CCT > 25000))]):
        usage_warning(('Correlated colour temperature must be in domain '
                       '[1667, 25000], unpredictable results may occur!'))
    # x: two cubic fits in 1/CCT, split at 4000 K.
    x = np.where(
        CCT <= 4000,
        -0.2661239 * 10 ** 9 / CCT ** 3 - 0.2343589 * 10 ** 6 / CCT ** 2 +
        0.8776956 * 10 ** 3 / CCT + 0.179910,
        -3.0258469 * 10 ** 9 / CCT ** 3 + 2.1070379 * 10 ** 6 / CCT ** 2 +
        0.2226347 * 10 ** 3 / CCT + 0.24039,
    )
    # y: piecewise cubic polynomials in x over three CCT ranges.
    cnd_l = [CCT <= 2222, np.logical_and(CCT > 2222, CCT <= 4000), CCT > 4000]
    i = -1.1063814 * x ** 3 - 1.34811020 * x ** 2 + 2.18555832 * x - 0.20219683
    j = -0.9549476 * x ** 3 - 1.37418593 * x ** 2 + 2.09137015 * x - 0.16748867
    k = 3.0817580 * x ** 3 - 5.8733867 * x ** 2 + 3.75112997 * x - 0.37001483
    y = np.select(cnd_l, [i, j, k])
    xy = tstack([x, y])
    return xy
|
cb9462e2b38bf5c55e7d7984632923ba9029e1fb
| 3,643,089
|
def tel_information(tel_number):
    """Return a dict describing a phone number's validity and operator.

    If the number is not valid, validation is False and operator is None
    (as reported by ``is_valid`` / ``tel_operator``).
    """
    return {
        'validation': is_valid(tel_number),
        'operator': tel_operator(tel_number),
    }
|
b68fe615a3adf5e8a7ac8528f4b89ba2d85b4067
| 3,643,090
|
import pathlib
from typing import Dict
from typing import Any
import yaml
import json
def load_file(file_name: pathlib.Path) -> Dict[str, Any]:
    """
    Load JSON or YAML file content into a dict.

    Not intended as the default load mechanism; use only as a fallback
    when the OSCAL object type of the file is not known in advance.
    """
    content_type = FileContentType.to_content_type(file_name.suffix)
    with file_name.open('r', encoding=const.FILE_ENCODING) as handle:
        if content_type == FileContentType.JSON:
            return json.load(handle)
        if content_type == FileContentType.YAML:
            return yaml.load(handle, yaml.FullLoader)
|
c042d9e94953c2130971fe6ebf4774cd31556256
| 3,643,091
|
# Alias the import: the fixture below deliberately reuses the class name, and
# without an alias `return DocumentPlugin` returned the fixture function itself
# instead of the plugin class.
from tests.test_plugins.documentations_plugin import DocumentPlugin as _DocumentPluginClass


def DocumentPlugin():
    """
    :return: document plugin class
    """
    return _DocumentPluginClass
|
8cbcc4eb3ee58236f9fbf861a6e33a696db2ddff
| 3,643,092
|
import re
def remove_extended(text):
    """Strip characters in the U+0080..U+00FF range (Latin-1 Supplement).

    https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)
    Note: NBSP (U+00A0) is removed as well. Chinese punctuation handling
    was considered but is intentionally not applied here.
    """
    latin_supplement = re.compile(r'[\x80-\xFF]')
    return latin_supplement.sub('', text)
|
52d0f5082b519d06f7dd20ba3d755790b1f3166d
| 3,643,094
|
def appointment_letter(request, tid):
    """Display the appointment letter for the operation with pk *tid*."""
    operation = get_object_or_404(Operation, pk=tid)
    context = {'paf': operation}
    return render(request, 'transaction/appointment_letter.html', context)
|
765115cb98e4b99cdff1ad2ad010d635eabf4103
| 3,643,095
|
import random
from typing import Iterable
import itertools
def balance_targets(sentences: Iterable[Sentence], method: str = "downsample_o_cat", shuffle=True) \
        -> Iterable[Sentence]:
    """
    Oversamples and/or undersamples training sentences by a number of targets.
    This is useful for linear shallow classifiers, that are prone to simply overfit the most-occurring category.
    See the source code for a documentation of resample methods logic
    :param shuffle: whether to shuffle the output
    :param sentences: sentences to resample
    :param method: resample method, one of {downsample_o_cat, downsample_o_pzk_cats, all_upsampled, remove_o_cat}
    :return: resampled, possibly shuffled input sentences
    """
    # Materialise the input: the logic below iterates it several times, which
    # would silently yield wrong results for a one-shot generator.
    sentences = list(sentences)
    targets = [s.label for s in sentences]

    def _second_top_count():
        # second-highest per-category count among categories other than "O";
        # only computed for the methods that actually need it
        counts = sorted(sum(t == cat for t in targets) for cat in set(targets) - {"O"})
        return counts[-2]

    if method == "downsample_o_cat":
        # downsample "other" category to second-most-occurring category count
        cap = _second_top_count()
        out_sentences = (random.sample([s for s in sentences if s.label == "O"], cap) +
                         [s for s in sentences if s.label != "O"])
    elif method == "downsample_o_pzk_cats":
        # downsample "other", then "P_ZK" (experience description)
        # NOTE(review): docstring of the original mentioned the third-most-occurring
        # count, but the code reuses the same second-top count for both passes — confirm.
        cap = _second_top_count()
        out_sentences = (random.sample([s for s in sentences if s.label == "O"], cap) +
                         [s for s in sentences if s.label != "O"])
        out_sentences = (random.sample([s for s in out_sentences if s.label == "P_ZK"], cap) +
                         [s for s in out_sentences if s.label != "P_ZK"])
    elif method == "all_upsampled":
        # upsample all categories to the second-top count
        cap = _second_top_count()
        out_sentences = list(itertools.chain(*[random.choices([s for s in sentences if s.label == cat],
                                                              k=cap) for cat in set(targets)]))
    elif method == "remove_o_cat":
        # completely remove sentences of "other" category
        out_sentences = [s for s in sentences if s.label != "O"]
    else:
        out_sentences = sentences
    if shuffle:
        # random shuffle output sentences (in place)
        random.shuffle(out_sentences)
    return out_sentences
|
2d0b5736bcfeb6e7b2566791dba4d74ac3c84456
| 3,643,096
|
def discriminator_loss(real_output, fake_output, batch_size):
    """
    Computes the discriminator loss after training with HR & fake images.
    :param real_output: Discriminator output of the real dataset (HR images).
    :param fake_output: Discriminator output of the fake dataset (SR images).
    :param batch_size: Batch size.
    :return: Discriminator loss.
    """
    # Real images are labelled 1, generated (SR) images are labelled 0.
    partial_losses = []
    for make_labels, output in ((tf.ones_like, real_output),
                                (tf.zeros_like, fake_output)):
        per_sample = cross_entropy(make_labels(output), output)
        partial_losses.append(
            tf.nn.compute_average_loss(per_sample, global_batch_size=batch_size))
    return partial_losses[0] + partial_losses[1]
|
ade81d34b80226a8905d64249187f35c73d496ee
| 3,643,097
|
def NNx(time, IBI, ibimultiplier=1000, x=50):
    """Compute the Heart Rate Variability metrics NNx and pNNx.

    Args:
        time (pandas.Series): time column (currently unused; kept for
            interface compatibility).
        IBI (pandas.Series or array-like): inter-beat intervals.
        ibimultiplier (int): default = 1000; converts IBI to milliseconds.
            Set to 1 if the data is already in ms.
        x (int): default = 50; threshold (ms) for successive-interval
            differences.

    Returns:
        tuple(float, float):
            NNx  - number of successive-interval differences exceeding x ms,
            pNNx - NNx as a percentage of all NN (R-R) interval differences.
    """
    # (the original `time = time` no-op was removed)
    ibi = IBI * ibimultiplier
    differences = np.abs(np.diff(ibi))
    nnx = np.sum(differences > x)
    pnnx = (nnx / len(differences)) * 100
    # both values rounded to one decimal place
    return (round(nnx * 10) / 10), (round(pnnx * 10) / 10)
|
94f7f7ec732532cddfe5c29a273af479733e4ced
| 3,643,098
|
from datetime import datetime
def datetimeobj_YmdHMS(value):
    """Convert timestamp string to a datetime object.

    Timestamps strings like '20130618120000' are able to be converted by this
    function.
    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.
    Returns:
        A datetime object.
    Raises:
        ValueError: If timestamp is invalid.
    Note: The timezone is assumed to be UTC/GMT.
    """
    i = int(value)
    # Peel fields off the integer right-to-left, two decimal digits at a
    # time: SS, MM, HH, dd, mm; what remains is the 4-digit year.
    S = i
    M = S // 100
    H = M // 100
    d = H // 100
    m = d // 100
    Y = m // 100
    # With `from datetime import datetime` in scope, the original
    # `datetime.datetime(...)` raised AttributeError; call the class directly.
    return datetime(
        Y % 10000, m % 100, d % 100, H % 100, M % 100, S % 100, tzinfo=TZ_GMT
    )
|
d1f63b1e50f278bd4dcea78feea8942bc1112c6f
| 3,643,099
|
def home():
    """Render the device listing page."""
    all_devices = Device.query.all()
    return render_template('devices/home.html', devices=all_devices)
|
d0f9b14cedf83fbeb35166e1ad9b2de295e2584f
| 3,643,100
|
def translate_value(document_field, form_value):
    """
    Given a document_field and a form_value this will translate the value
    to the correct result for mongo to use.
    """
    if not isinstance(document_field, ReferenceField):
        return form_value
    # Resolve the referenced document by id; an empty form value maps to None.
    if not form_value:
        return None
    return document_field.document_type.objects.get(id=form_value)
|
5c72764efde00fb4f5093a800706082c1171b5b6
| 3,643,101
|
def error_404(error):
    """Custom 404 Error Page"""
    body = render_template("error.html", error=error)
    return body, 404
|
1f1429b8c86ed7a486c498cae6955961f3084ef5
| 3,643,102
|
def sum_num(n1, n2):
    """Return the sum of two numbers.

    :param n1: first addend
    :param n2: second addend
    :return: the value of ``n1 + n2``
    """
    return n1 + n2
|
08477e596317f6b8750debd39b5cf0aa56da857c
| 3,643,103
|
def intensity_slice_volume(kernel_code,
                           image_variables,
                           g_variables,
                           blockdim,
                           bound_box,
                           vol_dim,
                           voxel_size,
                           poses,
                           out_points=False):
    """
    Function that slices an intensity volume with fan shaped sections
    section defined by poses of a curvilinear array
    :param kernel_code: CUDA C++ kernel code to compile
    :param image_variables: image dimensioning variable list
    :param g_variables: All preallocated GPU variables
        as described in the preallocation function. A list with
        the following indexes:
        0 - fan positions in 2D
        1 - fan positions in 3D
        2 - intensities mapped in fan positions
        3 - the target intensity volume
        4 - the output images in image space
        5 - the 2D fan mask outline
    :param blockdim: block dimensions for CUDA kernels
    :param bound_box: bounding box of target volume
    :param vol_dim: 3D intensity volume dimensions
    :param voxel_size: voxel_size of the volume
    :param poses: input set of poses
    :param out_points: bool to get fan positions or not
    :return: positions in 3D, stack of resulting images
    """
    # First, compile kernel code with SourceModule
    cuda_modules = SourceModule(kernel_code)
    # Get image variables from input
    fan_parameters = image_variables[0]
    slice_dim = image_variables[1]
    image_dim = image_variables[2]
    pixel_size = image_variables[3]
    # Define voxel size for intersection of intensity volume
    voxel_size = voxel_size.astype(np.float32)
    # Get size of one image, useful to get array of images
    im_size = image_dim[0] * image_dim[1]
    # Get block and grid dimensions as int
    blockdim_x = int(blockdim[0])
    blockdim_y = int(blockdim[1])
    griddim_x = int(slice_dim[0] / blockdim_x)
    griddim_y = int(slice_dim[1] / blockdim_y)
    image_num = int(slice_dim[2])
    # Convert poses to 1D array to be input in a kernel
    pose_array = np.zeros((1, 9 * image_num)).astype(np.float32)
    # And an array to offset fan position per image plane
    offset_array = np.zeros((1, 3 * image_num)).astype(np.float32)
    for p_ind in range(image_num):
        # NOTE(review): `poses` looks like a horizontal stack of per-image
        # 4-column transforms (at least 3 rows used) — confirm with caller.
        pose = poses[:, 4 * p_ind:4 * (p_ind + 1)]
        # Allocate the pose: columns 0,1 (rotation part) and 3 (translation)
        # of the first three rows, flattened row by row
        pose_array[0, 9 * p_ind:9 * (p_ind + 1)] = \
            np.hstack((pose[0, 0:2], pose[0, 3],
                       pose[1, 0:2], pose[1, 3],
                       pose[2, 0:2], pose[2, 3]))
        # Allocate the offset (second column of the rotation part)
        offset_array[0, 3 * p_ind:3 * (p_ind + 1)] = pose[0:3, 1]
    # 1-Run position computation kernel, acts on index 0 and 1 of
    # the gpu variables, get kernel
    transform_kernel = cuda_modules.get_function("transform")
    # Then run it
    transform_kernel(g_variables[1],
                     g_variables[0],
                     drv.In(pose_array),
                     drv.In(offset_array),
                     drv.In(fan_parameters),
                     np.int32(image_num),
                     block=(blockdim_x, blockdim_y, 3),
                     grid=(griddim_x, griddim_y, image_num))
    # Collect the output to a CPU array
    # NOTE(review): when out_points is False this array is returned
    # uninitialised (np.empty) — callers must only use it with out_points=True.
    positions_3d = np.empty((1, np.prod(slice_dim) * 3), dtype=np.float32)
    # In case points are to be used or visualised (with out_points as True)
    if out_points is True:
        g_variables[1].get(positions_3d)
        positions_3d = positions_3d.reshape([3, np.prod(slice_dim)]).T
    # 2-Next step, run slicing kernel, where intensity values are
    # placed in the positions. Define volume dimensions
    intensity_volume_dims = np.hstack((bound_box[0, :],
                                       vol_dim[0],
                                       vol_dim[1],
                                       vol_dim[2])).astype(np.float32)
    # Call kernel from file
    slice_kernel = cuda_modules.get_function('weighted_slice')
    slice_kernel(g_variables[2],
                 g_variables[1],
                 g_variables[3],
                 drv.In(intensity_volume_dims),
                 drv.In(voxel_size),
                 drv.In(slice_dim),
                 block=(blockdim_x, blockdim_y, 1),
                 grid=(griddim_x, griddim_y, image_num))
    # 3-Map pixels to fan like image
    # Define bounds of image output in 2d coordinates as float
    image_bounding_box = np.array([-image_dim[0] * pixel_size[0]/2*1000,
                                   0, image_dim[0],
                                   image_dim[1]]).astype(np.float32)
    # Allocate output images, the intensity image as a float, and the
    # fan outline as an int. These must be in CPU.
    intensity_images = np.empty((1, np.prod(image_dim)), dtype=np.float32)
    masks = np.empty((1, np.prod(image_dim)), dtype=np.int32)
    # Call kernel from file
    map_kernel = cuda_modules.get_function('intensity_map_back')
    # Then run it, multiplying coordinates value by a 1000, in order
    # to avoid sampling errors
    map_kernel(g_variables[4],
               g_variables[5],
               g_variables[2],
               g_variables[0]*1000,
               drv.In(slice_dim),
               drv.In(image_bounding_box),
               drv.In(pixel_size*1000),
               block=(blockdim_x, blockdim_y, 1),
               grid=(griddim_x, griddim_y, image_num))
    # Create a volume with generated images
    intensity_image_array = np.zeros((image_dim[1],
                                      image_dim[0],
                                      image_dim[2])).astype(np.float32)
    # Gather the results, zeroing the GPU buffers for the next call
    g_variables[4].get(intensity_images)
    g_variables[4].fill(0)
    g_variables[5].get(masks)
    g_variables[5].fill(0)
    for plane in range(image_num):
        # Get image and reshape it
        current_image = intensity_images[0, im_size*plane:
                                         im_size*(plane+1)]
        # Get masks that weight values
        current_mask = masks[0, im_size*plane:
                             im_size*(plane + 1)]
        # Normalise by amount of points added to image output, using the
        # the occurrences output by mask, ignoring divide error
        with np.errstate(divide='ignore'):
            current_image = np.divide(current_image, current_mask)
        current_image = current_image.reshape(image_dim[0], image_dim[1]).T
        # Scale intensities, by setting nan values to minimum
        nan_indexes = np.where(np.isnan(current_image))
        current_image[nan_indexes] = np.nanmin(current_image)
        # Allocate to output
        intensity_image_array[:, :, plane] = current_image
    # Output a stack of images, where each z-slice has a plane,
    # and the corresponding 3D positions
    return positions_3d, intensity_image_array
|
c5f64f7ee5a95210a2c5598a7e31e36541fcb320
| 3,643,104
|
def mean_filter(img, kernel_size):
    """Smooth *img* by averaging over each pixel's neighbourhood.

    Thin wrapper around ``cv2.blur``.
    """
    blurred = cv2.blur(img, ksize=kernel_size)
    return blurred
|
58d5684e0691407f6f77d40d5717523eb617dde9
| 3,643,105
|
def main():
    """Return the module instance."""
    # NOTE(review): `type=str` passes the builtin rather than the string
    # 'str' Ansible usually expects — confirm against the Ansible docs.
    argument_spec = dict(
        data=dict(default=None),
        path=dict(default=None, type=str),
        file=dict(default=None, type=str),
    )
    return AnsibleModule(argument_spec=argument_spec)
|
846aa9bf9ce23ba7a05aeb91158ad04770b7721e
| 3,643,106
|
from typing import Optional
from pathlib import Path
def load_RegNetwork_interactions(
    root_dir: Optional[Path] = None,
) -> pd.DataFrame:
    """
    Loads RegNetwork interaction datafile. Downloads the file first if not already present.
    """
    data_path = _download_RegNetwork(root_dir)
    column_names = ["g1", "id1", "g2", "id2"]
    return pd.read_csv(data_path, delimiter="\t", header=None, names=column_names)
|
15571c71ac3bd386518a0f2ec4d293b20394c4b2
| 3,643,107
|
def get_model_config(model, dataset):
    """Map model name to model network configuration."""
    # cifar10 has its own dedicated mapping
    if 'cifar10' == dataset.name:
        return get_cifar10_model_config(model)
    # dispatch table from model name to a zero-argument factory
    builders = {
        'vgg11': lambda: vgg_model.Vgg11Model(),
        'vgg16': lambda: vgg_model.Vgg16Model(),
        'vgg19': lambda: vgg_model.Vgg19Model(),
        'lenet': lambda: lenet_model.Lenet5Model(),
        'googlenet': lambda: googlenet_model.GooglenetModel(),
        'overfeat': lambda: overfeat_model.OverfeatModel(),
        'alexnet': lambda: alexnet_model.AlexnetModel(),
        'trivial': lambda: trivial_model.TrivialModel(),
        'inception3': lambda: inception_model.Inceptionv3Model(),
        'inception4': lambda: inception_model.Inceptionv4Model(),
        'resnet50': lambda: resnet_model.ResnetModel(model, (3, 4, 6, 3)),
        'resnet50_v2': lambda: resnet_model.ResnetModel(model, (3, 4, 6, 3)),
        'resnet101': lambda: resnet_model.ResnetModel(model, (3, 4, 23, 3)),
        'resnet101_v2': lambda: resnet_model.ResnetModel(model, (3, 4, 23, 3)),
        'resnet152': lambda: resnet_model.ResnetModel(model, (3, 8, 36, 3)),
        'resnet152_v2': lambda: resnet_model.ResnetModel(model, (3, 8, 36, 3)),
    }
    builder = builders.get(model)
    if builder is None:
        raise KeyError('Invalid model name \'%s\' for dataset \'%s\'' %
                       (model, dataset.name))
    return builder()
|
eb3da4fa2e7308fe0b7394b6c654e171abaf2363
| 3,643,110
|
from datetime import datetime
def utc_now():
    """Return the current UTC time as an integer Unix timestamp (seconds).

    The previous implementation called ``datetime.datetime.utcnow()`` (an
    AttributeError under ``from datetime import datetime``) and formatted
    with ``strftime("%s")``, which is platform-specific and interprets the
    naive datetime in the *local* timezone. An aware datetime's
    ``timestamp()`` is portable and correct.
    """
    from datetime import timezone  # local import keeps this fix self-contained
    now = datetime.now(timezone.utc)
    return int(now.timestamp())
|
35edc0e19f236263a8f2efd0fa9be81663042484
| 3,643,111
|
def rrc_filter(alpha, length, osFactor, plot=False):
    """
    Generates the impulse response of a root raised cosine filter.
    Args:
        alpha (float): Filter roll-off factor.
        length (int): Number of symbols to use in the filter.
        osFactor (int): Oversampling factor (number of samples per symbol).
        plot (bool): Enable or disable plotting of filter impulse response.
    Returns:
        (NumPy array): Filter coefficients for use in np.convolve.
    Raises:
        error.WfmBuilderError: If alpha is outside [0, 1] or the resulting
            number of filter taps is odd.
    """
    if alpha < 0 or alpha > 1.0:
        raise error.WfmBuilderError('Invalid \'alpha\' chosen. Use something between 0.1 and 1.')
    filterOrder = length * osFactor
    # Make GOOD and sure that filterOrder is an integer value
    filterOrder = round(filterOrder)
    if filterOrder % 2:
        raise error.WfmBuilderError('Must use an even number of filter taps.')
    delay = filterOrder / 2
    # Symmetric time axis in symbol units, centered on zero
    t = np.arange(-delay, delay) / osFactor
    # Calculate the impulse response without warning about the inevitable divide by zero operations
    # I promise we will deal with those down the road
    with np.errstate(divide='ignore', invalid='ignore'):
        h = -4 * alpha / osFactor * (np.cos((1 + alpha) * np.pi * t) +
            np.sin((1 - alpha) * np.pi * t) / (4 * alpha * t)) / (np.pi * ((4 * alpha * t) ** 2 - 1))
    # Find middle point of filter and manually populate the value
    # np.where returns a list of indices where the argument condition is True in an array. Nice.
    idx0 = np.where(t == 0)
    h[idx0] = -1 / (np.pi * osFactor) * (np.pi * (alpha - 1) - 4 * alpha)
    # Define machine precision used to check for near-zero values for small-number arithmetic
    eps = np.finfo(float).eps
    # Find locations of divide by zero points (|4*alpha*t| == 1)
    divZero = abs(abs(4 * alpha * t) - 1)
    # np.where returns a list of indices where the argument condition is True. Nice.
    idx1 = np.where(divZero < np.sqrt(eps))
    # Manually populate divide by zero points with the analytic limit
    h[idx1] = 1 / (2 * np.pi * osFactor) * (np.pi * (alpha + 1) * np.sin(np.pi * (alpha + 1) /
        (4 * alpha)) - 4 * alpha * np.sin(np.pi * (alpha - 1) /
        (4 * alpha)) + np.pi * (alpha - 1) * np.cos(np.pi * (alpha - 1) / (4 * alpha)))
    # Normalize filter energy to 1
    h = h / np.sqrt(np.sum(h ** 2))
    if plot:
        plt.plot(t, h)
        plt.title('Filter Impulse Response')
        plt.ylabel('h(t)')
        plt.xlabel('t')
        plt.show()
    return h
|
9fc5c916e646179ac465fb2d3d897d4dadadd9de
| 3,643,112
|
def get_available_services(project_dir: str):
    """Get standard services bundled with stakkr."""
    services_dir = file_utils.get_dir('static') + '/services/'
    # map each config file (minus its 4-character extension) to its full path
    services = {conf_file[:-4]: services_dir + conf_file
                for conf_file in _get_services_from_dir(services_dir)}
    # merge in any project-local service definitions
    return _add_local_services(project_dir, services)
|
b361eaefd0772ca9bcc75274f19e7550b02d1484
| 3,643,113
|
def build_insert(table, to_insert):
    """
    Build an insert request.

    Parameters
    ----------
    table : str
        Table where query will be directed.
    to_insert: iterable
        The list of columns where the values will be inserted.

    Returns
    -------
    str
        Built query.
    """
    # Materialise once: the original joined the iterable twice, which
    # silently produced an empty parameter list for generator inputs.
    columns = list(to_insert)
    column_sql = ', '.join('{0}'.format(c) for c in columns)
    params_sql = ', '.join(':{0}'.format(c) for c in columns)
    return 'INSERT INTO "{0}" ({1}) VALUES ({2})'.format(
        table, column_sql, params_sql)
|
cf2e72c57e5502660ed3dcade6885076ff8c2014
| 3,643,114
|
from pathlib import Path
import json
def get_reference_data(fname):
    """
    Load JSON reference data.

    :param fname: Filename without extension.
    :type fname: str
    """
    base_dir = Path(__file__).resolve().parent
    data_file = base_dir / 'reference' / 'data' / (fname + '.json')
    with data_file.open() as handle:
        return json.load(handle)
|
73880586393ce9463a356d69880f2f285058637f
| 3,643,115
|
def _is_l10n_ch_isr_issuer(account_ref, currency_code):
    """ Returns True if the string account_ref is a valid ISR issuer.
    An ISR issuer is a postal account number whose prefix depends on the
    currency: 01 (CHF) or 03 (EUR).
    """
    reference = account_ref or ''
    if not reference.startswith(ISR_SUBSCRIPTION_CODE[currency_code]):
        return False
    return _is_l10n_ch_postal(account_ref)
|
5709d8f67aefe9b9faac6f4541f8a050eb95c82f
| 3,643,116
|
import struct
def little_endian_uint32(i):
    """Return the 32 bit unsigned integer little-endian representation of i"""
    packed = struct.pack('<I', i)
    (value,) = struct.unpack('=I', packed)
    return value
|
07f72baaf8f7143c732fd5b9e56b0b7d02d531bd
| 3,643,117
|
def evaluate_scores(scores_ID, scores_OOD):
    """calculates classification performance (ROCAUC, FPR@TPR95) based on lists of scores
    Returns:
        ROCAUC, fpr95
    """
    # in-distribution samples are the positive class (label 1)
    y_true = np.concatenate([np.ones(scores_ID.shape), np.zeros(scores_OOD.shape)])
    y_score = np.concatenate([scores_ID, scores_OOD])
    fpr, tpr, _ = roc_curve(y_true, y_score)
    # FPR at the first threshold where TPR exceeds 95%
    first_idx = np.where(tpr > 0.95)[0][0]
    return auc(fpr, tpr), fpr[first_idx]
|
f88a67f09496700ab783a3e91347b085767a2228
| 3,643,118
|
from typing import Union
def check_cardinality(attribute_name: str,
                      analysis: run_metadata_pb2.Analysis
                      ) -> Union[None, str]:
    """Check whether the cardinality exceeds the predefined threshold

    Args:
        attribute_name: (string),
        analysis: (run_metadata_pb2.Analysis), analysis that contain the result
        of cardinality
    Returns:
        Union[None, string]
    """
    # pick up the (last) CARDINALITY scalar metric, defaulting to 0
    cardinality = 0
    for metric in analysis.smetrics:
        if metric.name == run_metadata_pb2.ScalarMetric.CARDINALITY:
            cardinality = metric.value
    if cardinality <= CARDINALITY_THRESHOLD:
        return None
    return template.HIGH_CARDINALITY.format(
        name=attribute_name,
        value=cardinality
    )
|
8a9b9e0c709b64273a2120100730992276b52b46
| 3,643,119
|
def create_df_from(dataset):
    """
    Selects a method, based on the given dataset name, and creates the corresponding dataframe.
    When adding a new method, take care to have as index the ASN and the column names to be of the format "dataset_name_"+"column_name" (e.g., the column "X" from the dataset "setA", should be "setA_X")
    :param dataset: (type = string) name of the dataset to be loaded
    :return: A dataframe with indexes the ASNs and columns the features loaded from the given dataset
    """
    # dispatch table: dataset name -> loader function
    loaders = {
        'AS_rank': create_df_from_AS_rank,
        'personal': create_df_from_personal,
        'PeeringDB': create_df_from_PeeringDB,
        'AS_hegemony': create_df_from_AS_hegemony,
        'Atlas_probes': create_df_from_Atlas_probes,
    }
    if dataset not in loaders:
        raise Exception('Not defined dataset')
    return loaders[dataset]()
|
3a5e6f1a9aa510ec19c6eeb1af8a89574b938ea1
| 3,643,120
|
def plasma_fractal(mapsize=256, wibbledecay=3):
    """
    Generate a heightmap using diamond-square algorithm.
    Return square 2d array, side length 'mapsize', of floats normalised
    to the range 0-1 (min-subtracted, max-divided).
    'mapsize' must be a power of two.

    :param mapsize: side length of the square output (power of two).
    :param wibbledecay: per-iteration divisor of the random perturbation.
    """
    assert (mapsize & (mapsize - 1) == 0)
    # np.float_ was removed in NumPy 2.0; np.float64 is the same dtype.
    maparray = np.empty((mapsize, mapsize), dtype=np.float64)
    maparray[0, 0] = 0
    stepsize = mapsize
    wibble = 100

    def wibbledmean(array):
        return array / 4 + wibble * np.random.uniform(-wibble, wibble, array.shape)

    def fillsquares():
        """For each square of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        cornerref = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        squareaccum = cornerref + np.roll(cornerref, shift=-1, axis=0)
        squareaccum += np.roll(squareaccum, shift=-1, axis=1)
        maparray[stepsize // 2:mapsize:stepsize,
                 stepsize // 2:mapsize:stepsize] = wibbledmean(squareaccum)

    def filldiamonds():
        """For each diamond of points stepsize apart,
        calculate middle value as mean of points + wibble"""
        mapsize = maparray.shape[0]
        drgrid = maparray[stepsize // 2:mapsize:stepsize, stepsize // 2:mapsize:stepsize]
        ulgrid = maparray[0:mapsize:stepsize, 0:mapsize:stepsize]
        ldrsum = drgrid + np.roll(drgrid, 1, axis=0)
        lulsum = ulgrid + np.roll(ulgrid, -1, axis=1)
        ltsum = ldrsum + lulsum
        maparray[0:mapsize:stepsize, stepsize // 2:mapsize:stepsize] = wibbledmean(ltsum)
        tdrsum = drgrid + np.roll(drgrid, 1, axis=1)
        tulsum = ulgrid + np.roll(ulgrid, -1, axis=0)
        ttsum = tdrsum + tulsum
        maparray[stepsize // 2:mapsize:stepsize, 0:mapsize:stepsize] = wibbledmean(ttsum)

    while stepsize >= 2:
        fillsquares()
        filldiamonds()
        stepsize //= 2
        wibble /= wibbledecay
    # Normalise to [0, 1]
    maparray -= maparray.min()
    return maparray / maparray.max()
|
f3b0c65f7bff6526a91c8d398a430a72cf744421
| 3,643,121
|
import pickle
def read_ids():
    """
    Reads the content from a file as a tuple and returns the tuple
    :return: node_id, pool_id (or False if no file)
    """
    if not const.MEMORY_FILE.exists():
        return False
    with open(const.MEMORY_FILE, 'rb') as memory_file:
        stored = pickle.load(memory_file)
    # the persisted payload must be exactly a (node_id, pool_id) pair
    assert type(stored) is tuple and len(stored) == 2
    return stored
|
89606543b149cac636765a6f3e2aef34f2adc38b
| 3,643,123
|
from pymbolic.primitives import Call
def _match_caller_callee_argument_dimension_(program, callee_function_name):
    """
    Returns a copy of *program* with the instance of
    :class:`loopy.kernel.function_interface.CallableKernel` addressed by
    *callee_function_name* in the *program* aligned with the argument
    dimensions required by *caller_knl*.
    .. note::
        The callee kernel addressed by *callee_function_name*, should be
        called at only one location throughout the program, as multiple
        invocations would demand complex renaming logic which is not
        implemented yet.
    """
    assert isinstance(program, TranslationUnit)
    assert isinstance(callee_function_name, str)
    # the callee must be a known, non-entrypoint callable of the program
    assert callee_function_name not in program.entrypoints
    assert callee_function_name in program.callables_table
    is_invoking_callee = _FunctionCalledChecker(
        callee_function_name).map_kernel
    # exactly one kernel in the program may invoke the callee
    # (the 1-tuple unpacking raises otherwise)
    caller_knl, = [in_knl_callable.subkernel for in_knl_callable in
            program.callables_table.values() if isinstance(in_knl_callable,
                CallableKernel) and
            is_invoking_callee(in_knl_callable.subkernel)]
    # ... and that kernel must contain exactly one call site to the callee
    assert len([insn for insn in caller_knl.instructions if (isinstance(insn,
        CallInstruction) and isinstance(insn.expression, Call) and
        insn.expression.function.name == callee_function_name)]) == 1
    new_callee_kernel = _match_caller_callee_argument_dimension_for_single_kernel(
            caller_knl, program[callee_function_name])
    return program.with_kernel(new_callee_kernel)
|
7c37a20776e1ff551dca3f2acd1b36e47cf6b06e
| 3,643,124
|
def new_automation_jobs(issues):
    """
    :param issues: issues object pulled from Redmine API
    :return: returns a new subset of issues that are Status: NEW and match a term in AUTOMATOR_KEYWORDS)
    """
    new_jobs = {}
    for issue in issues:
        # Only new issues
        if issue.status.name != 'New':
            continue
        # Normalised job type: subject lowercased with whitespace removed
        # ('subject' is the job type i.e. Diversitree)
        normalized = issue.subject.lower().replace(' ', '')
        if normalized == 'iridaretrieve':
            new_jobs[issue] = normalized
    return new_jobs
|
74c9c96aeeea1d15384d617c266daa4d49f3a203
| 3,643,125
|
def make_data(revs, word_idx_map, max_l=50, filter_h=3, val_test_splits=[2, 3], validation_num=500000):
    """
    Transforms sentences into a 2-d matrix.

    Each row is the concatenation of the message indices, the response
    indices, the session mask, and finally the label.
    NOTE(review): `filter_h` and `val_test_splits` are unused here, and
    the mutable default `[2, 3]` is shared across calls — confirm intent.
    NOTE(review): the first `validation_num` revs are routed to `val` and
    only the overflow to `train`; `test` always stays empty — confirm.
    """
    version = begin_time()
    train, val, test = [], [], []
    for rev in revs:
        # message indices + response indices + session mask + label
        sent = get_idx_from_sent_msg(rev["m"], word_idx_map, max_l, True)
        sent += get_idx_from_sent(rev["r"], word_idx_map, max_l, True)
        sent += get_session_mask(rev["m"])
        sent.append(int(rev["y"]))
        if len(val) >= validation_num:
            train.append(sent)
        else:
            val.append(sent)
    train = np.array(train, dtype="int")
    val = np.array(val, dtype="int")
    test = np.array(test, dtype="int")
    print('trainning data', len(train), 'val data',
          len(val), 'spend time:', spend_time(version))
    return [train, val, test]
|
b141297c0ef8d2eeb2c6c62e00924f5e64ffe266
| 3,643,127
|
def init(param_test):
    """
    Initialize class: param_test
    """
    # default CLI parameters and the result they are expected to produce
    param_test.default_args_values = {'di': 6.85, 'da': 7.65, 'db': 7.02}
    param_test.default_result = 6.612133606
    # fall back to the default arguments when none were supplied
    if not param_test.args:
        param_test.args = ['-di 6.85 -da 7.65 -db 7.02']
    return param_test
|
d86cd246d4beb5aa267d222bb12f9637f001032d
| 3,643,128
|
def add_width_to_df(df):
    """Adds an extra column "width" to df which is the angular width of the CME
    in degrees.
    """
    # pull in both position-angle columns from HELCATS first
    for column in ('PA-N [deg]', 'PA-S [deg]'):
        df = add_helcats_to_df(df, column)
    # width is the absolute difference between the two position angles
    return add_col_to_df(df, 'PA-N [deg]', 'PA-S [deg]', 'subtract', 'width',
                         abs_col=True)
|
ea866d161ca77d9d78f04fb613aa1ed8631566b2
| 3,643,129
|
def checkSeconds(seconds, timestamp):
    """Render an age in seconds as a human-readable string.

    Strictly between one minute and one hour the age is reported in whole
    minutes; everything else falls back to 'Since N sec'.  *timestamp* is
    accepted for API compatibility but is not used by the current code.
    """
    if 60 < seconds < 3600:
        minutes = int(seconds / 60)
        if minutes == 1:
            return '{} minute ago'.format(minutes)
        return '{} minutes ago'.format(minutes)
    return 'Since {} sec'.format(seconds)
|
2d07657a14300793a116d28e7c9495ae4a1b61ed
| 3,643,130
|
def get_netrange_end(asn_cidr):
    """
    Return the last usable IPv4 address of a network.

    :param str asn_cidr: ASN CIDR (e.g. '10.0.0.0/24')
    :return: ipv4 address of last IP in netrange
    :rtype: str
    :raises ValueError: if the network size cannot be computed
    """
    try:
        network = ipcalc.Network(asn_cidr)
        # first host + size - 2 lands on the last usable address
        # (the final address of the range is the broadcast address).
        last_in_netrange = ip2long(str(network.host_first())) + network.size() - 2
    except ValueError as error:
        # The original used Python-2-only 'except ValueError, error' and a
        # print statement, which are syntax errors under Python 3.
        print('Issue calculating size of %s network' % asn_cidr)
        raise error
    return socket.inet_ntoa(struct.pack('!L', last_in_netrange))
|
51305dc1540bbc0a361452a80d6732b1eb039fd4
| 3,643,131
|
def load_from_file(filepath, column_offset=0, prefix='', safe_urls=False, delimiter='\s+'):
    """
    Build an EntitiesLabelsFile over the target-entities file at *filepath*.

    :param filepath: path to the file of target entities (and labels if present)
    :param column_offset: offset to the entities column (optional)
    :param prefix: URI prefix (e.g. https://yago-expr.org) when the data lacks
        one; needed when using rdflib and/or virtuoso (optional)
    :param safe_urls: encode URIs that are unsafe for rdflib, e.g. containing
        '(' or special chars (optional)
    :param delimiter: splitting delimiter regex in the file (optional)
    :return: EntitiesLabelsFile giving access to entities/labels as triples
    :rtype: EntitiesLabelsFile
    """
    options = dict(column_offset=column_offset, prefix=prefix,
                   safe_urls=safe_urls, delimiter=delimiter)
    return EntitiesLabelsFile(filepath, **options)
|
2aa9b286e25c6e93a06afb927f7e0ad345208afb
| 3,643,132
|
def _get_repos_info(db: Session, user_id: int):
    """Collect a user's starred repositories in a frontend-friendly shape.

    Args:
        db (Session): sqlAlchemy connection object
        user_id (int): User id

    Returns:
        list[dict]: one dict per starred repo with keys
            id, github_repo_id, name, description, html_url, tags.
    """
    starred = _get_repos_in_db(db=db, user_id=user_id,
                               only_starred_repos=True)
    return [
        {
            "id": repo.id,
            "github_repo_id": repo.github_repo_id,
            "name": repo.name,
            "description": repo.description,
            "html_url": repo.html_url,
            "tags": _get_all_tags_in_repo(repo_id=repo.id, db=db),
        }
        for repo in starred
    ]
|
78a126369355c1c76fc6a1b673b365e3423cd011
| 3,643,135
|
def ultimate_oscillator(close_data, low_data):
    """
    Ultimate Oscillator.

    Formula:
    UO = 100 * ((4 * AVG7) + (2 * AVG14) + AVG28) / (4 + 2 + 1)
    """
    weighted = (4 * average_7(close_data, low_data)
                + 2 * average_14(close_data, low_data)
                + average_28(close_data, low_data))
    return 100 * (weighted / 7)
|
9803eda656cdb9dd49621a93785b55cf5bc15e7c
| 3,643,136
|
def get_completions():
    """
    Returns the global completion list.

    Callers receive the live module-level ``completionList`` object, so
    any mutation they perform is visible to every other caller.
    """
    return completionList
|
901718ea73b5328c277c357ecac859b40518890d
| 3,643,138
|
def breadth_first_search():
    """
    Breadth-first search over the missionaries-and-cannibals state space.

    Returns:
        The goal State if one is reachable, otherwise None.
    """
    initial_state = State(3, 3, "left", 0, 0)
    if initial_state.is_goal():
        return initial_state
    frontier = [initial_state]
    explored = set()
    while frontier:
        state = frontier.pop(0)
        if state.is_goal():
            return state
        explored.add(state)
        for child in successors(state):
            # Bug fix: the original used 'or', which re-enqueued any state
            # that was merely absent from ONE of the two collections; a
            # state must be absent from BOTH before it is added.
            if child not in explored and child not in frontier:
                frontier.append(child)
    return None
|
2c0dca233b2bdb4474dd17ee7386ed15f5af44c1
| 3,643,139
|
import pandas
import numpy as np
def rankSimilarity(df, top=True, rank=3):
    """Return the most (or least) similar document pairs.

    Args:
        df (pandas.DataFrame): square symmetric matrix; rows/cols are
            documents, each cell holds their similarity.
        top (bool): True for most similar, False for least (default True).
        rank (int): number of pairs to report (default 3).

    Returns:
        pandas.DataFrame: indexed by rank (1..rank) with columns
            'indices' (row, col), 'names' (row label, col label) and
            'value' (the similarity).
    """
    work = df.copy(deep=True)
    # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0;
    # to_numpy() is the supported replacement.
    sim = work.to_numpy()
    if top:
        # Mask the diagonal so a document is never paired with itself.
        np.fill_diagonal(sim, -1)
    results = {"indices": [], "names": [], "value": []}
    for _ in range(rank):
        if top:
            idx = np.unravel_index(sim.argmax(), sim.shape)
        else:
            idx = np.unravel_index(sim.argmin(), sim.shape)
        results["indices"].append(idx)
        results["names"].append((df.index[idx[0]], df.index[idx[1]]))
        results["value"].append(df.iloc[idx])
        # Knock out this (symmetric) pair so the next iteration finds the
        # next-best value.
        sentinel = -1 if top else 1
        sim[idx[0], idx[1]] = sentinel
        sim[idx[1], idx[0]] = sentinel
    out = pandas.DataFrame(results, index=range(1, rank + 1))
    out.index.name = "rank"
    return out
|
7ae5a90ced7dbbd79d5f296a6f31f1236384ba7a
| 3,643,140
|
def change_controller(move_group, second_try=False):
    """
    Changes between motor controllers.

    Switches the running ros_control controller to the one mapped to the
    requested move group, retrying once on failure.

    move_group -> Name of required move group.
    second_try -> internal flag; True when this call is the retry, so a
                  second failure returns False instead of recursing again.

    Returns True on success (or when the wanted controller is already
    running), False for an unknown move group or a repeated failure.
    """
    # Service proxies created elsewhere at module level.
    global list_controllers_service
    global switch_controllers_service
    # Each move group is served by exactly one cartesian motor controller.
    controller_map = {
        'gripper': 'cartesian_motor_controller',
        'whole_arm': 'cartesian_motor_controller',
        'realsense': 'cartesian_motor_controller_realsense',
        'sucker': 'cartesian_motor_controller_sucker',
        'wrist_only': 'cartesian_motor_controller_wrist'
    }
    rospy.loginfo('SWITCHING CONTROLLERS')
    if move_group not in controller_map:
        rospy.logerr('%s is not a valid move group for switching controllers' % move_group)
        return False
    wanted_controller = controller_map[move_group]
    c_list = list_controllers_service.call()
    running_controllers = []
    for c in c_list.controller:
        # The joint state controller must stay up; never stop it.
        if c.name == 'joint_state_controller':
            continue
        if c.name == wanted_controller and c.state == 'running':
            rospy.loginfo('Controller %s is already running' % wanted_controller)
            return True
        if c.state == 'running':
            running_controllers.append(c.name)
    controllerSwitch = cmsv.SwitchControllerRequest()
    # strictness 1: presumably BEST_EFFORT in controller_manager_msgs —
    # confirm against the message definition.
    controllerSwitch.strictness = 1
    controllerSwitch.start_controllers = [wanted_controller]
    controllerSwitch.stop_controllers = running_controllers
    # Return True if controller was successfully switched
    res = switch_controllers_service(controllerSwitch).ok
    if res:
        rospy.loginfo('Successfully switched controllers for move group %s' % move_group)
        return res
    elif second_try == False:
        # First failure: wait a moment and retry exactly once.
        rospy.logerr('Failed to switch controllers for move group %s' % move_group)
        rospy.sleep(1.0)
        return change_controller(move_group, True)
    else:
        return False
|
8521e1c2967368a5c8ac956fc26d4da879919a2d
| 3,643,141
|
import base64
def base64_encode(text):
    """<string> -- Encode <string> with base64."""
    raw = text.encode()
    return base64.b64encode(raw).decode()
|
ce837abde42e9a00268e14cfbd2bd4fd3cf16208
| 3,643,142
|
def _signed_bin(n):
    """Transform n into an optimized signed binary representation"""
    digits = []
    while n > 1:
        if n & 1:
            # Odd: adjust by +/-1, choosing the sign that leaves the
            # longer run of trailing zeroes (as measured by _gbd).
            if _gbd(n + 1) > _gbd(n - 1):
                digits.append(-1)
                n += 1
            else:
                digits.append(+1)
                n -= 1
        else:
            digits.append(0)
        n >>= 1
    digits.append(n)
    # Most-significant digit first.
    return digits[::-1]
|
5f9f57e02942264901f6523962b21d1c36accdb2
| 3,643,143
|
def get_neighbor_v6_by_ids(obj_ids):
    """Return a NeighborV6 queryset for the given ids.

    Args:
        obj_ids: iterable of NeighborV6 ids.

    Raises:
        ObjectDoesNotExistException: when an id has no NeighborV6.
        NetworkAPIException: for any other lookup failure.
    """
    found_ids = list()
    for candidate in obj_ids:
        try:
            found_ids.append(get_neighbor_v6_by_id(candidate).id)
        except exceptions.NeighborV6DoesNotExistException as e:
            raise api_rest_exceptions.ObjectDoesNotExistException(str(e))
        except Exception as e:
            raise api_rest_exceptions.NetworkAPIException(str(e))
    return NeighborV6.objects.filter(id__in=found_ids)
|
a51d618961fa3e60c0c464473838791d55ba1f6a
| 3,643,146
|
import base64
def decode_b64_to_image(b64_str: str) -> [bool, np.ndarray]:
    """Decode a base64 string into an OpenCV image (three-channel colour).

    :param b64_str: base64 string, optionally with a data-URI prefix
        ("data:image/...;base64,<payload>")
    :return: (ok, cv2_image); ok is False and the image None on failure
    """
    # Drop a data-URI header such as "data:image/png;base64," if present.
    if "," in b64_str:
        b64_str = b64_str.partition(",")[-1]
    try:
        raw = base64.b64decode(b64_str)
        # Bug fix: the buffer must be viewed as unsigned bytes (uint8);
        # np.int8 reinterprets bytes >= 0x80 as negative values and
        # corrupts the buffer handed to cv2.imdecode.
        return True, cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), 1)
    except cv2.error:
        return False, None
|
66f0e7bb5028ad7247ef7cb468e904c2bc7afdb7
| 3,643,147
|
def _get_index_videos(course, pagination_conf=None):
    """
    Returns the information about each video upload required for the video list
    """
    course_id = str(course.id)
    attrs = [
        'edx_video_id', 'client_video_id', 'created', 'duration',
        'status', 'courses', 'transcripts', 'transcription_status',
        'error_description'
    ]

    def _serialize(video):
        """Map one raw video dict onto the attributes the index needs."""
        row = {}
        for attr in attrs:
            if attr == 'courses':
                # Each entry of video['courses'] maps a course id to its
                # image URL; pick the entry for this course and keep only
                # the URL (the key repeats the course id we already have).
                matches = [c for c in video['courses'] if course_id in c]
                (_key, image_url), = list(matches[0].items())
                row['course_video_image_url'] = image_url
            else:
                row[attr] = video[attr]
        return row

    videos, pagination_context = _get_videos(course, pagination_conf)
    return [_serialize(video) for video in videos], pagination_context
|
5a3288ff8c2f371505fe2c6a3051992bfcc602eb
| 3,643,148
|
def get_user_by_api_key(api_key, active_only=False):
    """
    Look up a User by API key.

    :param api_key: API key to query by
    :param active_only: when True, only consider active users
    :return: matching User object
    :raises UserDoesNotExistException: if no user matches
    """
    filters = {'api_key': api_key}
    if active_only:
        filters['is_active'] = True
    user = models.User.query.filter_by(**filters).first()
    if not user:
        raise UserDoesNotExistException('No user with api_key {api_key} exists'.format(api_key=api_key))
    return user
|
b36373dbfcda80f6aac963153a66b54bce1d828d
| 3,643,149
|
def get_pixel_values_of_line(img, x0, y0, xf, yf):
    """Return the pixel values along a straight line in *img*.

    The line runs from (x0, y0) to (xf, yf), endpoints defined by their
    row and column indices, rasterised with ``draw.line``.

    Parameters
    ----------
    img : np.array
        2-d image array.
    x0 : int
        Row of the starting pixel.
    y0 : int
        Column of the starting pixel.
    xf : int
        Row of the ending pixel.
    yf : int
        Column of the ending pixel.

    Returns
    -------
    np.array
        1-d array of the values of the chosen line of pixels.
    """
    rows, cols = np.array(draw.line(x0, y0, xf, yf))
    return img[rows, cols]
|
ea78efe02130302b34ba8402f21349035b05b2e0
| 3,643,150
|
def _filter_out_variables_not_in_dataframe(X, variables):
"""Filter out variables that are not present in the dataframe.
Function removes variables that the user defines in the argument `variables`
but that are not present in the input dataframe.
Useful when ussing several feature selection procedures in a row. The dataframe
input to the first selection algorithm likely contains more variables than the
input dataframe to subsequent selection algorithms, and it is not possible a
priori, to say which variable will be dropped.
Parameters
----------
X: pandas DataFrame
variables: string, int or list of (strings or int).
Returns
-------
filtered_variables: List of variables present in `variables` and in the
input dataframe.
"""
# When variables is not defined, keep it like this and return None.
if variables is None:
return None
# If an integer or a string is provided, convert to a list.
if not isinstance(variables, list):
variables = [variables]
# Filter out elements of variables that are not in the dataframe.
filtered_variables = [var for var in variables if var in X.columns]
# Raise an error if no column is left to work with.
if len(filtered_variables) == 0:
raise ValueError(
"After filtering no variable remaining. At least 1 is required."
)
return filtered_variables
|
63b4cce75741a5d246f40c5b88cfebaf818b3482
| 3,643,151
|
import gzip
def file_format(input_files):
    """
    Classify input files as fasta, fastq, or invalid by their first byte.

    '@' marks fastq, '>' marks fasta, anything else is invalid.  Each
    file is opened as gzip first and reopened as plain binary when it is
    not gzip-compressed.

    :param input_files: iterable of file paths
    :return: (fasta_files, fastq_files, invalid_files) lists of paths
    """
    fasta_files = []
    fastq_files = []
    invalid_files = []
    for infile in input_files:
        # Try gzip first; a non-gzip file raises OSError (BadGzipFile) on
        # the first read, in which case we reopen it uncompressed.
        # Context managers close both handles — the original leaked the
        # gzip handle on the fallback path.
        try:
            with gzip.open(infile, "rb") as f:
                fst_char = f.read(1)
        except OSError:
            with open(infile, "rb") as f:
                fst_char = f.read(1)
        # Classify by the first byte.
        if fst_char == b'@':
            fastq_files.append(infile)
        elif fst_char == b'>':
            fasta_files.append(infile)
        else:
            invalid_files.append(infile)
    return (fasta_files, fastq_files, invalid_files)
|
acd9a0f7b49884d611d0ac65b43407a323a6588b
| 3,643,152
|
def sub_vector(v1: Vector3D, v2: Vector3D) -> Vector3D:
    """Return the component-wise difference v1 - v2 as a new vector.

    Keyword arguments:
    v1 -- Vector 1 (minuend)
    v2 -- Vector 2 (subtrahend)
    """
    return [v1[i] - v2[i] for i in range(3)]
|
2c9878d6775fdcee554f959e392b2c8d2bad8c8e
| 3,643,153
|
def create_validity_dict(validity_period):
    """Convert a validity period string into a dict for issue_certificate().

    Args:
        validity_period (str): duration such as "365d", "12m" or "1y" —
            an integer followed by a unit suffix (d=DAYS, m=MONTHS, y=YEARS)

    Returns:
        dict: {"Value": number, "Type": "string"} representation of the
        validity period

    Raises:
        ValueError: if the suffix is not one of d/m/y, or the number part
            is not an integer
    """
    suffix_to_unit = {"d": "DAYS", "m": "MONTHS", "y": "YEARS"}
    validity_suffix = validity_period[-1:]
    try:
        validity_unit = suffix_to_unit[validity_suffix]
    except KeyError:
        # The original fell through with validity_unit unbound on an
        # unknown suffix, crashing with UnboundLocalError; raise a clear
        # error instead.
        raise ValueError(
            "Invalid validity period %r: expected a number followed by "
            "'d', 'm' or 'y'" % validity_period
        )
    return {"Value": int(validity_period[:-1]), "Type": validity_unit}
|
ba0ccdd5c009a930b4030b15fbafaa978fe753d4
| 3,643,154
|
def analyse_latency(cid):
    """
    Extract resolve and download durations from <cid>_latency.txt.

    :param cid: cid of the object
    :return: (resolve_time, download_time) as strings; a missing line
        leaves the corresponding value at 0
    """
    resolve_time = 0
    download_time = 0
    # Example file contents (output of the `ipfs get <cid>` command):
    #   Started: 02-19-2022 01:51:16
    #   Resolve Ended: 02-19-2022 01:51:16
    #   Resolve Duraution: 0.049049
    #   Download Ended: 02-19-2022 01:51:16
    #   Download Duraution: 0.006891
    #   Total Duraution: 0.055940
    # (the "Duraution" spelling matches the tool's actual output)
    with open(f'{cid}_latency.txt', 'r') as stdin:
        for line in stdin.readlines():
            if "Resolve Duraution:" in line:
                resolve_time = line.split(": ")[1].split("\n")[0]
            if "Download Duraution:" in line:
                download_time = line.split(": ")[1].split("\n")[0]
    return resolve_time, download_time
|
806a9969cc934faeea842901442ecececfdde232
| 3,643,155
|
import re
def process_ref(paper_id):
    """Attempt to extract arxiv id from a string"""
    cleanups = [
        # whole url -> keep only the id part
        ("https?://arxiv\.org/(abs|pdf|ps)/", 0),
        ("\.pdf$", 0),
        # strip a trailing version marker
        ("v[0-9]+$", 0),
        # drop a leading 'arxiv' marker, e.g. 'arXiv: 2001.1234'
        ("^\s*arxiv[:\- ]", re.IGNORECASE),
    ]
    for pattern, flags in cleanups:
        paper_id = re.sub(pattern, "", paper_id, flags=flags)
    return paper_id
|
a1c817f1ae7b211973efd6c201b5c13e1a91b57b
| 3,643,156
|
import re
def augment_test_func(test_func):
    """Augment test function to parse log files.

    `tools.create_tests` creates functions that run an LBANN
    experiment. This function creates augmented functions that parse
    the log files after LBANN finishes running, e.g. to check metrics
    or runtimes.

    Note: The naive approach is to define the augmented test functions
    in a loop. However, Python closures are late binding. In other
    words, the function would be overwritten every time we define it.
    We get around this overwriting problem by defining the augmented
    function in the local scope of another function.

    NOTE(review): the thresholds `expected_accuracy_range`,
    `expected_mini_batch_times` and `expected_gpu_usage` are free names
    resolved from module globals at call time — assumed to be defined
    elsewhere in this module.

    Args:
        test_func (function): Test function created by
            `tools.create_tests`.

    Returns:
        function: Test that can interact with PyTest.
    """
    test_name = test_func.__name__
    # Define test function
    def func(cluster, dirname):
        # Run LBANN experiment
        experiment_output = test_func(cluster, dirname)
        # Parse LBANN log file; last match wins for train_accuracy, while
        # per-epoch timings/usages are accumulated.
        train_accuracy = None
        gpu_usage = None
        mini_batch_times = []
        gpu_usages = []
        with open(experiment_output['stdout_log_file']) as f:
            for line in f:
                match = re.search('training epoch [0-9]+ objective function : ([0-9.]+)', line)
                if match:
                    train_accuracy = float(match.group(1))
                match = re.search('training epoch [0-9]+ mini-batch time statistics : ([0-9.]+)s mean', line)
                if match:
                    mini_batch_times.append(float(match.group(1)))
                match = re.search('GPU memory usage statistics : ([0-9.]+) GiB mean', line)
                if match:
                    gpu_usages.append(float(match.group(1)))
        # Check if training accuracy is within expected range
        assert (expected_accuracy_range[0]
                < train_accuracy
                <expected_accuracy_range[1]), \
            'train accuracy is outside expected range'
        # Only tested on Ray. Skip if mini-batch test on another cluster.
        # Change this when mini-batch values are available for other clusters.
        # Check if mini-batch time is within +/-25% of the expected value.
        # Note: Skip first epoch since its runtime is usually an outlier
        mini_batch_times = mini_batch_times[1:]
        mini_batch_time = sum(mini_batch_times) / len(mini_batch_times)
        assert (0.75 * expected_mini_batch_times[cluster]
                < mini_batch_time
                < 1.25 * expected_mini_batch_times[cluster]), \
            'average mini-batch time is outside expected range'
        # Check for GPU usage and memory leaks (+/-25% of expected value)
        # Note: Skip first epoch
        gpu_usages = gpu_usages[1:]
        gpu_usage = sum(gpu_usages)/len(gpu_usages)
        assert (0.75 * expected_gpu_usage[cluster]
                < gpu_usage
                < 1.25 * expected_gpu_usage[cluster]),\
            'average gpu usage is outside expected range'
    # Return test function from factory function, keeping the original name
    # so PyTest discovers it under the intended test id.
    func.__name__ = test_name
    return func
|
081593b57dfc82df328617b22cf778fceffe4beb
| 3,643,157
|
def nstep_td(env, pi, alpha=1, gamma=1, n=1, N_episodes=1000,
             ep_max_length=1000):
    """Evaluates state-value function with n-step TD

    Based on Sutton/Barto, Reinforcement Learning, 2nd ed. p. 144

    Args:
        env: Environment (gym-style: reset() -> state,
            step(action) -> (state, reward, done, info))
        pi: Policy
        alpha: Step size
        gamma: Discount factor
        n: Number of steps
        N_episodes: Run this many episodes
        ep_max_length: Force termination of episode after this number of steps

    Returns:
        v: State-value function (defaultdict mapping state -> value,
           defaulting to 0 for unseen states)
    """
    v = defaultdict(lambda: 0)
    for i_episode in range(N_episodes):
        print("\r> N-step TD: Episode {}/{}".format(
            i_episode+1, N_episodes), end="")
        state = env.reset()
        # rewards is shifted by one: rewards[t+1] is the reward received
        # after acting at time t.
        rewards = [0]
        states = []
        t = 0
        T = np.inf
        done = False
        while t < T and t < ep_max_length:
            if not done:
                action = select_action_policy(pi, state)
                state_new, reward, done, info = env.step(action)
                rewards.append(reward)
                states.append(state)
                state = state_new
                if done:
                    # NOTE(review): the textbook sets T = t+1 on termination;
                    # this variant uses t+n+1 so the loop runs long enough to
                    # flush updates for the last n visited states — confirm
                    # this matches the intended schedule.
                    T = t+n+1
            if t-n >= 0:
                # n-step return for the state visited at time t-n,
                # truncated near episode end via min(n, T-t).
                G = 0
                for i in range(min(n,T-t)):
                    G += gamma**i * rewards[t-n+1+i]
                if t < T-n:
                    # Bootstrap with the current estimate of states[t].
                    G += gamma**n * v[states[t]]
                v[states[t-n]] += alpha*(G - v[states[t-n]])
            t += 1
    print()
    return v
|
86d5ab58d4d185dcbf08a84bbc8eb67051d2af21
| 3,643,159
|
def open_file(path):
    """More robust open: always decodes the file as UTF-8 text."""
    handle = open(path, encoding='utf-8')
    return handle
|
785ab196756365d1f27ce3fcd69d0ba2867887a9
| 3,643,160
|
def test_subscribe(env):
    """Check async. interrupt if a process terminates."""
    def worker(env):
        yield env.timeout(3)
        return 'ohai'

    def watcher(env):
        proc = env.process(worker(env))
        subscribe_at(proc)
        try:
            yield env.event()
        except Interrupt as interrupt:
            # The interrupt's cause carries (terminated process, its value).
            cause_proc, cause_value = interrupt.cause
            assert cause_proc is proc
            assert cause_value == 'ohai'
            assert env.now == 3

    env.process(watcher(env))
    env.run()
|
fa3170cc6167e92195587f06ae65b27da48fa8ff
| 3,643,161
|
import gzip
def _parse_data(f, dtype, shape):
"""Parses the data."""
dtype_big = np.dtype(dtype).newbyteorder(">")
count = np.prod(np.array(shape))
# See: https://github.com/numpy/numpy/issues/13470
use_buffer = type(f) == gzip.GzipFile
if use_buffer:
data = np.frombuffer(f.read(), dtype_big, count)
else:
data = np.fromfile(f, dtype_big, count)
return data.astype(dtype).reshape(shape)
|
42185d2425aa9aa14abc0a61a5bdabc95224d15c
| 3,643,163
|
def test_target(target # type: Any
                ):
    """
    A simple decorator to declare that a case function is associated with a particular target.

    >>> @test_target(int)
    >>> def case_to_test_int():
    >>>     ...

    This is actually an alias for `@case_tags(target)`, that some users may find a bit more readable.

    :param target: for example a function, a class... or a string representing a function, a class...
    :return: the decorator produced by `case_tags(target)`
    """
    # Pure delegation: tags the case function with the single given target.
    return case_tags(target)
|
ebbf94941e7b11224ee4c8ee9665cea231076f5d
| 3,643,164
|
def plot_spatial(adata, color, img_key="hires", show_img=True, **kwargs):
    """Plot spatial abundance of cell types (regulatory programmes) with colour
    gradient and interpolation, from a Visium anndata object.

    This method supports only 7 cell types with these colours (in order, which
    can be changed using reorder_cmap):
    'yellow' 'orange' 'blue' 'green' 'purple' 'grey' 'white'

    :param adata: adata object with spatial coordinates in adata.obsm['spatial']
    :param color: list of adata.obs column names to be plotted
    :param kwargs: arguments to plot_spatial_general
    :return: matplotlib figure
    """
    if show_img is True:
        kwargs["show_img"] = True
        kwargs["img"] = list(adata.uns["spatial"].values())[0]["images"][img_key]
    # Spot coordinates, scaled to the image resolution when metadata exists.
    if "spatial" in adata.uns.keys():
        scale = list(adata.uns["spatial"].values())[0]["scalefactors"][f"tissue_{img_key}_scalef"]
        kwargs["coords"] = adata.obsm["spatial"] * scale
    else:
        kwargs["coords"] = adata.obsm["spatial"]
    # cell abundance values come from the requested obs columns
    return plot_spatial_general(value_df=adata.obs[color], **kwargs)
|
ffbf3cc0f6efdef9bf66b94bac22ef8bf8b39bab
| 3,643,165
|
def stats_by_group(df):
    """Calculate precision/recall/F1 statistics from a groupby'ed dataframe
    carrying tp, fp and fn counts (epsilon-smoothed to avoid division by
    zero)."""
    EPSILON = 1e-10
    result = df[['tp', 'fp', 'fn']].sum().reset_index()
    result['precision'] = (result['tp'] + EPSILON) / (result['tp'] + result['fp'] + EPSILON)
    result['recall'] = (result['tp'] + EPSILON) / (result['tp'] + result['fn'] + EPSILON)
    result['f1'] = (2 * result['precision'] * result['recall']
                    / (result['precision'] + result['recall'] + EPSILON))
    result['count'] = result['tp'] + result['fn']
    result['proportion'] = result['count'] / np.sum(result['count'])
    result['proportion_text'] = (result['proportion'] *
                                 100).round(2).astype(str) + "%"
    return result
|
c137e4076f837f51b0cab1acbe842ff827b62ee8
| 3,643,166
|
def in_this_prow(prow):
    """
    Tell whether this processor inhabits `prow`.

    Args:
        prow: The prow to test against.

    Returns:
        bool: True when `prow` equals this processor's own prow.
    """
    return my_prow() == prow
|
0f159cc9b57f407cbfefe9892689664f6d902f94
| 3,643,168
|
def _keypair_from_file(key_pair_file: str) -> Keypair:
    """Returns a Solana KeyPair from a file"""
    with open(key_pair_file) as kpf:
        raw = kpf.read()
    # The file holds a JSON-style list of byte values: "[12, 34, ...]".
    tokens = raw.replace("[", "").replace("]", "").split(",")
    secret = [int(token) for token in tokens]
    # Only the first 32 values (the secret key seed) are used.
    return Keypair(secret[:32])
|
1fdb4d72945d89db7c8d26c96bcbbd18071258dc
| 3,643,169
|
import binascii
def val_to_bitarray(val, doing):
    """Convert a value into a bitarray.

    Accepted inputs:
      * ``sb.NotSpecified`` -> treated as empty bytes
      * ``bitarray``        -> returned unchanged
      * ``str``             -> interpreted as a hex string (unhexlified)
      * ``bytes``           -> used as-is

    Anything else raises BadConversion; ``doing`` describes the caller's
    intent for the error message.  Note the exact ``type(...) is`` checks:
    subclasses are not accepted.
    """
    if val is sb.NotSpecified:
        val = b""
    if type(val) is bitarray:
        return val
    if type(val) is str:
        val = binascii.unhexlify(val.encode())
    if type(val) is not bytes:
        raise BadConversion("Couldn't get bitarray from a value", value=val, doing=doing)
    # Little-endian bit order — presumably the wire order expected by the
    # rest of the protocol code; confirm before changing.
    b = bitarray(endian="little")
    b.frombytes(val)
    return b
|
17081bb8b382763fa5ace4d7d2969b6eed4581ed
| 3,643,170
|
def unpack_uint64_from(buf, offset=0):
    """Unpack a 64-bit unsigned integer from *buf* at *offset*.

    Delegates to the module-level pre-compiled ``_uint64struct`` (byte
    order as defined at module scope) and returns the single unpacked int.
    """
    return _uint64struct.unpack_from(buf, offset)[0]
|
ce01d76d18e45a42687d997459da9113d9e3e45f
| 3,643,171
|
def del_none(dictionary):
    """
    Recursively delete from the dictionary all entries whose values are None.

    Args:
        dictionary (dict): input dictionary

    Returns:
        dict: the same dictionary, mutated in place (returned for chaining)

    Note:
        This function changes the input parameter in place.  Nested dicts
        are cleaned recursively; dicts inside lists are left untouched.
    """
    # Snapshot the keys so deleting while looping is safe.
    for key in list(dictionary.keys()):
        value = dictionary[key]
        if value is None:
            del dictionary[key]
        elif isinstance(value, dict):
            del_none(value)
    return dictionary
|
48b76272ed20bbee38b5293ede9f5d824950aec5
| 3,643,172
|
def get_table_header(driver, max_columns=10):
    """Return the table's column headers as a list of strings.

    Args:
        driver: selenium WebDriver positioned on the page with the table.
        max_columns: cap on how many <th> cells to keep (default 10,
            matching the previously hard-coded limit).

    Returns:
        list[str]: text of the first *max_columns* header cells.
    """
    header_cells = driver.find_elements(By.TAG_NAME, value='th')
    # Slicing replaces the old enumerate/index-compare filtering and
    # generalizes the hard-coded column limit.
    return [cell.text for cell in header_cells[:max_columns]]
|
631e71e357beb37f50defe16fe894f5be3356516
| 3,643,174
|
from rx.core.operators.connectable.refcount import _ref_count
from typing import Callable
def ref_count() -> Callable[[ConnectableObservable], Observable]:
    """Returns an observable sequence that stays connected to the
    source as long as there is at least one subscription to the
    observable sequence.

    Thin public wrapper around the internal ``_ref_count`` operator
    factory; takes no configuration and simply forwards the call.
    """
    return _ref_count()
|
e6f8b21e582d46fab75d9013121d764072630390
| 3,643,175
|
def radius_provider_modify(handle, name, **kwargs):
    """
    Modifies a radius provider.

    Args:
        handle (UcsHandle)
        name (string): radius provider name
        **kwargs: key-value pairs of managed object (MO) properties. Use
            'print(ucscoreutils.get_meta_info(<classid>).config_props)'
            to list all configurable properties of the class.

    Returns:
        AaaRadiusProvider: managed object

    Raises:
        UcsOperationError: if AaaRadiusProvider is not present

    Example:
        radius_provider_modify(handle, name="test_radius_prov", timeout="5")
    """
    provider = radius_provider_get(handle, name, caller="radius_provider_modify")
    provider.set_prop_multiple(**kwargs)
    # Persist the updated MO back to the endpoint.
    handle.set_mo(provider)
    handle.commit()
    return provider
|
9a5c5d62ff60a3a3a8499e4aaa944f758dc49f83
| 3,643,177
|
def _read_table(table_node):
    """Return a TableData object for the 'table' element.

    Expects *table_node* to contain at most one 'th' child (the header)
    followed by any number of 'tr' children (the rows); raises ValueError
    on multiple headers or on a header appearing after rows.  Nodes with
    other tags are ignored.

    NOTE(review): ``header`` starts as a list but is replaced by the
    stripped text of the single 'th' node, so create_table() receives a
    string header alongside a list of row strings — confirm that is what
    create_table expects.
    """
    header = []
    rows = []
    for node in table_node:
        if node.tag == "th":
            if header:
                raise ValueError("cannot handle multiple headers")
            elif rows:
                raise ValueError("encountered header after rows")
            else:
                header = node.text.strip()
        elif node.tag == "tr":
            rows.append(node.text.strip())
    return create_table(header, rows)
|
e6ef6e5d5ec99ea2b15ddfcae61b4dd817f8232b
| 3,643,178
|
def postorder(root: Node):
    """
    Post-order traversal visits left subtree, right subtree, root node.

    >>> postorder(make_tree())
    [4, 5, 2, 3, 1]
    """
    if not root:
        return []
    return postorder(root.left) + postorder(root.right) + [root.data]
|
ddeaa6e0f2f466284d69908dfc7eb67bdc6748c8
| 3,643,179
|
def merge_triangulations(groups):
    """
    Merge each pair of triangulations in *groups* into one.

    Each entry of *groups* holds one or two triangulations; pairs are
    stitched together (lowest common tangent, combine, then zip), while
    singletons pass through unchanged.

    Parameters
    ----------
    groups : list
        List of pairs (or singletons) of triangulations.

    Returns
    -------
    list
        The merged triangulations, re-grouped into pairs for the next
        merge round.
    """
    merged = []
    for pair in groups:
        if len(pair) == 2:
            left, right = pair
            # Find the first edges connecting the separate triangulations.
            ldi, rdi = lowest_common_tangent(left, right)
            # Combine the two hulls into a single edge set, then fill in
            # the edges between them starting from the base edge.
            base, combined = combine_triangulations(ldi, rdi, left, right)
            merged.append(zip_hulls(base, combined))
        else:
            merged.append(pair[0])
    return [merged[i:i + 2] for i in range(0, len(merged), 2)]
|
def compute_relative_pose(cam_pose, ref_pose):
    """Compute the relative pose between two cameras.

    Args:
        cam_pose (np.ndarray): Extrinsic matrix of the camera of interest
            C_i (3,4); maps world points into that camera's frame,
            i.e. x_i = C_i @ x_w (with homogeneous coordinates).
        ref_pose (np.ndarray): Extrinsic matrix of the reference camera
            C_r (3,4).

    Returns:
        np.ndarray: relative pose M of size (3,4) with x_i = M @ x_r.
    """
    rot_i, trans_i = cam_pose[:, :3], cam_pose[:, 3]
    rot_r, trans_r = ref_pose[:, :3], ref_pose[:, 3]
    # R_rel = R_i R_r^T ;  t_rel = t_i - R_rel t_r
    rot_rel = rot_i @ rot_r.T
    trans_rel = trans_i - rot_rel @ trans_r
    relative_pose = np.empty((3, 4), dtype=np.float64)
    relative_pose[:, :3] = rot_rel
    relative_pose[:, 3] = trans_rel
    return relative_pose
|
def determine_clim_by_standard_deviation(color_data, n_std_dev=2.5):
    """Pick colour limits a fixed number of standard deviations around the
    mean of *color_data* (NaNs ignored).  Useful when outliers would
    otherwise stretch the colour scale and hide most of the data.
    Returns (vmin, vmax), suitable for a plotting routine or plt.clim().
    """
    center = np.nanmean(color_data)
    half_width = n_std_dev * np.nanstd(color_data)
    return center - half_width, center + half_width
|
1a8b1240c50a01f645862b7fce76bc93c62bcb26
| 3,643,185
|
def ec_double(point: ECPoint, alpha: int, p: int) -> ECPoint:
    """
    Doubles a point on an elliptic curve with the equation y^2 = x^3 + alpha*x + beta mod p.
    Assumes the point is given in affine form (x, y) and has y != 0.
    """
    px, py = point
    assert py % p != 0
    # Tangent slope at the point: (3x^2 + alpha) / (2y) mod p.
    slope = div_mod(3 * px * px + alpha, 2 * py, p)
    new_x = (slope * slope - 2 * px) % p
    new_y = (slope * (px - new_x) - py) % p
    return new_x, new_y
|
def scaled_dot_product_attention(q, k, v, mask):
    """
    Compute scaled dot-product attention.

    q, k, v must have matching leading dimensions, and k/v must share the
    penultimate dimension (seq_len_k == seq_len_v).  The mask (padding or
    look-ahead) may be None; otherwise it must broadcast against
    (..., seq_len_q, seq_len_k) and carry the same number of leading
    dimensions as q, k, v (e.g. include num_heads exactly when they do).

    Args:
        q: query, shape (..., seq_len_q, depth)
        k: key, shape (..., seq_len_k, depth)
        v: value, shape (..., seq_len_v, depth_v)
        mask: float tensor broadcastable to (..., seq_len_q, seq_len_k),
            or None.

    Returns:
        (output, scaled_attention_logits): context vectors and the
        (masked) pre-softmax logits.
    """
    # Similarity of every query against every key: (..., seq_len_q, seq_len_k)
    logits = tf.matmul(q, k, transpose_b=True)
    # Scale by sqrt(depth) to keep the softmax in a reasonable range.
    depth = tf.cast(tf.shape(k)[-1], tf.float32)
    logits = logits / tf.math.sqrt(depth)
    if mask is not None:
        # Push masked positions towards -inf so softmax zeroes them out.
        logits += (mask * -1e9)
    # (..., seq_len_q, seq_len_k)
    weights = tf.nn.softmax(logits, axis=-1)
    # (..., seq_len_q, depth_v)
    output = tf.matmul(weights, v)
    return output, logits
|
22110522c4f33ec30c076240ade20f5b66cb3fcd
| 3,643,187
|
import re
def _parse_message(message):
    """Split *message* into separator strings and _ParseTag tuples.

    Tags look like ^^type:name:format^^ and are matched by the module
    pattern _INTERPOLATION_PATTERN; the text between (and after) tags
    becomes the separators.

    Supported tags after node:<node_name>
      file: Replaced with the filename in which the node was defined.
      line: Replaced by the line number at which the node was defined.

    Args:
        message: String to parse

    Returns:
        (list of separator strings, list of _ParseTags).
        For "123^^node:Foo:${file}^^456" this returns
        (["123", "456"], [_ParseTag("node", "Foo", "${file}")]).
    """
    separators = []
    tags = []
    cursor = 0
    while cursor < len(message):
        match = re.match(_INTERPOLATION_PATTERN, message[cursor:])
        if not match:
            break
        separators.append(match.group(1))
        tags.append(_ParseTag(match.group(3), match.group(4), match.group(5)))
        cursor += match.end()
    # Whatever follows the last tag is the final separator.
    separators.append(message[cursor:])
    return separators, tags
|
c961f2a49a21682eb247d4138646abd86135c560
| 3,643,188
|
def model_fn(features, labels, mode, params):
    """TPUEstimator model function: embed text blocks with the retriever.

    Loads the retriever hub module from FLAGS.retriever_module_path and
    runs its "projected" signature over the batch's block ids, mask and
    segment ids, returning the projected embeddings as the only
    prediction.
    """
    del labels, params  # unused: prediction-only model function
    encoder_module = hub.Module(FLAGS.retriever_module_path)
    block_emb = encoder_module(
        inputs=dict(
            input_ids=features["block_ids"],
            input_mask=features["block_mask"],
            segment_ids=features["block_segment_ids"]),
        signature="projected")
    predictions = dict(block_emb=block_emb)
    return tf.estimator.tpu.TPUEstimatorSpec(mode=mode, predictions=predictions)
|
ff40f74501f26e880a9cf1421a608240fd059fb8
| 3,643,189
|
def get_rotated_coords(vec, coords):
    """Rotate *coords* so that the z-axis aligns with the unit vector *vec*.

    Args:
        vec: unit vector (cartesian) defining the target z-axis.
        coords: array of coordinates to rotate, one point per row.

    Returns:
        rot_coords: the rotated coordinates, same shape as *coords*.
    """
    rotation = get_rotation_matrix(vec)
    # Transpose so the matrix acts on column vectors, then transpose back.
    return (rotation @ coords.T).T
|
f8504df4b7afef524e4147ce6303055a4b7a3cea
| 3,643,190
|
def merge_on_pids(all_pids, pdict, ddict):
    """Merge per-feature data tensors on the common set of patient ids.

    Args:
        all_pids: list of all patient ids.
        pdict: dict mapping feature name -> sequence of patient ids,
            row-aligned with the arrays in ddict[fname].
        ddict: dict mapping feature name -> (data, obs) arrays, each of
            shape (num_patients, T_feature).

    Returns:
        (pids, data, obs): pids is the sorted array of ids present in every
        feature; data/obs have shape (len(pids), maxT, num_features) where
        maxT is the widest time dimension across features. Features with
        fewer time steps are zero-padded on the time axis.
    """
    # Intersect patient ids across every feature.
    common_ids = set(all_pids)
    for fname in pdict:
        common_ids &= set(pdict[fname])
    list_ids = sorted(common_ids)
    print('merge_on_pids: intersection of patient ids is', len(list_ids))
    # The widest time dimension across features sizes the padded tensors.
    maxT = max((ddict[fname][0].shape[1] for fname in ddict), default=0)
    data = np.zeros((len(list_ids), maxT, len(pdict)))
    obs = np.zeros_like(data)
    for f_idx, fname in enumerate(pdict):
        pids_f = list(pdict[fname])
        data_f, obs_f = ddict[fname]
        # Dict lookup gives O(n) row mapping; the previous list.index-based
        # comprehension was O(n^2) in the number of patients.
        row_of = {pid: i for i, pid in enumerate(pids_f)}
        index_map = [row_of[pid] for pid in list_ids]
        # Write only as many time steps as this feature actually has; a
        # feature narrower than maxT previously caused a broadcast error.
        T_f = min(data_f.shape[1], maxT)
        data[:, :T_f, f_idx] = data_f[index_map, :T_f]
        obs[:, :T_f, f_idx] = obs_f[index_map, :T_f]
    print('merge_on_pids: after merging, pat_ids, data, obs:',
          len(list_ids), data.shape, obs.shape)
    return np.array(list_ids), data, obs
|
d0968de287a1c62ebb7638e5f1af7bd63041665c
| 3,643,191
|
import requests
import numpy
def do_inference(hostport, work_dir, concurrency, num_tests):
  """Tests PredictionService over Tensor-Bridge.

  Args:
    hostport: Host:port address of the PredictionService.
    work_dir: The full path of working directory for test data set.
    concurrency: Maximum number of concurrent requests.
    num_tests: Number of test images to use.

  Returns:
    The classification error rate.

  Raises:
    IOError: An error occurred processing test data set.
  """
  test_data = mnist_input_data.read_data_sets(work_dir).test
  num_errors = 0
  for _ in range(num_tests):
    image, label = test_data.next_batch(1)
    # Build a PredictRequest proto for this single image.
    request = predict_pb2.PredictRequest()
    request.model_spec.name = 'default'
    request.model_spec.signature_name = 'predict_images'
    request.inputs['images'].CopyFrom(
        tf.contrib.util.make_tensor_proto(image[0], shape=[1, image[0].size]))
    # Tensor-Bridge speaks JSON over HTTP, so serialize the proto first.
    payload = MessageToDict(request,
                            preserving_proto_field_name=True,
                            including_default_value_fields=True)
    response = requests.post(hostport + '/tensor-bridge/v1/prediction',
                             json=payload)
    result = ParseDict(response.json(),
                       predict_pb2.PredictResponse(),
                       ignore_unknown_fields=True)
    scores = numpy.array(result.outputs['scores'].float_val)
    if label[0] != numpy.argmax(scores):
      num_errors += 1
  return num_errors / num_tests
|
ed2cc97ccaf6e3a8be2ae690b190640c67365f9d
| 3,643,193
|
def test_true() -> None:
    """A sanity check that always passes, verifying the test runner works.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # Trivially true assertion; the message would only show on failure.
    assert True, "This test should always pass."
    return None
|
f08cb5feb4e450b10b58fe32d751bf45985df84c
| 3,643,195
|
def parseStylesheetFile(filename):
    """Load and parse an XSLT stylesheet.

    Args:
        filename: path to the stylesheet file.

    Returns:
        A ``stylesheet`` wrapper around the parsed stylesheet, or None if
        the underlying parser failed.
    """
    ret = libxsltmod.xsltParseStylesheetFile(filename)
    # Identity comparison is the correct, idiomatic None check (PEP 8);
    # `== None` invokes __eq__ and can misfire on exotic objects.
    if ret is None:
        return None
    return stylesheet(_obj=ret)
|
9e12e7ec5ace9eafe50e595d544bc09ff7ccef7d
| 3,643,196
|
import torch
def tensor_to_index(tensor: torch.tensor, dim=1) -> np.ndarray:
    """Converts a tensor to an array of category index"""
    # argmax along `dim` selects the winning category per position.
    indices = torch.argmax(tensor, dim=dim)
    return tensor_to_longs(indices)
|
7d72b18086a46c4f1c3f8cebaee28ddac12cf31c
| 3,643,197
|
def _le_(x: symbol, y: symbol) -> symbol:
    """Less-than-or-equal comparison lifted to symbols.

    >>> isinstance(le_(symbol(3), symbol(2)), symbol)
    True
    >>> le_.instance(3, 2)
    False
    """
    result = x <= y
    return result
|
336b164cbc249a1a9e9a3d965950a52ac01292ab
| 3,643,199
|
from typing import List
def convert_country_codes(source_codes: List[str], source_format: str, target_format: str,
                          throw_error: bool = False) -> List[str]:
    """
    Convert country codes, e.g., from ISO_2 to full name.

    Parameters
    ----------
    source_codes: List[str]
        List of codes to convert.
    source_format: str
        Format of the source codes (alpha_2, alpha_3, name, ...)
    target_format: str
        Format to which code must be converted (alpha_2, alpha_3, name, ...)
    throw_error: bool (default: False)
        Whether to throw an error if an attribute does not exist.

    Returns
    -------
    target_codes: List[str]
        List of converted codes (np.nan for codes that could not be
        converted when throw_error is False).
    """
    converted = []
    for code in source_codes:
        try:
            record = pyc.countries.get(**{source_format: code})
            if record is None:
                raise KeyError(f"Data is not available for code {code} of type {source_format}.")
            converted.append(getattr(record, target_format))
        except (KeyError, AttributeError) as e:
            if throw_error:
                raise e
            # Lookup failed: record the sentinel instead of a code.
            converted.append(np.nan)
    return converted
|
7589dec9ccec5edc7bf5ea356b40fac3898c7c77
| 3,643,200
|
def get_simple_lca_length(std_tree, test_gold_dict, node1, node2):
    """Return the LCA path distance between node1 and node2's counterpart.

    node2 is first mapped onto the std tree via test_gold_dict; the distance
    is then root_length(a) + root_length(b) - 2 * root_length(lca(a, b)).

    The std tree must be LCA-preprocessed before calling this, e.g.::

        std_tree.get_lca_preprocess()

    Raises:
        Exception: if the std tree has not been LCA-initialized, or node2
            has no counterpart in the std tree.
    """
    if std_tree.depth_array is None:
        raise Exception("[Error: ] std has not been lca initialized yet")
    id_to_node = std_tree.get_id_node_dict()
    std_node = node1
    gold_node = test_gold_dict[node2]
    if gold_node is None:
        raise Exception("[Error: ]Can not find the corresponding node in std tree. ")
    lca_id = std_tree.get_lca(std_node.get_id(), gold_node.get_id())
    if lca_id == -1:
        # No common ancestor found: report "infinite" distance.
        return config_utils.DINF
    lca_node = id_to_node[lca_id]
    return std_node.root_length + gold_node.root_length - 2 * lca_node.root_length
|
8433259814fe656bdbdd6997ca613b30c458f8b8
| 3,643,201
|
def edit_catagory(catagory_id):
    """Rename an existing catagory owned by the current guest."""
    new_name = request.form.get('name')
    guest_id = session['guest_id']
    # Reject the rename if this guest already owns a catagory of that name.
    # NOTE(review): 404 is an odd status for a name conflict — confirm intent.
    duplicate = db.session.query(Catalogs).filter_by(name=new_name,
                                                     guest_id=guest_id).scalar()
    if duplicate:
        return abort(404)
    if new_name == '':
        return redirect(url_for('home.home'))
    catagory = db.session.query(Catalogs).filter_by(id=catagory_id).one()
    oldname = catagory.name
    catagory.name = new_name
    db.session.add(catagory)
    db.session.commit()
    flash(f"Catagory {oldname} has been updated to {catagory.name}")
    return redirect(url_for('home.home'))
|
a0d342490881968f39cf4636fb424176c6608e4a
| 3,643,202
|
def match_patterns(name, name_w_pattern, patterns):
    """Match patterns to filename.

    Given a SPICE kernel name, a SPICE Kernel name with patterns, and the
    possible patterns, provide a dictionary with the patterns as keys and
    the patterns values as value after matching it between the SPICE Kernel
    name with patterns and without patterns.

    For example, given the following arguments:

      * name: ``insight_v01.tm``
      * name_w_pattern: ``insight_v$VERSION.tm``

    The function will return: ``{VERSION: '01'}``

    :param name: Name of the SPICE Kernel
    :type name: str
    :param name_w_pattern: Name of the SPICE Kernel with patterns
    :type name_w_pattern: str
    :param patterns: List of the possible patterns present in the
        SPICE Kernel name with patterns; each entry is a dict with
        ``#text`` (pattern name) and ``@length`` keys
    :type patterns: list
    :return: Dictionary providing the patterns and their value as defined
        by the SPICE kernel
    :rtype: dict
    :raises ValueError: if ``name`` diverges from ``name_w_pattern``
        outside of any pattern placeholder
    """
    #
    # Record where each pattern occurs so that values extracted
    # left-to-right can later be correlated with the right pattern name,
    # and expand each "$NAME" placeholder into a run of '$' characters of
    # the declared length.
    #
    pattern_positions = {}
    name_check = name_w_pattern
    for pattern in patterns:
        pattern_positions[pattern["#text"]] = name_w_pattern.find(pattern["#text"])
        name_check = name_check.replace(
            "$" + pattern["#text"], "$" * int(pattern["@length"])
        )
    #
    # Pattern names ordered by their position in the file name.
    #
    ordered_patterns = sorted(pattern_positions, key=pattern_positions.get)
    #
    # Walk both strings in lockstep, collecting the characters of `name`
    # that fall under '$' runs in `name_check`.
    #
    values_list = []
    value = ""
    in_value = False
    for i, check_char in enumerate(name_check):
        if check_char == name[i]:
            if in_value:
                # The placeholder run just ended; store its value.
                in_value = False
                values_list.append(value)
                value = ""
        elif check_char == "$":
            in_value = True
            value += name[i]
        else:
            # Previously a bare `raise` (RuntimeError with no context);
            # raise a descriptive error instead.
            raise ValueError(
                f"Kernel name '{name}' does not match pattern "
                f"'{name_w_pattern}'."
            )
    #
    # A placeholder that extends to the end of the name has no trailing
    # literal character to close it, so flush it explicitly (previously
    # such a value was silently dropped).
    #
    if in_value:
        values_list.append(value)
    #
    # Correlate the values with the pattern names in positional order.
    #
    return {ordered_patterns[i]: values_list[i] for i in range(len(values_list))}
|
a54b7f1fcda67b5649f92a21f4711874dd226ee9
| 3,643,203
|
def _generate_good_delivery_token_email(request, good_delivery, msg=''):
    """
    Send an email to user with good_delivery activation URL
    and return the token

    :type request: HttpRequest
    :type good_delivery: GoodDelivery
    :type msg: String

    :param request: current HttpRequest
    :param good_delivery: good delivery to confirm
    :param msg: message to send

    :return: generated token, or None if the recipient has no email address
    """
    recipient = good_delivery.delivered_to
    if not recipient.email:
        # No address to send to: nothing is generated.
        return None
    # Build the good_delivery JWT, attach it to the activation URL and mail it.
    token = good_delivery.build_jwt()
    base_uri = request.build_absolute_uri(reverse('good_delivery:user_use_token'))
    mail_params = {'hostname': settings.HOSTNAME,
                   'user': recipient,
                   'url': '{}?token={}'.format(base_uri, token),
                   'added_text': msg
                   }
    m_subject = _('{} - {}').format(settings.HOSTNAME, good_delivery)
    send_custom_mail(subject=m_subject,
                     recipients=[recipient],
                     body=settings.NEW_DELIVERY_WITH_TOKEN_CREATED,
                     params=mail_params)
    return token
|
4567c3d0ad3f2d65c850ed5291e602cb552b11cb
| 3,643,204
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.