| content (string, length 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
from typing import Tuple
from flask import request  # assumed: this handler runs inside a Flask app context
def disconnect() -> Tuple[str, int]:
"""Deletes the DroneServerThread with a given id.
Iterates over all the drones in the shared list and deletes the one with a
matching drone_id. If none are found returns an error.
Request:
drone_id (str): UUID of the drone.
Response:
Tuple[str, int]: Response status.
200, "OK" - Drone disconnected created successfully.
400, "Bad Request" - Incorrect drone_id.
"""
# Validate the request JSON and extract the drone id
if 'uuid' not in request.json or request.json["uuid"] == "":
return "Bad Request", 400
drone_id = request.json["uuid"]
# Iterate over the shared drone list and remove the matching entry
drones_lock, drone_ts = common_variables.get_drone_ts()
drones_lock.acquire()
for drone in drone_ts:
if drone_id == drone.drone_id:
ports_lock, ports_assigned = common_variables.get_ports_assigned()
ports_assigned.remove(drone.ports[0])
ports_assigned.remove(drone.ports[1])
ports_assigned.remove(drone.ports[2])
drone_ts.remove(drone)
drones_lock.release()
return "OK", 200
drones_lock.release()
return "Bad Request", 400
|
c69192ccdc73c27089952d3a27c3ff79dfb932a5
| 3,639,948
|
import torch
def get_graph_feature(x, k=20, idx=None, x_coord=None):
"""
Args:
x: (B, d, N)
"""
batch_size = x.size(0)
num_points = x.size(2)
x = x.view(batch_size, -1, num_points)
if idx is None:
if x_coord is None: # dynamic knn graph
idx = knn(x, k=k)
else: # fixed knn graph with input point coordinates
idx = knn(x_coord, k=k)
if k is None: k = idx.size(-1)
_, num_dims, _ = x.size()
feature = gather(x, idx)
x = x.transpose(2, 1).contiguous()
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature-x, x), dim=3).permute(0, 3, 1, 2).contiguous() # (B, 2*d, N, k)
return feature
|
e895a1663fb716846af0976a3203045509591a6e
| 3,639,949
|
import numpy as np
def get_markers(
image_array: np.ndarray,
evened_selem_size: int = 4,
markers_contrast_times: float = 15,
markers_sd: float = 0.25,
) -> np.ndarray:
"""Finds the highest and lowest grey scale values for image flooding."""
selem = smo.disk(evened_selem_size)
evened = sfi.rank.mean_bilateral(
inc_contrast(image_array, contrast_times=markers_contrast_times), selem
)
# Markers defined by highest and lowest grey levels set as markers
high = np.max(evened)
low = np.min(evened)
std = np.std(evened)
neatarray = np.array(image_array)
markers: np.ndarray = np.zeros_like(neatarray)
# Level reduced/decreased by 1/4 SD
markers[evened < low + (markers_sd * std)] = 3
markers[evened > high - (markers_sd * std)] = 2
return markers
|
865d2f5170b85a54902aabdfaee61199359e7d90
| 3,639,950
|
import pandas as pd
def pd_bigdata_read_csv(file, **pd_read_csv_params):
"""
Read a large CSV in chunks: the speed gain is marginal,
but memory usage drops significantly.
"""
reader = pd.read_csv(file, **pd_read_csv_params, iterator=True)
loop = True
try:
chunk_size = pd_read_csv_params['chunksize']
except KeyError:
chunk_size = 1000000
chunks = []
while loop:
try:
chunk = reader.get_chunk(chunk_size)
chunks.append(chunk)
except StopIteration:
loop = False
print('[Info]: Iteration is stopped.')
df = pd.concat(chunks, ignore_index=True, axis=0)
return df
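A minimal usage sketch, assuming a hypothetical large CSV at 'big.csv'; any pandas.read_csv keyword can be forwarded:
df = pd_bigdata_read_csv('big.csv', chunksize=200_000, usecols=['id', 'value'])
print(df.shape)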
|
0350e543bc10da5165b97b18c83d6f848cbbc503
| 3,639,951
|
import numpy
def PCA(Y_name, input_dim):
"""
Principal component analysis: maximum likelihood solution by SVD
Adapted from GPy.util.linalg
Arguments
---------
:param Y_name: path to a CSV file containing the NxD data matrix
:param input_dim: int, dimension of projection
Returns
-------
:rval X: Nxinput_dim np.array of dimensionality-reduced data
(the mapping W from X to Y is computed internally but not returned)
"""
Y = numpy.genfromtxt(Y_name, delimiter=',')
Z = numpy.linalg.svd(Y - Y.mean(axis=0), full_matrices=False)
[X, W] = [Z[0][:, 0:input_dim], numpy.dot(numpy.diag(Z[1]), Z[2]).T[:, 0:input_dim]]
v = X.std(axis=0)
X /= v
W *= v
return X
|
0d49a1c8470cba2d6d56a4ce191449b3106e8a93
| 3,639,952
|
import collections
def _get_sequence(value, n, channel_index, name):
"""Formats a value input for gen_nn_ops."""
# Performance is fast-pathed for common cases:
# `None`, `list`, `tuple` and `int`.
if value is None:
return [1] * (n + 2)
# Always convert `value` to a `list`.
if isinstance(value, list):
pass
elif isinstance(value, tuple):
value = list(value)
elif isinstance(value, int):
value = [value]
elif not isinstance(value, collections.abc.Sized):
value = [value]
else:
value = list(value) # Try casting to a list.
len_value = len(value)
# Fully specified, including batch and channel dims.
if len_value == n + 2:
return value
# Apply value to spatial dims only.
if len_value == 1:
value = value * n # Broadcast to spatial dimensions.
elif len_value != n:
raise ValueError('{} should be of length 1, {} or {} but was {}'.format(
name, n, n + 2, len_value))
# Add batch and channel dims (always 1).
if channel_index == 1:
return [1, 1] + value
else:
return [1] + value + [1]
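For instance, with two spatial dimensions, a scalar is broadcast to the spatial dims and padded with batch/channel ones:
print(_get_sequence(2, 2, channel_index=3, name='strides'))       # [1, 2, 2, 1]
print(_get_sequence([3, 1], 2, channel_index=1, name='strides'))  # [1, 1, 3, 1]
print(_get_sequence(None, 2, channel_index=1, name='strides'))    # [1, 1, 1, 1]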
|
e2ac408cf299f186bb74fa4b1decc885b1229f9d
| 3,639,954
|
from torch import nn
from torch.nn import init
def make_linear(input_dim, output_dim, bias=True, std=0.02):
"""
Parameters
----------
input_dim: int
output_dim: int
bias: bool
std: float
Returns
-------
torch.nn.modules.linear.Linear
"""
linear = nn.Linear(input_dim, output_dim, bias)
init.normal_(linear.weight, std=std)
if bias:
init.zeros_(linear.bias)
return linear
|
57361cadbf3121501da65c3f2f37e61404bc26e3
| 3,639,955
|
def matnorm_logp_conditional_col(x, row_cov, col_cov, cond, cond_cov):
"""
Log likelihood for centered conditional matrix-variate normal density.
Consider the following partitioned matrix-normal density:
.. math::
\\begin{bmatrix}
\\operatorname{vec}\\left[\\mathbf{X}_{i j}\\right] \\\\
\\operatorname{vec}\\left[\\mathbf{Y}_{i k}\\right]\\end{bmatrix}
\\sim \\mathcal{N}\\left(0,\\begin{bmatrix} \\Sigma_{j} \\otimes
\\Sigma_{i} & \\Sigma_{j k} \\otimes \\Sigma_{i}\\\\
\\Sigma_{k j} \\otimes \\Sigma_{i} & \\Sigma_{k} \\otimes \\Sigma_{i}
\\end{bmatrix}\\right)
Then we can write the conditional:
.. math::
\\mathbf{X}_{i j} \\mid \\mathbf{Y}_{i k} \\sim \\mathcal{M}\\
\\mathcal{N}\\left(0, \\Sigma_{i}, \\Sigma_{j}-\\Sigma_{j k}\\
\\Sigma_{k}^{-1} \\Sigma_{k j}\\right)
This function efficiently computes the conditionals by unpacking some
info in the covariance classes and then dispatching to
`solve_det_conditional`.
Parameters
---------------
x: tf.Tensor
Observation tensor
row_cov: CovBase
Row covariance (:math:`\\Sigma_{i}` in the notation above).
col_cov: CovBase
Column covariance (:math:`\\Sigma_{j}` in the notation above).
cond: tf.Tensor
Off-diagonal block of the partitioned covariance (:math:`\\Sigma_{jk}`
in the notation above).
cond_cov: CovBase
Covariance of conditioning variable (:math:`\\Sigma_{k}` in the
notation above).
"""
rowsize = tf.cast(tf.shape(input=x)[0], "float64")
colsize = tf.cast(tf.shape(input=x)[1], "float64")
solve_row = row_cov.solve(x)
logdet_row = row_cov.logdet
solve_col, logdet_col = solve_det_conditional(
tf.transpose(a=x), col_cov, tf.transpose(a=cond), cond_cov
)
return _mnorm_logp_internal(
colsize, rowsize, logdet_row, logdet_col, solve_row, solve_col
)
|
0970ba5a2f67a6156a6077dbd05e2d1cca331476
| 3,639,956
|
def get_next_by_date(name, regexp):
"""Get the next page by page publishing date"""
p = Page.get(Page.name == name)
query = (Page.select(Page.name, Page.title)
.where(Page.pubtime > p.pubtime)
.order_by(Page.pubtime.asc())
.dicts())
for p in filter(lambda x: regexp.match(x['name']), query):
return p
|
16e956508c1ccbdf444e84ad769848124449ab84
| 3,639,958
|
def generate_raw_mantissa_extraction(optree):
""" generate an operation graph to extraction the significand field
of floating-point node <optree> (may be scalar or vector).
The implicit bit is not injected in this raw version """
if optree.precision.is_vector_format():
base_precision = optree.precision.get_scalar_format()
vector_size = optree.precision.get_vector_size()
int_precision = {
v2float32: v2int32,
v2float64: v2int64,
v4float32: v4int32,
v4float64: v4int64,
v8float32: v8int32,
v8float64: v8int64,
}[optree.precision]
else:
int_precision = optree.precision.get_integer_format()
base_precision = optree.precision
return generate_field_extraction(
optree,
int_precision,
0,
base_precision.get_field_size() - 1,
)
|
f1f0b38f0c68e997ade20ead827f71427104d138
| 3,639,960
|
import time
def read_temp_f(p):
"""
read_temp_f
Returns the temperature from the probe in degrees Fahrenheit
p = 1-Wire device file
"""
lines = read_temp_raw(p)
while lines[0].strip()[-3:] != 'YES':
time.sleep(0.2)
lines = read_temp_raw(p)
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos + 2:]
temp_c = float(temp_string) / 1000.0 # the t= value is reported in millidegrees Celsius
temp_f = temp_c * 9.0 / 5.0 + 32.0
return temp_f
|
52114550688f06c8f58dfe37f7c0faa4d93715a2
| 3,639,961
|
def count_parameters(model, trainable_only=True, is_dict=False):
"""
Count number of parameters in a model or state dictionary
:param model:
:param trainable_only:
:param is_dict:
:return:
"""
if is_dict:
return sum(np.prod(list(model[k].size())) for k in model)
if trainable_only:
return sum(p.numel() for p in model.parameters() if p.requires_grad)
else:
return sum(p.numel() for p in model.parameters())
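A minimal sketch of counting a tiny model, assuming PyTorch is installed:
import torch.nn as nn
tiny = nn.Linear(10, 5)  # 10*5 weights + 5 biases, all trainable
print(count_parameters(tiny))  # 55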
|
8e95c3302eca217c694bb4c5262c0196254505fb
| 3,639,962
|
def setup_conf(conf=cfg.CONF):
"""Setup the cfg for the status check utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during checks.
"""
common_config.register_common_config_options()
neutron_conf_base.register_core_common_config_opts(conf)
neutron_conf_service.register_service_opts(
neutron_conf_service.SERVICE_OPTS, cfg.CONF)
db_options.set_defaults(conf)
return conf
|
c5ebcc4516e317fc558d8bddeb74343b7006c999
| 3,639,963
|
import pathlib
def release_kind():
"""
Determine which release to make based on the files in the
changelog.
"""
# use min here as 'major' < 'minor' < 'patch'
return min(
'major' if 'breaking' in file.name else
'minor' if 'change' in file.name else
'patch'
for file in pathlib.Path('changelog.d').iterdir()
)
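The min() trick relies on the lexicographic ordering 'major' < 'minor' < 'patch'; a sketch of the same selection over hypothetical changelog fragment names, without touching the filesystem:
names = ['101.breaking.rst', '102.change.rst', '103.bugfix.rst']
kinds = ['major' if 'breaking' in n else 'minor' if 'change' in n else 'patch' for n in names]
print(min(kinds))  # 'major'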
|
115f75c1e0f1e8b02916db518e3983462d9bc19c
| 3,639,964
|
import re
def edit_text_file(filepath: str, regex_search_string: str, replace_string: str):
"""
This function is used to replace text inside a file.
:param filepath: the path where the file is located.
:param regex_search_string: string used in the regular expression to find what has to be replaced.
:param replace_string: the string which will replace all matches found using regex_search_string.
:return: None
:raise RuntimeError: if regex_search_string doesn't find any match.
"""
# open the file and read the content
with open(filepath, "r") as f:
text_file = f.read()
# find all matches
matches = list(re.finditer(regex_search_string, text_file))
if not matches:
raise RuntimeError("No match has been found using the given regex_search_string!")
# replace all matches with replace_string
for match in matches:
text_file = text_file.replace(match.group(0), replace_string)
# overwrite the file
with open(filepath, "w") as f:
f.write(text_file)
return None
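A small usage sketch against a throwaway file (the path is created on the fly and is not part of the original code):
import os, tempfile
path = os.path.join(tempfile.mkdtemp(), 'config.txt')
open(path, 'w').write('version = 1.0\nname = demo\n')
edit_text_file(path, r'version = \d+\.\d+', 'version = 2.0')
print(open(path).read())  # version = 2.0, name = demo unchanged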
|
e0f5945a96f755a9c289262c3d19552c0e1b40fd
| 3,639,965
|
def find_sums(sheet):
"""
Tallies the total assets and total liabilities for each person.
RETURNS:
Tuple of assets and liabilities.
"""
pos = 0
neg = 0
for row in sheet:
if row[-1] > 0:
pos += row[-1]
else:
neg += row[-1]
return pos, neg
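A small worked example, assuming each row carries the signed amount in its last column:
sheet = [['cash', 100], ['car', 2500], ['loan', -300]]
assets, liabilities = find_sums(sheet)
print(assets, liabilities)  # 2600 -300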
|
351e13d6915288268a56d8292c470fe354fa9842
| 3,639,966
|
def read_links(title):
"""
Reads the links from a file in directory link_data.
Assumes the file exists, as well as the directory link_data
Args:
title: (Str) The title of the current wiki file to read
Returns a list of all the links in the wiki article with the name title
"""
with open(f"link_data/{title}", "r") as f:
read_data = f.read()
return read_data.split("\n")[:-1]
|
50f128bcf4cd36bc783bc848ab2e6b6280973ea3
| 3,639,967
|
def test_compile_model_from_params():
"""Tests that if build_fn returns an un-compiled model,
the __init__ parameters will be used to compile it
and that if build_fn returns a compiled model
it is not re-compiled.
"""
# Load data
data = load_boston()
X, y = data.data[:100], data.target[:100]
losses = ("mean_squared_error", "mean_absolute_error")
# build_fn that does not compile
def build_fn(compile_with_loss=None):
model = Sequential()
model.add(keras.layers.Dense(X.shape[1], input_shape=(X.shape[1],)))
model.add(keras.layers.Activation("relu"))
model.add(keras.layers.Dense(1))
model.add(keras.layers.Activation("linear"))
if compile_with_loss:
model.compile(loss=compile_with_loss)
return model
for loss in losses:
estimator = KerasRegressor(
model=build_fn,
loss=loss,
# compile_with_loss=None returns an un-compiled model
compile_with_loss=None,
)
estimator.fit(X, y)
assert estimator.model_.loss.__name__ == loss
for myloss in losses:
estimator = KerasRegressor(
model=build_fn,
loss="binary_crossentropy",
# compile_with_loss != None overrides loss
compile_with_loss=myloss,
)
estimator.fit(X, y)
assert estimator.model_.loss == myloss
|
a4cbc7b4dbc4d9836766c37d8eb1cfdd3d5c324e
| 3,639,968
|
import numpy
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
"""Return the smallest string representation of a number.
This method is only concerned with the maximum number of significant
digits.
Two alternatives:
1) modified scientific notation (without the trailing + and zero in
the exponent)
2) float notation
:returns: string representation of a number of function evaluations
or ERT.
"""
#Compared to writeFEvals2?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
size1 = len(repr1)
tmp = repr1.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
repr1 = 'e'.join(tmp)
#len(repr1) <= maxsymbols is not always the case but should be most usual
tmp = '%.0f' % fevals
remainingsymbols = max(maxsymbols - len(tmp), 0)
repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
tmp = repr2.split('.', 1)
if len(tmp) > 1:
tmp[-1] = tmp[-1].rstrip('0')
repr2 = '.'.join(tmp)
repr2 = repr2.rstrip('.')
#set_trace()
if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
return repr1
#tmp1 = '%4.0f' % bestalgdata[-1]
#tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
#if len(tmp2) > 1:
# tmp2[-1] = tmp2[-1].lstrip('+0')
# tmp2 = 'e'.join(tmp2)
# tmp = tmp1
# if len(tmp1) >= len(tmp2):
# tmp = tmp2
# curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
return repr2
|
a5434c5f6e845473f2187b969e4fa42538a95633
| 3,639,969
|
def closedcone(r=1, h=5, bp=[0,0,0], sampH=360, sampV=50, fcirc=20):
"""
Returns a parametrization of a closed cone with radius 'r' and height 'h' at
base point (bpx, bpy, bpz), where 'sampH' and 'sampV' specify the number of
samples used horizontally, i.e. for circles, and vertically, i.e.
for height, and 'fcirc' specifies the number
of circles that fill the bottom of the cone with radius 'r'.
The base point is at the cone's center at the bottom.
The default values are 1, 5, (0,0,0), 360 and 50 for the radius, height,
base point, and number of horizontal and vertical samples, respectively.
"""
bpx, bpy, bpz = bp
theta0 = np.linspace(0, 2*np.pi, sampH)
z = np.linspace(bpz, bpz+h, sampV)
theta, z = np.meshgrid(theta0, z)
r = np.linspace(r, 0, sampV)
theta, r = np.meshgrid(theta0, r)
x = r * np.cos(theta) - bpx
y = r * np.sin(theta) - bpy
xcirc, ycirc, zcirc = filledcircle(r=r,c=[bpx,bpy,bpz], sampH=sampH,
fcirc=fcirc)
x = np.append(x,xcirc,0)
y = np.append(y,ycirc,0)
z = np.append(z,zcirc,0)
return x, y, z
|
8cbf46f0a626d8cc858bab004a21dd9eb189a3eb
| 3,639,970
|
import numpy as np
from numpy.linalg import det
from scipy.special import digamma
def E_lndetW_Wishart(nu, V):
"""
mean of log determinant of precision matrix over Wishart <lndet(W)>
input
nu [float] : dof parameter of the Wishart distribution
V [ndarray, shape (D x D)] : base matrix of Wishart distribution
"""
if nu < len(V) + 1:
raise ValueError, "dof parameter nu must larger than len(V)"
D = len(V)
E = D*np.log(2.0) - np.log(det(V)) + \
digamma(np.arange(nu+1-D,nu+1)*0.5).sum()
return E
|
1fa84eb843c91b66b3937b7542be31c00faf002d
| 3,639,971
|
def crop_range_image(range_images, new_width, shift=None, scope=None):
"""Crops range image by shrinking the width.
Requires: new_width is smaller than the existing width.
Args:
range_images: [B, H, W, ...]
new_width: an integer.
shift: a list of integer of same size as batch that shifts the crop window.
Positive is right shift. Negative is left shift. We assume the shift keeps
the window inside the image (i.e. no wrap).
scope: the name scope.
Returns:
range_image_crops: [B, H, new_width, ...]
"""
# pylint: disable=unbalanced-tuple-unpacking
shape = _combined_static_and_dynamic_shape(range_images)
batch = shape[0]
width = shape[2]
if width == new_width:
return range_images
if new_width < 1:
raise ValueError('new_width must be positive.')
if width is not None and new_width >= width:
raise ValueError('new_width {} should be < the old width {}.'.format(
new_width, width))
if shift is None:
shift = [0] * batch
diff = width - new_width
left = [diff // 2 + i for i in shift]
right = [i + new_width for i in left]
for l, r in zip(left, right):
if l < 0 or r > width:
raise ValueError(
'shift {} is invalid given new_width {} and width {}.'.format(
shift, new_width, width))
range_image_crops = []
with tf.compat.v1.name_scope(scope, 'CropRangeImage', [range_images]):
for i in range(batch):
range_image_crop = range_images[i, :, left[i]:right[i], ...]
range_image_crops.append(range_image_crop)
return tf.stack(range_image_crops, axis=0)
|
364dc2e1e77052327e3517fb35c0223463179a69
| 3,639,972
|
import string
import random
def randomString(length):
"""Generates a random string of LENGTH length."""
chars = string.ascii_letters + string.digits
# sample without replacement, so length must not exceed len(chars)
s = "".join(random.sample(chars, length))
return s
|
fff13713271b3064b4e42c42c420aad190475d85
| 3,639,973
|
def DrawMACCloseButton(colour, backColour=None):
"""
Draws the wxMAC tab close button using wx.GraphicsContext.
:param `colour`: the colour to use to draw the circle.
"""
bmp = wx.EmptyBitmapRGBA(16, 16)
dc = wx.MemoryDC()
dc.SelectObject(bmp)
gc = wx.GraphicsContext.Create(dc)
gc.SetBrush(wx.Brush(colour))
path = gc.CreatePath()
path.AddCircle(6.5, 7, 6.5)
path.CloseSubpath()
gc.FillPath(path)
path = gc.CreatePath()
if backColour is not None:
pen = wx.Pen(backColour, 2)
else:
pen = wx.Pen("white", 2)
pen.SetCap(wx.CAP_BUTT)
pen.SetJoin(wx.JOIN_BEVEL)
gc.SetPen(pen)
path.MoveToPoint(3.5, 4)
path.AddLineToPoint(9.5, 10)
path.MoveToPoint(3.5, 10)
path.AddLineToPoint(9.5, 4)
path.CloseSubpath()
gc.DrawPath(path)
dc.SelectObject(wx.NullBitmap)
return bmp
|
96982b68aa926341d7ab74d7ed705c19c232392e
| 3,639,974
|
def dispatch(args, validator):
"""
'dispatch' sets on the 'validator' object the level of validation
chosen by the user. By default, the validator
performs topology-level validation.
"""
print("Printing all the arguments: {}\n".format(args))
if args.vnfd:
print("VNFD validation")
validator.schema_validator.load_schemas("VNFD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False,
custom=False)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
custom=False)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
custom=False)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile)
print("Syntax, integrity, topology and custom rules validation")
else:
print("Default mode: Syntax, integrity and topology validation")
if validator.validate_function(args.vnfd):
if ((validator.error_count == 0) and
(len(validator.customErrors) == 0)):
print("No errors found in the VNFD")
else:
print("Errors in validation")
return validator
elif args.nsd:
print("NSD validation")
validator.schema_validator.load_schemas("NSD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
dpath=args.dpath)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
dpath=args.dpath)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile,
dpath=args.dpath)
print("Syntax, integrity, topology and custom rules validation")
else:
validator.configure(syntax=True, integrity=True, topology=True,
dpath=args.dpath)
print("Default mode: Syntax, integrity and topology validation")
if validator.validate_service(args.nsd):
if ((validator.error_count == 0) and (len(validator.customErrors) == 0)):
print("No errors found in the Service descriptor validation")
else:
print("Errors in custom rules validation")
return validator
elif args.project_path:
print("Project descriptor validation")
validator.schema_validator.load_schemas("NSD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False,
workspace_path=args.workspace_path)
elif args.integrity:
print("Syntax and integrity validation")
validator.configure(syntax=True, integrity=True, topology=False,
workspace_path=args.workspace_path)
elif args.topology:
print("Syntax, integrity and topology validation")
validator.configure(syntax=True, integrity=True, topology=True,
workspace_path=args.workspace_path)
elif args.custom:
validator.configure(syntax=True, integrity=True, topology=True,
custom=True, cfile=args.cfile)
print("Syntax, integrity, topology and custom rules validation")
else:
print("Default mode: Syntax, integrity and topology validation")
if not validator.validate_project(args.project_path):
print('Cant validate the project descriptors')
else:
if validator.error_count == 0:
if len(validator.customErrors) == 0:
print("No errors found in the validation of the project descriptors")
else:
print("Errors in custom rules validation")
return validator
elif args.tstd:
print("Test descriptor validation")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_test(args.tstd):
print('Cant validate the test descriptors')
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the test descriptors")
else:
print("Errors in validation")
return validator
elif args.nstd:
print("Slice descriptor validation")
validator.schema_validator.load_schemas("NSTD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_slice(args.nstd):
print('Cant validate the slice descriptors')
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the slice descriptors")
else:
print("Errors in validation")
return validator
elif args.slad:
print("SLA descriptor validation")
validator.schema_validator.load_schemas("SLAD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_sla(args.slad):
print('Cant validate the sla descriptors')
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the sla descriptors")
else:
print("Errors in validation")
return validator
elif args.rpd:
print("RP descriptor validation")
validator.schema_validator.load_schemas("RPD")
if args.syntax:
print("Syntax validation")
validator.configure(syntax=True, integrity=False, topology=False, custom=False)
elif args.integrity:
print("Integrity validation")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
else:
print("Default test descriptor validation syntax and integrity")
validator.configure(syntax=True, integrity=True, topology=False, custom=False)
if not validator.validate_runtime_policy(args.rpd):
print('Cant validate the runtime policy descriptors')
else:
if validator.error_count == 0 and len(validator.customErrors) == 0:
print("No errors found in the validation of the sla descriptors")
else:
print("Errors in validation")
return validator
|
b2625b5cb46295d0790b37fa691b8a4d60341e47
| 3,639,975
|
def create_app():
"""Create and configure and instance of the Flask application"""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
DB.init_app(app)
@app.route('/')
def home():
return render_template('base.html')
@app.route('/load')
def db_load_city():
df_city = api.cities(df=True)
load_cities(df_city)
return render_template('base.html', title='Cities Loaded')
@app.route('/countries')
def db_load_country():
df_country = api.countries(df=True)
load_countries(df_country)
return render_template('base.html', title='Countries Loaded')
return app
|
1e122846bfdfc68a1143eb2d53b87eda9ae9cff6
| 3,639,976
|
def get_motif_class(motif: str) -> str:
"""Return the class of the given motif."""
for mcls in gen_motif_classes(len(motif), len(motif) + 1):
if motif in motif_set(mcls):
return mcls
raise ValueError(
"Unable to find the class of the given motif. "
"Maybe it contains a character other than ['A', 'C', 'G', 'T']?"
)
|
fea293fcf25b77bbf78c400facf450067c94be2b
| 3,639,978
|
def resnet_v1(inputs,
blocks,
num_classes=None,
is_training=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
store_non_strided_activations=False,
reuse=None,
scope=None):
"""Generator for v1 ResNet models.
This function generates a family of ResNet v1 models. See the resnet_v1_*()
methods for specific model instantiations, obtained by selecting different
block instantiations that produce ResNets of various depths.
Training for image classification on Imagenet is usually done with [224, 224]
inputs, resulting in [7, 7] feature maps at the output of the last ResNet
block for the ResNets defined in [1] that have nominal stride equal to 32.
However, for dense prediction tasks we advise that one uses inputs with
spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
this case the feature maps at the ResNet output will have spatial shape
[(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
and corners exactly aligned with the input image corners, which greatly
facilitates alignment of the features to the image. Using as input [225, 225]
images results in [8, 8] feature maps at the output of the last ResNet block.
For dense prediction tasks, the ResNet needs to run in fully-convolutional
(FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
have nominal stride equal to 32 and a good choice in FCN mode is to use
output_stride=16 in order to increase the density of the computed features at
small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
blocks: A list of length equal to the number of ResNet blocks. Each element
is a resnet_utils.Block object describing the units in the block.
num_classes: Number of predicted classes for classification tasks.
If 0 or None, we return the features before the logit layer.
is_training: whether batch_norm layers are in training mode. If this is set
to None, the callers can specify slim.batch_norm's is_training parameter
from an outer slim.arg_scope.
global_pool: If True, we perform global average pooling before computing the
logits. Set to True for image classification, False for dense prediction.
output_stride: If None, then the output will be computed at the nominal
network stride. If output_stride is not None, it specifies the requested
ratio of input to output spatial resolution.
include_root_block: If True, include the initial convolution followed by
max-pooling, if False excludes it.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
To use this parameter, the input images must be smaller than 300x300
pixels, in which case the output logit layer does not contain spatial
information and can be removed.
store_non_strided_activations: If True, we compute non-strided (undecimated)
activations at the last unit of each block and store them in the
`outputs_collections` before subsampling them. This gives us access to
higher resolution intermediate activations which are useful in some
dense prediction problems but increases 4x the computation and memory cost
at the last unit of each block.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
If global_pool is False, then height_out and width_out are reduced by a
factor of output_stride compared to the respective height_in and width_in,
else both height_out and width_out equal one. If num_classes is 0 or None,
then net is the output of the last ResNet block, potentially after global
average pooling. If num_classes a non-zero integer, net contains the
pre-softmax activations.
end_points: A dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: If the target output_stride is not valid.
"""
with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + '_end_points'
with slim.arg_scope([slim.conv2d, bottleneck,
resnet_utils.stack_blocks_dense],
outputs_collections=end_points_collection):
with (slim.arg_scope([slim.batch_norm], is_training=is_training)
if is_training is not None else NoOpScope()):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError('The output_stride needs to be a multiple of 4.')
output_stride /= 4
net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
store_non_strided_activations)
# Convert end_points_collection into a dictionary of end_points.
end_points = slim.utils.convert_collection_to_dict(
end_points_collection)
if global_pool:
# Global average pooling.
net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
end_points['global_pool'] = net
if num_classes:
net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='logits')
end_points[sc.name + '/logits'] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points[sc.name + '/spatial_squeeze'] = net
end_points['predictions'] = slim.softmax(net, scope='predictions')
return net, end_points
|
b2008da41f5ada502941c058134ded4d95c3d5c0
| 3,639,979
|
def findConstell(cc):
"""
input is one character (from rinex satellite line)
output is integer added to the satellite number
0 for GPS, 100 for Glonass, 200 for Galileo, 300 for everything else?
author: kristine larson, GFZ, April 2017
"""
if (cc == 'G' or cc == ' '):
out = 0
elif (cc == 'R'): # glonass
out = 100
elif (cc == 'E'): # galileo
out = 200
else:
out = 300
return out
|
d7a85fc5f7324acdb5277fd6db458523cd4ad4b8
| 3,639,980
|
def Controller(idx):
"""(read-only) Full name of the i-th controller attached to this element. Ex: str = Controller(2). See NumControls to determine valid index range"""
return get_string(lib.CktElement_Get_Controller(idx))
|
5adb2f806133319546ea627c705579a3a7e662dd
| 3,639,981
|
def smooth_2d_map(bin_map, n_bins=5, sigma=2, apply_median_filt=True, **kwargs):
"""
:param bin_map: map to be smooth.
array in which each cell corresponds to the value at that xy position
:param n_bins: number of smoothing bins
:param sigma: std for the gaussian smoothing
:return: sm_map: smoothed map. note that this is a truncated sigma map, meaning that high or
low values wont affect far away bins
"""
if apply_median_filt:
sm_map = ndimage.filters.median_filter(bin_map, n_bins)
else:
sm_map = bin_map
trunc = (((n_bins - 1) / 2) - 0.5) / sigma
return ndimage.filters.gaussian_filter(sm_map, sigma, mode='constant', truncate=trunc)
|
a1d8c9b2b8107663746d2c1af9e129d7226e9d0b
| 3,639,982
|
from socket import socket, AF_INET, SOCK_STREAM
def _select_socket(lower_port, upper_port):
"""Create and return a socket whose port is available and adheres to the given port range, if applicable."""
sock = socket(AF_INET, SOCK_STREAM)
found_port = False
retries = 0
while not found_port:
try:
sock.bind(('0.0.0.0', _get_candidate_port(lower_port, upper_port)))
found_port = True
except Exception:
retries = retries + 1
if retries > max_port_range_retries:
raise RuntimeError(
"Failed to locate port within range {}..{} after {} retries!".
format(lower_port, upper_port, max_port_range_retries))
return sock
|
19427fd0146b5537c6fab898b5e3e0868c8c4a21
| 3,639,983
|
def _factory(cls_name, parent_cls, search_nested_subclasses=False):
"""Return subclass from parent
Args:
cls_name (basestring)
parent_cls (cls)
search_nested_subclasses (bool)
Return:
cls
"""
member_cls = None
subcls_name = _filter_out_underscore(cls_name.lower())
members = (_all_subclasses(parent_cls) if search_nested_subclasses else
parent_cls.__subclasses__())
for member_cls in members:
if member_cls.__name__.lower() == subcls_name:
break
else:
raise exception.NoClassFound(
"%s for parent %s" % (subcls_name, parent_cls))
return member_cls
|
2eb5fb4c3333aaddec418ebac8ecdd824ff4e8ba
| 3,639,985
|
def tabuleiro_actualiza_pontuacao(t,v):
"""list x int -> list
Esta funcao recebe um elemento tabuleiro do tipo lista e um elemento v do tipo inteiro e modifica o tabuleiro, acrescentando ao valor da pontuacao v pontos"""
if isinstance(v,int) and v%4==0 and v>=0:
t[4]=tabuleiro_pontuacao(t)+v
return t
else:
raise ValueError('tabuleiro_actualiza_pontuacao: argumentos invalidos')
|
a247f2c14ffd42fc4d77ae9871ccc08bd967296d
| 3,639,986
|
import IPython
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
def showcase_code(pyfile, class_name=False, method_name=False, end_string=False):
"""shows content of py file"""
with open(pyfile) as f:
code = f.read()
if class_name:
#1. find beginning (class + <name>)
index = code.find(f'class {class_name}')
code = code[index:]
#2. find end (class (new class!) or end of script)
end_index = code[7:].find('class')
if method_name:
#1. find beginning (class + <name>)
index = code.find(f'def {method_name}')
code = code[index:]
#2. find end (class (new class!) or end of script)
end_index = code[7:].find('def')
if end_string:
end_index = code[7:].find('# helpers')
code = code[:end_index]
formatter = HtmlFormatter()
return IPython.display.HTML('<style type="text/css">{}</style>{}'.format(
formatter.get_style_defs('.highlight'),
highlight(code, PythonLexer(), formatter)))
|
fe62a99adf5f97164ac69e68554f31d20e126dfa
| 3,639,988
|
def get_hyperparams(data, ind):
"""
Gets the hyperparameters for hyperparameter settings index ind
Parameters
----------
data : dict
The Python data dictionary generated from running main.py
ind : int
Gets the returns of the agent trained with this hyperparameter
settings index
Returns
-------
dict
The dictionary of hyperparameters
"""
return data["experiment_data"][ind]["agent_hyperparams"]
|
3734f4cf00564a1aa7c852091d366e6e42b6d55b
| 3,639,989
|
from typing import Dict
from typing import Any
from typing import Tuple
def _check_df_params_require_iter(
func_params: Dict[str, ParamAttrs],
src_df: pd.DataFrame,
func_kwargs: Dict[str, Any],
**kwargs,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Return params that require iteration and those that don't."""
list_params: Dict[str, Any] = {}
df_iter_params: Dict[str, Any] = {}
for kw_name, arg in kwargs.items():
if kw_name in _DEF_IGNORE_PARAM:
continue
if (
arg not in src_df.columns
or not isinstance(func_kwargs.get(kw_name), str)
or kw_name not in func_params
):
# Not intended/usable as a column specification
continue
col_name = func_kwargs.pop(kw_name)
if func_params[kw_name].type == "list":
# If the parameter accepts iterable types try to use the
# values of that column directly
list_params[kw_name] = list(src_df[col_name].values)
# But also store it as a param that we might need to iterate through
df_iter_params[kw_name] = col_name
return df_iter_params, list_params
|
e66a42a173f24a33f2457bf6b8cfe4124984f646
| 3,639,990
|
import numpy as np
def _inverse_permutation(p):
"""inverse permutation p"""
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i) # s[p] = i
return s
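A quick sanity check:
p = np.array([2, 0, 1], dtype=np.int32)
s = _inverse_permutation(p)
print(s)     # [1 2 0]
print(p[s])  # [0 1 2], composing with the inverse restores the identity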
|
0e8a4cf7156c9dac6a3bb89eb3edb8960478d7b6
| 3,639,995
|
def blend0(d=0.0, u=1.0, s=1.0):
"""
blending function trapezoid
d = delta x = xabs - xdr
u = uncertainty radius of xabs estimate error
s = tuning scale factor
returns blend
"""
d = float(abs(d))
u = float(abs(u))
s = float(abs(s))
v = d - u #offset by radius
if v >= s: #first so if s == 0 catches here so no divide by zero below
b = 0.0
elif v <= 0.0:
b = 1.0
else: # 0 < v < s
b = 1.0 - (v / s)
return b
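A few worked values of the trapezoid blend (no extra dependencies needed):
print(blend0(d=0.5, u=1.0, s=1.0))  # 1.0, inside the uncertainty radius
print(blend0(d=1.5, u=1.0, s=1.0))  # 0.5, on the ramp
print(blend0(d=3.0, u=1.0, s=1.0))  # 0.0, beyond the ramp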
|
d501db66c34f28421c1517dcd3052fa7b2ee8643
| 3,639,996
|
def median(a, dim=None):
"""
Calculate median along a given dimension.
Parameters
----------
a: af.Array
The input array.
dim: optional: int. default: None.
The dimension for which to obtain the median from input data.
Returns
-------
output: af.Array
Array containing the median of the input array along a
given dimension.
"""
if dim is not None:
out = Array()
safe_call(backend.get().af_median(c_pointer(out.arr), a.arr, c_int_t(dim)))
return out
else:
real = c_double_t(0)
imag = c_double_t(0)
safe_call(backend.get().af_median_all(c_pointer(real), c_pointer(imag), a.arr))
real = real.value
imag = imag.value
return real if imag == 0 else real + imag * 1j
|
0a117fe2f072747e752e77613dc658812630dacc
| 3,639,997
|
from typing import Union
async def is_photo(obj: Union[Message, CallbackQuery]) -> bool:
"""
Checks if message content is photo
:return: True if so
"""
obj = await _to_message(obj)
return obj.content_type == 'photo'
|
13207a44dba000ad0486997f364f011cfffa9d26
| 3,639,998
|
def check_win(mat):
"""
Returns either:
False: Game not over.
True: Game won, 2048 is found in mat
"""
if 2048 in mat: # If won, a terminal state is needed for the RL agent
return True # Terminal state
else:
return False
|
0824bc059cfa32b275c7b63f98d22e8a5b667e06
| 3,639,999
|
def mtl_to_json(mtl_text):
""" Convert Landsat MTL file to dictionary of metadata values """
mtl = {}
for line in mtl_text.split('\n'):
meta = line.replace('\"', "").strip().split('=')
if len(meta) > 1:
key = meta[0].strip()
item = meta[1].strip()
if key != "GROUP" and key != "END_GROUP":
mtl[key] = item
return mtl
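A tiny example on a hypothetical MTL fragment:
mtl_text = 'GROUP = L1_METADATA_FILE\n  SPACECRAFT_ID = "LANDSAT_8"\n  CLOUD_COVER = 1.5\nEND_GROUP = L1_METADATA_FILE'
print(mtl_to_json(mtl_text))  # {'SPACECRAFT_ID': 'LANDSAT_8', 'CLOUD_COVER': '1.5'}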
|
310be04e9fbf756e9cf5ead60e53aae974d2ed50
| 3,640,000
|
def endian_swap(word):
"""Given any string, swap bits and return the result.
:rtype: str
"""
return "".join([word[i:i+2] for i in [6, 4, 2, 0]])
|
dfca46a012602150957a0830cf30cc6b6790df80
| 3,640,001
|
import logging
def get_grundsteuer(request_id: str):
"""
Route for retrieving job status of a grundsteuer tax declaration validation from the queue.
:param request_id: the id of the job.
"""
try:
raise NotImplementedError()
except NotImplementedError:
logging.getLogger().info("Could not retrieve status of job " + request_id, exc_info=True)
return JSONResponse(status_code=500, content=generate_dummy_error_response())
|
d92431ff1e09652d78b7beeaeabdeb2d502d0829
| 3,640,002
|
def str_to_col_grid_lists(s):
"""
Convert a string to selected columns and selected grid ranges.
Parameters:
s: (str) a string representing one solution.
For instance, *3**9 means 2 out of 5 dimensions are selected; the second and the last columns are selected,
and their corresponding grid ranges are 3 and 9. The function will return (1, 4) and (3, 9).
Return:
selected_cols (list): list of columns selected as indicated by the string.
selected_ranges (list): list of grid ranges selected as indicated by the string.
"""
selected_cols, selected_ranges = [], []
for i in range(len(s)):
if s[i] != "*":
selected_cols.append(i)
selected_ranges.append(int(s[i]))
return selected_cols, selected_ranges
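Using the example from the docstring:
cols, ranges = str_to_col_grid_lists('*3**9')
print(cols, ranges)  # [1, 4] [3, 9]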
|
4f5c67afa0dc97070b08223acbe6764010fd213a
| 3,640,003
|
from typing import Union
import uuid
from typing import List
def get_installation_indices_by_installation_id(
db_session: Session, installation_id: Union[str, uuid.UUID]
) -> List[SlackIndexConfiguration]:
"""
Gets all the indices set up in an installation given on the ID of that installation.
"""
bot_installation = (
db_session.query(SlackOAuthEvent)
.filter(SlackOAuthEvent.id == installation_id)
.one()
)
return get_installation_indices(db_session, bot_installation)
|
0025599259a8f23e1da462d465448f3ed9a1701f
| 3,640,004
|
def convert_hdf(proj_dir, dir_list, hdf_filepath_list, hdf_filename_list):
"""Converts downloaded MODIS HDF files into geotiff file format."""
global src_xres
global src_yres
geotiff_list = []
print("Converting MODIS HDF files to geotiff format...")
out_format = 'GTiff'
local_array = list(zip(hdf_filepath_list, hdf_filename_list)) # materialize so it can be iterated once per directory
for dir in dir_list:
for in_filepath, out_filename in local_array:
# Open the LST_Day_1km dataset
src_open = gdal.Open(in_filepath, gdalconst.GA_ReadOnly) # open file with all sub-datasets
src_subdatasets = src_open.GetSubDatasets() # make a list of sub-datasets in the HDF file
subdataset = gdal.Open(src_subdatasets[0][0])
# Get parameters from LST dataset
src_cols = subdataset.RasterXSize
src_rows = subdataset.RasterYSize
src_band_count = subdataset.RasterCount
src_geotransform = subdataset.GetGeoTransform()
src_xres = src_geotransform[1]
src_yres = src_geotransform[5]
src_proj = subdataset.GetProjection()
# Read dataset to array
src_band = subdataset.GetRasterBand(1)
src_array = src_band.ReadAsArray(0, 0, src_cols, src_rows).astype(np.float)
# Set up output file
driver = gdal.GetDriverByName(out_format)
out_file = "%s\%s.%s" % (dir, out_filename, "tif")
out_geotiff = driver.Create(out_file, src_cols, src_rows, src_band_count, gdal.GDT_Float32)
out_geotiff.SetGeoTransform(src_geotransform)
out_geotiff.SetProjection(src_proj)
out_geotiff.GetRasterBand(1).WriteArray(src_array)
out_geotiff.FlushCache()
# Create list of output geotiffs
geotiff_list.append(out_file)
return geotiff_list, src_xres, src_yres
|
f74b3e89b957746aaec9c04b4615bc5a3f7388e7
| 3,640,005
|
def _join_type_and_checksum(type_list, checksum_list):
"""
Join checksums and their corresponding types into the following format:
"checksums": [{"type": "md5", "checksum": "abcdefg"}, {"type": "sha256", "checksum": "abcd12345"}]
"""
checksums = [
{
"type": c_type,
"checksum": checksum,
}
for c_type, checksum in zip(type_list, checksum_list)
]
return checksums
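For example:
print(_join_type_and_checksum(['md5', 'sha256'], ['abcdefg', 'abcd12345']))
# [{'type': 'md5', 'checksum': 'abcdefg'}, {'type': 'sha256', 'checksum': 'abcd12345'}]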
|
7f09ee72c6f51ad87d75a9b5e74ad8ef4776323f
| 3,640,006
|
def _local_groupby(df_rows, axis=0):
"""Apply a groupby on this partition for the blocks sent to it.
Args:
df_rows ([pd.DataFrame]): A list of dataframes for this partition. Goes
through the Ray object store.
Returns:
A DataFrameGroupBy object from the resulting groupby.
"""
concat_df = pd.concat(df_rows, axis=axis)
return concat_df.groupby(concat_df.index)
|
d78cd88bac7b03136bbe8401d207ee10c2d031f9
| 3,640,007
|
def colors_terrain() -> dict:
"""
Age of Empires II terrain colors for minimap.
Credit for a list of Age of Empires II terrain and player colors goes to:
https://github.com/goto-bus-stop/recanalyst.
This function has great potential for contributions from designers
and other specialists.
Got information what Terrain IDs are what?
Got better color suggestions?
Please create an issue https://github.com/Deasilsoft/a2j/issues!
Pull requests would be even more awesome!
:rtype: dict
"""
return {
0: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
# WATER
1: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
# SHORES
2: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
3: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
4: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
5: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
6: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
7: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
8: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
9: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
10: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
11: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
12: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
# FOREST
13: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
14: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
15: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
# CLIFFS
16: {
0: (128, 100, 100),
1: (128, 100, 100),
2: (128, 100, 100),
},
17: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
18: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
19: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
20: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
21: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
22: {
0: (0, 74, 161),
1: (0, 74, 161),
2: (0, 74, 161),
},
23: {
0: (0, 74, 187),
1: (0, 74, 187),
2: (0, 74, 187),
},
24: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
25: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
26: {
0: (152, 192, 240),
1: (152, 192, 240),
2: (152, 192, 240),
},
27: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
28: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
29: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
30: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
31: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
32: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
33: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
34: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
35: {
0: (152, 192, 240),
1: (152, 192, 240),
2: (152, 192, 240),
},
36: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
37: {
0: (152, 192, 240),
1: (152, 192, 240),
2: (152, 192, 240),
},
38: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
39: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (189, 209, 253),
},
40: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
41: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
42: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
43: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
44: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
45: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
46: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
47: {
0: (28, 28, 28),
1: (28, 28, 28),
2: (28, 28, 28),
},
48: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
49: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
50: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
51: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
52: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
53: {
0: (248, 201, 138),
1: (232, 180, 120),
2: (189, 150, 111),
},
54: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
55: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
56: {
0: (37, 116, 57),
1: (21, 118, 21),
2: (0, 114, 0),
},
57: {
0: (0, 74, 161),
1: (0, 74, 161),
2: (0, 74, 161),
},
58: {
0: (0, 84, 176),
1: (0, 84, 176),
2: (0, 84, 176),
},
59: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
60: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
61: {
0: (243, 170, 92),
1: (228, 162, 82),
2: (218, 156, 105),
},
62: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
63: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
64: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
65: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
66: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
67: {
0: (138, 139, 87),
1: (130, 136, 77),
2: (118, 130, 65),
},
68: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
69: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
70: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
71: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
72: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
73: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
74: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
75: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
76: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
77: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
78: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
79: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
80: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
81: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
82: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
83: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
84: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
85: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
86: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
87: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
88: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
89: {
0: (0, 169, 0),
1: (51, 151, 39),
2: (0, 141, 0),
},
90: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
91: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
92: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
93: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
94: {
0: (84, 146, 176),
1: (84, 146, 176),
2: (84, 146, 176),
},
95: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
96: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
97: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
98: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
},
99: {
0: (48, 93, 182),
1: (48, 93, 182),
2: (48, 93, 182),
}
}
|
8e8f00d689ce00203127a9d810b6017ee5a04e18
| 3,640,008
|
def _load_dataset(dataset_config, *args, num_batches=None, **kwargs):
"""
Loads a dataset from configuration file
If num_batches is None, this function will return a generator that iterates
over the entire dataset.
"""
dataset_module = import_module(dataset_config["module"])
dataset_fn = getattr(dataset_module, dataset_config["name"])
batch_size = dataset_config["batch_size"]
framework = dataset_config.get("framework", "numpy")
# XXX: BEGIN PATCH
kwargs.update(dataset_config['kwargs'])
# XXX: END PATCH
dataset = dataset_fn(batch_size=batch_size, framework=framework, *args, **kwargs)
if not isinstance(dataset, ArmoryDataGenerator):
raise ValueError(f"{dataset} is not an instance of {ArmoryDataGenerator}")
if dataset_config.get("check_run"):
return EvalGenerator(dataset, num_eval_batches=1)
if num_batches:
return EvalGenerator(dataset, num_eval_batches=num_batches)
return dataset
|
5a35be1cac9bf405206ebc29b24aa0c08c27a18f
| 3,640,010
|
def mock_checks_health(mocker: MockFixture):
"""Fixture for mocking checks.health."""
return mocker.patch("website_checker.checks.health")
|
aa6dff915bc1559838e46cc3e486d916a2c9f117
| 3,640,012
|
from typing import Dict
from typing import Any
import jwt
def decode_jwt(
jwt_string: str
) -> Dict[Any, Any]:
""" Decodes the given JWT string without performing any verification.
Args:
jwt_string (str): A string of the JWT to decode.
Returns:
dict: A dictionary of the body of the JWT.
"""
return jwt.decode( # type: ignore
jwt_string,
algorithms = ['ES256K'],
options={"verify_signature": False}
)
|
39b3e14a3eb63723b2a8df21d5252ea937b0a41b
| 3,640,013
|
import collections
def _resolve_references(navigation, version, language):
"""
Iterates through an object (could be a dict, list, str, int, float, unicode, etc.)
and if it finds a dict with `$ref`, resolves the reference by loading it from
the respective JSON file.
"""
if isinstance(navigation, list):
# navigation is type list, resolved_navigation should also be type list
resolved_navigation = []
for item in navigation:
resolved_navigation.append(_resolve_references(item, version, language))
return resolved_navigation
elif isinstance(navigation, dict):
# navigation is type dict, resolved_navigation should also be type dict
resolved_navigation = collections.OrderedDict()
if DEFAULT_BRANCH in navigation and version != 'doc_test':
version = navigation[DEFAULT_BRANCH]
for key, value in navigation.items():
if key == '$ref' and language in value:
# The value is the relative path to the associated json file
referenced_json = load_json_and_resolve_references(value[language], version, language)
if referenced_json:
resolved_navigation = referenced_json
else:
resolved_navigation[key] = _resolve_references(value, version, language)
return resolved_navigation
else:
# leaf node: The type of navigation should be [string, int, float, unicode]
return navigation
|
cb955d74844a86afc4982199ec81b18899466b0e
| 3,640,014
|
from typing import Optional
from typing import Union
from typing import Sequence
def phq(data: pd.DataFrame, columns: Optional[Union[Sequence[str], pd.Index]] = None) -> pd.DataFrame:
"""Compute the **Patient Health Questionnaire (Depression) – 9 items (PHQ-9)**.
The PHQ-9 is a measure for depression.
.. note::
This implementation assumes a score range of [0, 3].
Use :func:`~biopsykit.questionnaires.utils.convert_scale()` to convert the items into the correct range
beforehand.
Parameters
----------
data : :class:`~pandas.DataFrame`
dataframe containing questionnaire data. Can either be only the relevant columns for computing this score or
a complete dataframe if ``columns`` parameter is supplied.
columns : list of str or :class:`pandas.Index`, optional
list with column names in correct order.
This can be used if columns in the dataframe are not in the correct order or if a complete dataframe is
passed as ``data``.
Returns
-------
:class:`~pandas.DataFrame`
PHQ9 score
Raises
------
:exc:`~biopsykit.utils.exceptions.ValidationError`
if number of columns does not match
:exc:`~biopsykit.utils.exceptions.ValueRangeError`
if values are not within the required score range
References
----------
Löwe, B., Spitzer, R.L., Zipfel, S., Herzog, W., 2002. Gesundheitsfragebogen für Patienten (PHQ-D).
*Manual und Testunterlagen*. 2. Auflage
"""
score_name = "PHQ9"
score_range = [0, 3]
# create copy of data
data = data.copy()
if columns is not None:
# if columns parameter is supplied: slice columns from dataframe
_assert_has_columns(data, [columns])
data = data.loc[:, columns]
_assert_num_columns(data, 9)
_assert_value_range(data, score_range)
return pd.DataFrame(data.sum(axis=1), columns=[score_name])
|
73b925b29a51b7f0575b3449b015d41d3287ca35
| 3,640,015
|
from random import randint
def mbc_choose_any_program(table_path):
"""
randomly select one item of MBCRadioProgramTable
:param table_path:
:return:
"""
table = playlist.MBCRadioProgramTable(table_path=table_path)
programs = list(filter(lambda x: x.playlist_slug, table.programs))
if programs:
random_id = randint(0, len(programs) - 1)
return programs[random_id]
|
397c56f4a4d79bf3cd2ede5eba13414fcb1836ae
| 3,640,016
|
def logout_view(request):
"""Logout a user."""
logout(request)
return redirect('users:login')
|
e14292c1fc78d8fb6f395129a1b77f141ce93627
| 3,640,017
|
import numpy as np
def _cast(vtype, value):
"""
Cast a table type into a python native type
:param vtype: table type
:type vtype: string
:param value: value to cast
:type value: string
"""
if not vtype:
return None
if isinstance(value, str):
return_value = value.strip()
if return_value == "":
return_value = None
elif 'float' in vtype.lower():
try:
return_value = float(value)
except ValueError:
return_value = None
elif 'int' in vtype.lower():
try:
return_value = int(float(value))
except ValueError:
return_value = None
elif isinstance(value, (float, int, np.int, np.float, np.int16, np.int32,
np.int64, np.float16, np.float32, np.float64)):
return_value = value
else:
print("Cannot cast {0}".format(type(value)))
return_value = None
return return_value
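# --- Usage sketch (added illustration, not part of the original source) ---
# The type names below ('float8', 'int4') are hypothetical; any string
# containing 'float' or 'int' triggers the corresponding cast.
print(_cast('float8', '3.25'))   # expected: 3.25
print(_cast('int4', '7.0'))      # expected: 7
print(_cast('float8', '   '))    # expected: None (blank strings collapse to None)
print(_cast(None, '42'))         # expected: None (missing type short-circuits)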
|
27ffdb0dac7d7e5f092a798630e6b874626a27b2
| 3,640,019
|
def L2Norm(inputs, axis=0, num_axes=-1, eps=1e-5, mode='SUM', **kwargs):
"""L2 Normalization, introduced by `[Liu et.al, 2015] <https://arxiv.org/abs/1506.04579>`_.
Parameters
----------
inputs : Tensor
The input tensor.
axis : int
The start axis of stats region.
num_axes : int
The number of axes of stats region. Default is ``-1`` (Till End).
eps : float
The eps.
mode : str
The mode on computing normalizer. ``SUM`` or ``MEAN``.
Returns
-------
Tensor
The output tensor.
"""
CheckInputs(inputs, 1)
arguments = ParseArguments(locals())
output = Tensor.CreateOperator(nout=1, op_type='L2Norm', **arguments)
if inputs.shape is not None:
output.shape = inputs.shape[:]
return output
|
20c0a1677874adfbd6c24cb6f662d1c0dc6c93f1
| 3,640,020
|
from typing import Union
from typing import Sequence
import inspect
def has_option(obj, keywords: Union[str, Sequence[str]]) -> bool:
"""
Return a boolean indicating whether the given callable `obj` has the `keywords` in its signature.
"""
if not callable(obj):
return False
sig = inspect.signature(obj)
return all(key in sig.parameters for key in ensure_tuple(keywords))
|
de2c6d4d458a8db6f0ff555d04570897e3440c10
| 3,640,021
|
import mmh3
import struct
def create_element_rand(element_id):
"""
This function simply returns a 32 bit hash of the element id.
    The resulting value should be used as a random priority.
    :param element_id: The element unique identifier
    :return: a random integer
    """
    if isinstance(element_id, int):
        # Python 3 has no separate ``long`` type; fall back to a 64-bit pack
        # for identifiers that do not fit in 32 bits.
        fmt = 'i' if -2 ** 31 <= element_id < 2 ** 31 else 'q'
        obj = struct.pack(fmt, element_id)
    elif isinstance(element_id, str):
        obj = element_id
    else:
        raise TypeError('Unknown type: pack it yourself with struct')
return int(mmh3.hash(obj))
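# --- Usage sketch (added illustration, not part of the original source) ---
# The hash is deterministic for a given id, so equal ids always receive the
# same pseudo-random priority (requires the mmh3 package to be installed).
priority = create_element_rand(12345)
print(priority)                                # same value on every run
print(create_element_rand(12345) == priority)  # expected: True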
|
095ced835235bec4b042a8a8b5eb3c44e967390e
| 3,640,022
|
def _ul_add_action(actions, opt, res_type, stderr):
"""Create new and append it to the actions list"""
r = _UL_RES[opt]
if r[0] is None:
_ul_unsupported_opt(opt, stderr)
return False
# we always assume the 'show' action to be requested and eventually change it later
actions.append(
[
_ul_show,
{"res": r[0], "res_type": res_type, "desc": r[3], "unit": r[4], "opt": opt},
]
)
return True
|
098492f8bd875c611650fa773fd308d1097bcd18
| 3,640,023
|
from typing import List
from typing import Any
import time
def _pack(cmd_id: int, payload: List[Any], privkey: datatypes.PrivateKey) -> bytes:
"""Create and sign a UDP message to be sent to a remote node.
See https://github.com/ethereum/devp2p/blob/master/rlpx.md#node-discovery for information on
how UDP packets are structured.
"""
cmd_id = to_bytes(cmd_id)
expiration = rlp.sedes.big_endian_int.serialize(int(time.time() + EXPIRATION))
encoded_data = cmd_id + rlp.encode(payload + [expiration])
signature = privkey.sign_msg(encoded_data)
message_hash = keccak(signature.to_bytes() + encoded_data)
return message_hash + signature.to_bytes() + encoded_data
|
11ade65dc4ceceab509d13456845d37671b8abfb
| 3,640,024
|
def clip_boxes(boxes, shape):
"""
:param boxes: (...)x4, float
:param shape: h, w
"""
orig_shape = boxes.shape
boxes = boxes.reshape([-1, 4])
h, w = shape
boxes[:, [0, 1]] = np.maximum(boxes[:, [0, 1]], 0)
boxes[:, 2] = np.minimum(boxes[:, 2], w)
boxes[:, 3] = np.minimum(boxes[:, 3], h)
return boxes.reshape(orig_shape)
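# --- Usage sketch (added illustration, not part of the original source) ---
# Boxes are (x1, y1, x2, y2); the image shape is (height, width).
import numpy as np

boxes = np.array([[-5.0, -3.0, 120.0, 90.0]])
print(clip_boxes(boxes, (80, 100)))  # expected: [[  0.   0. 100.  80.]]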
|
60dbdb4d3aee5a4a0f7dc076ad6d8415ddc82ba0
| 3,640,025
|
def loss_fn(
models, backdoored_x, target_label, l2_factor=settings.BACKDOOR_L2_FACTOR,
):
"""loss function of backdoor model
loss_student = softmax_with_logits(teacher(backdoor(X)), target)
+ softmax_with_logits(student(backdoor(X)), target)
+ L2_norm(mask_matrix)
Args:
models(Python dict): teacher, student, backdoor models
        backdoored_x: a tf tensor of backdoored data, size = (batch_size, H, W, C)
target_label: a tf tensor of target label, one-hot encoded, size = (batch_size, class_num)
Returns:
loss_backdoor: a tf tensor indicates loss of backdoor model
"""
logits_from_teacher = models["teacher"](backdoored_x)
logits_from_student = models["student"](backdoored_x)
loss_backdoor = tf.nn.softmax_cross_entropy_with_logits(
labels=target_label, logits=logits_from_teacher
)
loss_backdoor += tf.nn.softmax_cross_entropy_with_logits(
labels=target_label, logits=logits_from_student
)
loss_backdoor += (
tf.nn.l2_loss(models["backdoor"].get_mask() * models["backdoor"].get_trigger())
* l2_factor
)
return tf.math.reduce_mean(loss_backdoor)
|
d13fa05f4f5ac7adbebb62a48774cfc552c3d42e
| 3,640,026
|
from .models import OneTimePassword, compute_expires_at
def create_otp(slug, related_objects=None, data=None, key_generator=None, expiration=None, deactivate_old=False):
"""
Create new one time password. One time password must be identified with slug.
Args:
slug: string for OTP identification.
related_objects: model instances related with OTP.
data: data which will be stored with OTP in the JSON format.
key_generator: OTP key generator.
expiration: OTP expiration time in seconds, default expiration will be used for None value.
        deactivate_old: deactivate old tokens with the same slug and related objects.
Returns:
OTP instance
"""
if deactivate_old:
deactivate_otp(slug, related_objects=related_objects)
key_generator = settings.OTP_DEFAULT_KEY_GENERATOR if key_generator is None else key_generator
key_generator = import_string(key_generator) if isinstance(key_generator, str) else key_generator
otp = OneTimePassword.objects.create(
slug=slug,
key_generator=key_generator,
expires_at=compute_expires_at(expiration or settings.OTP_DEFAULT_AGE),
data=data
)
if related_objects:
otp.related_objects.add(*related_objects)
return otp
|
20cbfd88b676ff0357fa5a37a51a3ffa24b4f76b
| 3,640,027
|
def get_pod_from_dn(dn):
"""
This parses the pod from a dn designator. They look like this:
topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min
"""
pod = POD_REGEX.search(dn)
if pod:
return pod.group(1)
else:
return None
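# --- Usage sketch (added illustration, not part of the original source) ---
# POD_REGEX is defined elsewhere in the original module; the pattern below is
# an assumed stand-in that captures the pod number from the dn.
import re

POD_REGEX = re.compile(r"/pod-(\d+)/")  # hypothetical definition
dn = "topology/pod-1/node-101/sys/phys-[eth1/6]/CDeqptMacsectxpkts5min"
print(get_pod_from_dn(dn))  # expected: '1'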
|
23b790bf7b216239916ba86829bb5bee0e346a4a
| 3,640,028
|
import trace
def extend_table(rows, table):
"""
appends the results of the array to the existing table by an objectid
"""
try:
dtypes = np.dtype(
[
('_ID', np.int),
('DOM_DATE', '|S48'),
('DOM_DATE_CNT', np.int32),
('DOM_DATE_PER', np.float64),
('DOM_YEAR', np.int32),
('DOM_YEAR_CNT', np.int32),
('DOM_YEAR_PER', np.float64),
('OLDEST_DATE', '|S1024'),
('NEWEST_DATE', '|S1024'),
('NO_DATE_CNT', np.int32),
('NO_DATE_PER', np.float64),
('PCT_2_YEAR', np.float64),
('PCT_5_YEAR', np.float64),
('PCT_10_YEAR', np.float64),
('PCT_15_YEAR', np.float64),
('PCT_15_PLUS_YEAR', np.float64),
('FEATURE_CNT', np.int32),
('CURRENCY_SCORE', np.int32)
]
)
array = np.array(rows, dtypes)
da.ExtendTable(table, "OID@", array, "_ID", False)
return table
except:
line, filename, synerror = trace()
raise FunctionError(
{
"function": "",
"line": line,
"filename": filename,
"synerror": synerror,
"arc" : str(arcpy.GetMessages(2))
}
)
|
fc34b897d7e23e8833a63b0fd7ce72cd090f35ab
| 3,640,029
|
def drawblock(arr, num_class=10, fixed=False, flip=False, split=False):
"""
draw images in block
:param arr: array of images. format='NHWC'. sequence=[cls1,cls2,cls3,...,clsN,cls1,cls2,...clsN]
    :param num_class: number of classes. By default this is the number of images stacked along the height; use flip=True to stack them along the width instead
    :param fixed: force the number of columns to equal the number of rows (or pass an int to set it explicitly)
    :param flip: if True, arrange num_class images along the width instead of the height
    :param split: set an int to split the block into that many sections (currently only horizontal splitting is supported)
    :return: block of images
"""
n_im = arr.shape[0]
h_im = arr.shape[1]
w_im = arr.shape[2]
c_im = arr.shape[3]
if flip:
num_w = num_class
num_h = np.ceil(np.float(n_im) / num_w) if not fixed else num_w
if fixed and isinstance(fixed, int):
num_h = fixed
else:
num_h = num_class
num_w = np.ceil(np.float(n_im) / num_h) if not fixed else num_h
if fixed and isinstance(fixed, int):
num_w = fixed
h_block = (h_im + 2) * num_h - 2
w_block = (w_im + 2) * num_w - 2
newarr = np.zeros((int(h_block), int(w_block), int(c_im)), dtype=np.uint8)
    for i in range(n_im):
if i > num_w * num_h - 1:
break
if flip:
wk = i % num_w
hk = i // num_w
else:
hk = i % num_h
wk = i // num_h
wk = int(wk)
hk = int(hk)
newarr[hk*(h_im+2):hk*(h_im+2)+h_im, wk*(w_im+2):wk*(w_im+2)+w_im, :] = arr[i]
if split:
temp = newarr
newnh = int(np.ceil(float(num_class) / split))
newh = (h_im + 2) * newnh - 2
neww = int(w_block * split + 2)
newarr = np.zeros((newh, neww, int(c_im)), dtype=np.uint8)
for i in range(split):
if not num_class % split == 0 and i == split - 1:
newarr[:-h_im-2, i * w_block+i*2:(i + 1) * w_block+(i+1)*2, :] = temp[i * newh+i*2:, :, :]
else:
newarr[:, i*w_block+i*2:(i+1)*w_block, :] = temp[i*newh+i*2:(i+1)*newh, :, :]
return newarr
|
221dc90d8a674963221abe11720d23ac92af6225
| 3,640,030
|
def with_key(output_key_matcher):
"""Check does it have a key."""
return output_key_matcher
|
5bcb64550ce202f66ac43325fe8876249b45c52d
| 3,640,031
|
def generatePersistenceManager(inputArgument, namespace = None):
"""Generates a persistence manager base on an input argument.
A persistence manager is a utility object that aids in storing persistent data that must be saved after the interpreter shuts
down. This function will interpret the input argument provided and will return an appropriate
persistence manager object if possible.
inputArgument -- if a True Bool: a generic persistence file will be used.
-- if a String: the string will be interpreted as a filename for the persistence file.
-- if a utilities.persistenceManager object: the object will be used directly.
namespace -- a text string used to specify a namespace for the persistence manager. This allows multiple identical VMs to share
a common persistence file.
"""
if type(inputArgument) == bool and inputArgument:
#a True bool was provided as the input argument. Create a new persistence manager that uses a default file.
persistenceFilename = "defaultPersistence.vmp"
return persistenceManager(persistenceFilename, namespace)
elif type(inputArgument) == str:
#A string was provided as the persistence manager, so use that string as the filename
return persistenceManager(inputArgument, namespace)
elif type(inputArgument) == persistenceManager:
# a persistenceManager object was provided, so use that.
if namespace:
inputArgument.namespace = namespace #update the namespace used by the persistence manager
return inputArgument
else:
return None
|
a1042764974d1b8030c6b6dd2add444bea9e521c
| 3,640,032
|
def get_app():
"""
Creates a Sanic application whose routes are documented using the `api` module.
The routes and their documentation must be kept in sync with the application created
by `get_benchmark_app()`, so that application can serve as a benchmark in test cases.
"""
app = Sanic("test_api")
app.blueprint(swagger_blueprint)
@MessageAPI.post(app, "/message")
def message(request):
data = request.json
assert "message" in data
return {"message": "Message received."}
@app.get("/excluded")
@MessageAPI(exclude=True, tag="Excluded")
def excluded(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.delete(app, "/excluded_delete")
def excluded_delete(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.get(app, "/excluded_get")
def excluded_get(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.head(app, "/excluded_head")
def excluded_head(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.options(app, "/excluded_options")
def excluded_options(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.patch(app, "/excluded_patch")
def excluded_patch(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.post(app, "/excluded_post")
def excluded_post(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.put(app, "/excluded_put")
def excluded_put(request):
return {"message": "Excluded."}
@ExcludedMessageAPI.route(app, "/excluded_route", methods=("GET", "POST"))
def excluded_route(request):
return {"message": "Excluded."}
return app
|
1f8a11ee404082dcca0c1df91910157e5c169854
| 3,640,033
|
import base64
def predict(request):
"""View to predict output for selected prediction model
Args:
request (json): prediction model input (and parameters)
Returns:
json: prediction output
"""
projects = [{"name":"Erschließung Ob den Häusern Stadt Tengen", "id":101227},
{"name":"Stadtbauamt Bräunlingen Feldweg", "id":101205}]
if request.method == "GET":
context = {"projects": projects}
return render(request, "app/predict.html", context)
elif request.method == "POST":
with open(image_path, "rb") as image_file:
image_data = base64.b64encode(image_file.read()).decode('utf-8')
context = {"projects": projects,
"image": image_data}
return render(request, 'app/predict/index.html', context)
|
364db414d2c5811df0fe36e516868e0db76f896b
| 3,640,034
|
def is_dict(etype) -> bool:
""" Determine whether etype is a Dict """
return type(etype) is GenericMeta and etype.__extra__ is dict
|
fb0e422e08abd3b20611a8817300334d32638b49
| 3,640,035
|
import torch
from typing import List
def hidden_state_embedding(hidden_states: torch.Tensor, layers: List[int],
use_cls: bool, reduce_mean: bool = True) -> torch.Tensor:
"""
Extract embeddings from hidden attention state layers.
Parameters
----------
hidden_states
Attention hidden states in the transformer model.
layers
List of layers to use for the embedding.
use_cls
Whether to use the next sentence token (CLS) to extract the embeddings.
reduce_mean
Whether to take the mean of the output tensor.
Returns
-------
Tensor with embeddings.
"""
hs = [hidden_states[layer][:, 0:1, :] if use_cls else hidden_states[layer] for layer in layers]
hs = torch.cat(hs, dim=1) # type: ignore
y = hs.mean(dim=1) if reduce_mean else hs # type: ignore
return y
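# --- Usage sketch (added illustration, not part of the original source) ---
# Fakes the hidden-state tuple a transformer would return: 4 layers of
# (batch=2, seq_len=5, dim=8) tensors, then pools the last two layers.
fake_hidden_states = [torch.randn(2, 5, 8) for _ in range(4)]
emb = hidden_state_embedding(fake_hidden_states, layers=[-2, -1], use_cls=False)
print(emb.shape)  # expected: torch.Size([2, 8])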
|
f732e834f9c3437a4a7278aa6b9bfc54589b093b
| 3,640,036
|
from datetime import datetime
def is_new_user(day: datetime, first_day: datetime):
"""
Check if user has contributed results to this project before
"""
if day == first_day:
return 1
else:
return 0
|
8da8039d1c8deb5bb4414565d3c9dc19ce15adb6
| 3,640,037
|
def to_ndarray(X):
"""
Convert to numpy ndarray if not already. Right now, this only converts
from sparse arrays.
"""
if isinstance(X, np.ndarray):
return X
elif sps.issparse(X):
print('Converting from sparse type: {}'.format(type(X)))
return X.toarray()
else:
raise ValueError('Unexpected data type: {}'.format(type(X)))
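# --- Usage sketch (added illustration, not part of the original source) ---
# Assumes the module-level aliases used above: numpy as np, scipy.sparse as sps.
import numpy as np
import scipy.sparse as sps

dense = np.eye(3)
sparse = sps.csr_matrix(dense)
print(to_ndarray(dense).shape)   # passed through unchanged: (3, 3)
print(to_ndarray(sparse).shape)  # converted from CSR: (3, 3)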
|
337a78066316f32cf3a4f541d38c78de18750264
| 3,640,038
|
def _2d_gauss(x, y, sigma=2.5 / 60.0):
"""A Gaussian beam"""
return np.exp(-(x ** 2 + y ** 2) / (2 * sigma ** 2))
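# --- Worked example (added illustration, not part of the original source) ---
# The beam peaks at 1.0 on axis and falls to exp(-1/2) ~ 0.607 at one sigma.
import numpy as np

sigma = 2.5 / 60.0
print(_2d_gauss(0.0, 0.0))    # expected: 1.0
print(_2d_gauss(sigma, 0.0))  # expected: ~0.6065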
|
c010989499682e4847376a162852c9f758907385
| 3,640,039
|
def attach_task_custom_attributes(queryset, as_field="task_custom_attributes_attr"):
"""Attach a json task custom attributes representation to each object of the queryset.
:param queryset: A Django projects queryset object.
:param as_field: Attach the task custom attributes as an attribute with this name.
:return: Queryset object with the additional `as_field` field.
"""
model = queryset.model
sql = """
SELECT json_agg(
row_to_json(custom_attributes_taskcustomattribute)
ORDER BY custom_attributes_taskcustomattribute.order
)
FROM custom_attributes_taskcustomattribute
WHERE custom_attributes_taskcustomattribute.project_id = {tbl}.id
"""
sql = sql.format(tbl=model._meta.db_table)
queryset = queryset.extra(select={as_field: sql})
return queryset
|
584d2f918ae1844beb5cab71318691094de6d56d
| 3,640,040
|
import torch
def softmax_like(env, *, trajectory_model, agent_model, log=False):
"""softmax_like
:param env: OpenAI Gym environment
:param trajectory_model: trajectory probabilistic program
:param agent_model: agent's probabilistic program
:param log: boolean; if True, print log info
"""
Qs = torch.as_tensor(
[
infer_Q(
env,
action,
trajectory_model=trajectory_model,
agent_model=agent_model,
log=log,
)
for action in range(env.action_space.n)
]
)
action_logits = args.alpha * Qs
action_dist = Categorical(logits=action_logits)
if log:
print('policy:')
print(
tabulate(
[action_dist.probs.tolist()],
headers=env.actions,
tablefmt='fancy_grid',
)
)
return action_dist.sample()
|
7b51e0336399914e357b4dbed0490e93fb22f70a
| 3,640,041
|
def bulk_add(packages, user):
"""
Support bulk add by processing entries like:
repo [org]
"""
added = 0
i = 0
packages = packages.split('\n')
num = len(packages)
org = None
results = str()
db.set(config.REDIS_KEY_USER_SLOTNUM_PACKAGE % user, num)
results += "Added %s slots.\n" % num
orgs_selected = db.hgetall(config.REDIS_KEY_USER_ORGS_SELECTED %
user).items()
for package in packages:
try: # First, try: repo [org]
package, org = package.split()
for orgsel in orgs_selected:
if org == orgsel[1]:
get_package_selected(user, package=package,
orgset=orgsel[0], slotset=i)
results += ("Added %s to slot %s with organization %s.\n" %
(package, i + 1, org))
added += 1
i += 1
except: # Next, try: repo
try:
package = package.split()
package = package[0]
get_package_selected(user, package=package, slotset=i)
results += "Added %s to slot %s.\n" % (
package, i + 1)
added += 1
i += 1
except: # Give up
pass
results += "Added %s packages" % added
if added == 0:
results += ", check org slots for matching org?\n"
else:
results += ".\n"
return results
|
7b027b45e6e3385fc3bc3da8916b8322dde7cfda
| 3,640,042
|
from numpy import sqrt

def laser_heater_to_energy_spread(energy_uJ):
"""
    Returns the rms energy spread induced, in keV.
Based on fits to measurement in SLAC-PUB-14338
"""
return 7.15*sqrt(energy_uJ)
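# --- Worked example (added illustration, not part of the original source) ---
# 4 uJ of heater energy gives 7.15 * sqrt(4) = 14.3 keV rms energy spread.
print(laser_heater_to_energy_spread(4.0))  # expected: 14.3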
|
59feb872f0c652e0ef28b0958d2b25c174a79152
| 3,640,043
|
def apparent_attenuation(og, fg):
"""Apparent attenuation
"""
return 100.0 * (float(og) - float(fg)) / float(og)
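# --- Worked example (added illustration, not part of the original source) ---
# Using gravity points: original gravity 50, final gravity 10 gives
# 100 * (50 - 10) / 50 = 80 % apparent attenuation.
print(apparent_attenuation(50, 10))  # expected: 80.0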
|
e22ce07229baa4eacb7388280630d6097e21f364
| 3,640,044
|
def most_similar(W, vocab, id2word, word, n=15):
"""
Find the `n` words most similar to the given `word`. The provided
`W` must have unit vector rows, and must have merged main- and
context-word vectors (i.e., `len(W) == len(word2id)`).
Returns a list of word strings.
"""
assert len(W) == len(vocab)
word_id = vocab[word][0]
dists = np.dot(W, W[word_id])
top_ids = np.argsort(dists)[::-1][:n + 1]
return [id2word[id] for id in top_ids if id != word_id][:n]
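# --- Usage sketch (added illustration, not part of the original source) ---
# Builds a tiny unit-normalised embedding matrix by hand; the vocab maps each
# word to (row index, count), mirroring the structure assumed by the function.
import numpy as np

W = np.array([[1.0, 0.0],
              [0.8, 0.6],
              [0.0, 1.0]])
vocab = {"king": (0, 10), "queen": (1, 8), "banana": (2, 3)}
id2word = {0: "king", 1: "queen", 2: "banana"}
print(most_similar(W, vocab, id2word, "king", n=2))  # expected: ['queen', 'banana']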
|
3e13a1e24935c7eacea9973c9af315d0a2a0fca4
| 3,640,045
|
def build_cell(num_units,
num_layers,
cell_fn,
initial_state=None,
copy_state=True,
batch_size=None,
output_dropout_rate=0.,
input_shape=None,
attention_mechanism_fn=None,
memory=None,
memory_sequence_len=None,
alignment_history=False,
mode=tf.estimator.ModeKeys.TRAIN,
name=None):
""""
General function to create RNN cells for decoding.
Handles multi-layer cases, LSTMs and attention wrappers
"""
if alignment_history == True:
print("a")
input()
cells = []
for _ in range(num_layers):
cell = cell_fn(num_units, dtype=tf.float32, name=name)
# build internal variables if input shape provided
if input_shape is not None:
cell.build(input_shape)
# apply dropout if its a tensor or we are in training
if ((isinstance(output_dropout_rate, tf.Tensor) or
output_dropout_rate > 0 and mode == tf.estimator.ModeKeys.TRAIN)):
cell = tf.contrib.rnn.DropoutWrapper(
cell,
output_keep_prob=1 - output_dropout_rate)
cells.append(cell)
if num_layers > 1:
cell = tf.nn.rnn_cell.MultiRNNCell(cells)
else:
cell = cells[0]
if initial_state is not None and not copy_state:
if batch_size is None:
batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0]
zero_state = cell.zero_state(batch_size, tf.float32)
initial_state = bridge_state(initial_state, zero_state)
if attention_mechanism_fn is not None:
attention_mechanism = attention_mechanism_fn(
num_units,
memory,
memory_sequence_len)
cell_input_fn = None
if isinstance(attention_mechanism, CoverageBahdanauAttention):
cell_input_fn = (
lambda inputs, attention: tf.concat([inputs, tf.split(attention, 2, axis=-1)[0]], -1))
cell = tf.contrib.seq2seq.AttentionWrapper(
cell,
attention_mechanism,
output_attention=not isinstance(
attention_mechanism, tf.contrib.seq2seq.BahdanauAttention),
attention_layer_size=num_units,
initial_cell_state=initial_state,
alignment_history=alignment_history)
if batch_size is None:
batch_size = tf.shape(tf.contrib.framework.nest.flatten(initial_state)[0])[0]
initial_state = cell.zero_state(batch_size, tf.float32)
return (cell, initial_state) if initial_state is not None else cell
|
85d284ba314bea94ba015f7a85d0ba6685103292
| 3,640,047
|
def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Use config values to set up a function enabling status retrieval."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
apcups_data = APCUPSdData(host, port)
hass.data[DOMAIN] = apcups_data
# It doesn't really matter why we're not able to get the status, just that
# we can't.
try:
apcups_data.update(no_throttle=True)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failure while testing APCUPSd status retrieval")
return False
return True
|
ccb2061fe8c36b799e5179f113c380d379ebec9d
| 3,640,048
|
from scipy import signal
def _lagged_coherence_1freq(x, f, Fs, N_cycles=3, f_step=1):
"""Calculate lagged coherence of x at frequency f using the hanning-taper FFT method"""
# Determine number of samples to be used in each window to compute lagged coherence
Nsamp = int(np.ceil(N_cycles * Fs / f))
# For each N-cycle chunk, calculate the fourier coefficient at the frequency of interest, f
chunks = _nonoverlapping_chunks(x, Nsamp)
C = len(chunks)
    hann_window = signal.windows.hann(Nsamp)
fourier_f = np.fft.fftfreq(Nsamp, 1 / float(Fs))
fourier_f_idx = np.argmin(np.abs(fourier_f - f))
fourier_coefsoi = np.zeros(C, dtype=complex)
for i2, c in enumerate(chunks):
fourier_coef = np.fft.fft(c * hann_window)
fourier_coefsoi[i2] = fourier_coef[fourier_f_idx]
# Compute the lagged coherence value
lcs_num = 0
for i2 in range(C - 1):
lcs_num += fourier_coefsoi[i2] * np.conj(fourier_coefsoi[i2 + 1])
lcs_denom = np.sqrt(np.sum(
np.abs(fourier_coefsoi[:-1])**2) * np.sum(np.abs(fourier_coefsoi[1:])**2))
return np.abs(lcs_num / lcs_denom)
|
8a1cefe6fa2ef87dbc71f3f4449afc4406fa2c5f
| 3,640,049
|
def program_hash(p: Program) -> Hash:
    """ Calculate the hash of a program """
string=";".join([f'{nm}({str(args)})' for nm,args in p.ops if nm[0]!='_'])
return md5(string.encode('utf-8')).hexdigest()
|
f12ed910bc94070f64fe673ddd81925a704c700a
| 3,640,050
|
async def get_events(user_creds, client_creds, list_args, filter_func=None):
"""List events from all calendars according to the parameters given.
The supplied credentials dict may be updated if tokens are refreshed.
:param user_creds: User credentials from `obtain_user_permission`.
:param client_creds: Client credentials from configuration.
:param list_args: Arguments to pass to the calendar API's event list
function.
:param filter_func: Callable that can filter out individual events.
The function should return True to include, False to exclude.
:raise CredentialsError: if the credentials have not been set up,
or if they have expired.
"""
filter_func = filter_func or no_filter
if "access_token" not in user_creds:
raise CredentialsError("No access token in user credentials.")
async with Aiogoogle(user_creds=user_creds, client_creds=client_creds) as aiogoogle:
# Is there a way to cache service discovery?
service = await aiogoogle.discover("calendar", "v3")
try:
calendar_list = await aiogoogle.as_user(
service.calendarList.list(), timeout=30
)
_update_user_creds(user_creds, aiogoogle.user_creds)
events = []
for calendar_list_entry in calendar_list["items"]:
events += await _get_calendar_events(
aiogoogle,
service,
list_args,
calendar_list_entry,
filter_func,
)
return dict(items=sorted(events, key=_event_sort_key_function))
except HTTPError as ex:
if "invalid_grant" in str(ex):
raise CredentialsError("User credentials rejected.") from ex
raise
|
00a99194c993c5155a03b985ba46fec84fd82ad7
| 3,640,051
|
import logging
import pickle
def process_file(input_file, input_type, index, is_parallel):
"""
Process an individual SAM/BAM file.
How we want to process the file depends on the input type and whether we
are operating in parallel. If in parallel the index must be loaded for each
input file. If the input is a BAM file it needs to be read using Pysam, if
SAM it can be read directly as a text file.
Args:
input_file: Path to the input file.
input_type: Whether the file is 'bam' or 'sam'.
index: If operating in parallel a string to the index file, if not the
loaded GTF index dictionary.
is_parallel: Whether to operate in parallel.
Returns:
Dictionary containing alignment statistics for the input file.
"""
sample_name = input_file.split("/")[-1]
logger = logging.getLogger("stats." + sample_name[0:10])
logger.info("Processing " + sample_name + "...")
if is_parallel:
logger.info("Loading index...")
with open(index, "rb") as index_file:
loaded_index = pickle.load(index_file)
logger.info("Loaded.")
else:
loaded_index = index
if input_type == "sam":
logger.info("Parsing SAM file...")
with open(input_file) as sam:
output_table = gen_stats(sam, input_type, sample_name, loaded_index)
elif input_type == "bam":
logger.info("Parsing BAM file...")
bam = pysam.AlignmentFile(input_file, "rb")
output_table = gen_stats(bam, input_type, sample_name, loaded_index)
logger.info("Finished " + sample_name)
return output_table
|
a10c6b520fb586f4320f538b91adf7e7add4ace3
| 3,640,052
|
def add_dictionaries(coefficients, representatives, p):
""" Computes a dictionary that is the linear combination of `coefficients`
on `representatives`
Parameters
----------
coefficients : :obj:`Numpy Array`
1D array with the same number of elements as `representatives`. Each
entry is an integer mod p.
representatives : :obj:`list(dict)`
List where each entry is a dictionary. The keys on each dictionary are
integers, and these might coincide with dictionaries on other entries.
p : int(prime)
Returns
-------
rep_sum : :obj:`dict`
Result of adding the dictionaries on `representatives` with
`coefficients`.
Example
-------
>>> import numpy as np
>>> p=5
>>> coefficients = np.array([1,2,3])
>>> representatives = [
... {0:np.array([1,3]), 3:np.array([0,0,1])},
... {0:np.array([4,3]),2:np.array([4,5])},
... {3:np.array([0,4,0])}]
>>> add_dictionaries(coefficients, representatives, p)
{0: array([4, 4]), 3: array([0, 2, 1]), 2: array([3, 0])}
"""
rep_sum = {}
for i, rep in enumerate(representatives):
for spx_idx in iter(rep):
if spx_idx not in rep_sum:
rep_sum[spx_idx] = (coefficients[i] * rep[spx_idx]) % p
else:
rep_sum[spx_idx] = (rep_sum[spx_idx] + coefficients[i] * rep[
spx_idx]) % p
# end else
# end for
# end for
# Find simplices where expression is zero
zero_simplices = []
for spx_idx in iter(rep_sum):
if not np.any(rep_sum[spx_idx]):
zero_simplices.append(spx_idx)
# end if
# end for
# If an entry is zero, delete it
for spx_idx in zero_simplices:
del rep_sum[spx_idx]
# end for
return rep_sum
|
ffdb894b11509a72bc6baadc4c8c0d0d15f98110
| 3,640,053
|
def dropsRowsWithMatchClassAndDeptRemainderIsZero(df, Col, RemainderInt, classToShrink):
"""
Takes as input a dataframe, a column, a remainder integer, and a class within the column.
    Returns the dataframe minus the rows that match classToShrink in Col and whose index is not a multiple of RemainderInt,
    i.e. only every RemainderInt-th row of that class is kept.
    """
    print("original length of dataframe = ", len(df))
    df_new = df.drop(df[(df[Col] == classToShrink) & (df.index % RemainderInt != 0)].index)
print("length of new dataframe after dropping rows = ", len(df_new))
print("number of rows dropped = ", len(df) - len(df_new))
print("length of 0 class is :", len(df_new[df_new[Col] == classToShrink]))
return df_new
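# --- Usage sketch (added illustration, not part of the original source) ---
# Column names below are made up; 20 rows all labelled 0, so only the rows
# whose index is a multiple of 10 survive the shrink.
import pandas as pd

demo = pd.DataFrame({"Facies": [0] * 20, "DEPT": range(20)})
shrunk = dropsRowsWithMatchClassAndDeptRemainderIsZero(demo, "Facies", 10, 0)
print(len(shrunk))  # expected: 2 (rows 0 and 10)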
|
f88ec5e8293d753defe0a6d31f083e52218011ba
| 3,640,054
|
import requests
import json
def get_token():
""" returns a session token from te internal API.
"""
auth_url = '%s/sessions' % local_config['INTERNAL_API_BASE_URL']
auth_credentials = {'eppn': 'worker@pebbles',
'password': local_config['SECRET_KEY']}
try:
r = requests.post(auth_url, auth_credentials, verify=local_config['SSL_VERIFY'])
return json.loads(r.text).get('token')
    except Exception:
return None
|
da875c11dd887a895fe6c133cba3d30e3b73082c
| 3,640,057
|
def setlist(L):
""" list[alpha] -> set[alpha] """
# E : set[alpha]
E = set()
# e : alpha
for e in L:
E.add(e)
return E
|
7607d3d47ea5634773298afaea12d03759c0f1d4
| 3,640,058
|
def _pixel_at(x, y):
"""
Returns (r, g, b) color code for a pixel with given coordinates (each value is in
0..256 limits)
"""
screen = QtGui.QGuiApplication.primaryScreen()
color = screen.grabWindow(0, x, y, 1, 1).toImage().pixel(0, 0)
return ((color >> 16) & 0xFF), ((color >> 8) & 0xFF), (color & 0xFF)
|
62341d5d7edc3529b5184babddf475bc35f407bf
| 3,640,060
|
from datetime import datetime, timedelta
from calendar import timegm
import time
def parse_tibia_time(tibia_time: str) -> datetime:
"""Gets a time object from a time string from tibia.com"""
    tibia_time = tibia_time.replace(",", "").replace("  ", " ")
# Getting local time and GMT
t = time.localtime()
u = time.gmtime(time.mktime(t))
# UTC Offset
local_utc_offset = ((timegm(t) - timegm(u)) / 60 / 60)
# Extracting timezone
tz = tibia_time[-4:].strip()
try:
# Convert time string to time object
# Removing timezone cause CEST and CET are not supported
t = datetime.strptime(tibia_time[:-4].strip(), "%b %d %Y %H:%M:%S")
except ValueError:
log.error("parse_tibia_time: couldn't parse '{0}'".format(tibia_time))
return None
# Getting the offset
if tz == "CET":
utc_offset = 1
elif tz == "CEST":
utc_offset = 2
else:
log.error("parse_tibia_time: unknown timezone for '{0}'".format(tibia_time))
return None
# Add/subtract hours to get the real time
return t + timedelta(hours=(local_utc_offset - utc_offset))
|
da9e8f4a9b8a94161d215ff1119d8510de57b434
| 3,640,061
|
def a3v(V: Vector3) -> np.ndarray:
"""Converts vector3 to numpy array.
Arguments:
V {Vector3} -- Vector3 class containing x, y, and z.
Returns:
np.ndarray -- Numpy array with the same contents as the vector3.
"""
return np.array([V.x, V.y, V.z])
|
f32476c613a8032bf7119d5b99a89e72c56628d2
| 3,640,062
|
def _p_value_color_format(pval):
"""Auxiliary function to set p-value color -- green or red."""
color = "green" if pval < 0.05 else "red"
return "color: %s" % color
|
ae58986dd586a1e6cd6b6281ff444f18175d1d32
| 3,640,063
|
def generator(seed):
"""
build the generator network.
"""
weights_initializer = tf.truncated_normal_initializer(stddev=0.02)
# fully connected layer to upscale the seed for the input of
# convolutional net.
target = tf.contrib.layers.fully_connected(
inputs=seed,
num_outputs=4 * 4 * 256,
activation_fn=tf.nn.relu,
normalizer_fn=None,
weights_initializer=weights_initializer,
scope='g_project')
# reshape to images
target = tf.reshape(target, [-1, 4, 4, 256])
# transpose convolution to upscale
for layer_idx in range(4):
if layer_idx == 3:
num_outputs = 1
kernel_size = 32
stride = 1
# arXiv:1511.06434v2
# use tanh in output layer
activation_fn = tf.nn.tanh
# arXiv:1511.06434v2
# use batch norm except the output layer
normalizer_fn = None
else:
num_outputs = 2 ** (6 - layer_idx)
kernel_size = 5
stride = 2
# arXiv:1511.06434v2
# use ReLU
activation_fn = tf.nn.relu
# arXiv:1511.06434v2
# use batch norm
normalizer_fn = tf.contrib.layers.batch_norm
target = tf.contrib.layers.convolution2d_transpose(
inputs=target,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
padding='SAME',
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
weights_initializer=weights_initializer,
scope='g_conv_t_{}'.format(layer_idx))
return target
|
93258f49ba0fc7d7d03507bdc7dc413b2a9e23d5
| 3,640,065
|