| content | sha1 | id |
|---|---|---|
| stringlengths 35–762k | stringlengths 40 | int64 0–3.66M |
from io import StringIO
import pandas
from datetime import datetime
def isotherm_from_bel(path):
"""
Get the isotherm and sample data from a BEL Japan .dat file.
Parameters
----------
path : str
Path to the file to be read.
Returns
-------
    PointIsotherm
        The isotherm and its metadata, as parsed from the file.
"""
with open(path) as file:
line = file.readline().rstrip()
meta = {}
data = StringIO()
while line != '':
values = line.split(sep='\t')
line = file.readline().rstrip()
if len(values) < 2: # If "title" section
# read adsorption section
if values[0].strip().lower().startswith('adsorption data'):
line = file.readline().rstrip() # header
file_headers = line.replace('"', '').split('\t')
new_headers = ['branch']
for h in file_headers:
txt = next((
_FIELDS['isotherm_data'][a]
for a in _FIELDS['isotherm_data']
if h.lower().startswith(a)
), h)
new_headers.append(txt)
if txt == 'loading':
meta['loading_basis'] = 'molar'
for (u, c) in (
('/mmol', 'mmol'),
('/mol', 'mol'),
('/ml(STP)', 'cm3(STP)'),
('/cm3(STP)', 'cm3(STP)'),
):
if u in h:
meta['loading_unit'] = c
meta['material_basis'] = 'mass'
for (u, c) in (
('g-1', 'g'),
('kg-1', 'kg'),
):
if u in h:
meta['material_unit'] = c
if txt == 'pressure':
meta['pressure_mode'] = 'absolute'
for (u, c) in (
('/mmHg', 'torr'),
('/torr', 'torr'),
('/kPa', 'kPa'),
('/bar', 'bar'),
):
if u in h:
meta['pressure_unit'] = c
data.write('\t'.join(new_headers) + '\n')
line = file.readline() # firstline
while not line.startswith('0'):
data.write('False\t' + line)
line = file.readline()
# read desorption section
elif values[0].strip().lower().startswith('desorption data'):
file.readline() # header - discard
line = file.readline() # firstline
while not line.startswith('0'):
data.write('True\t' + line)
line = file.readline()
else:
continue
else:
values = [v.strip('"') for v in values]
key = values[0].lower()
try:
field = next(
v for k, v in _FIELDS.items()
if any([key.startswith(n) for n in v.get('text', [])])
)
except StopIteration:
continue
meta[field['name']] = values[1]
# Read prepared table
data.seek(0) # Reset string buffer to 0
data_df = pandas.read_csv(data, sep='\t')
data_df.dropna(inplace=True, how='all', axis='columns')
# Set extra metadata
meta['date'] = datetime.strptime(meta['date'], r'%y/%m/%d').isoformat()
meta['apparatus'] = 'BEL ' + meta["serialnumber"]
meta['loading_key'] = 'loading'
meta['pressure_key'] = 'pressure'
meta['other_keys'] = sorted([
a for a in data_df.columns
if a not in ['loading', 'pressure', 'measurement', 'branch']
])
return PointIsotherm(isotherm_data=data_df, **meta)
|
7aa144942ef6e0cb3d2926817015692b8fe8b99b
| 3,645,179
|
from numpy import exp
def model(x, a, b, c):
"""
Compute
.. math::
y = A + Be^{Cx}
Parameters
----------
x : array-like
The value of the model will be the same shape as the input.
a : float
The additive bias.
b : float
The multiplicative bias.
c : float
The exponent.
    Returns
    -------
y : array-like
An array of the same shape as ``x``, containing the model
computed for the given parameters.
"""
return a + b * exp(c * x)
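A minimal usage sketch (synthetic data and starting values below are illustrative; scipy is assumed available):
import numpy as np
from scipy.optimize import curve_fit
x = np.linspace(0.0, 1.0, 50)
y = model(x, 1.0, 0.5, 2.0)                           # synthetic data from the model itself
popt, _ = curve_fit(model, x, y, p0=(0.0, 1.0, 1.0))  # recovers (a, b, c)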
|
12a8272fb773226ad328daeb460cc2ca84d4c6e0
| 3,645,181
|
import inspect
def equal_matches(
matches_a: kapture.Matches,
matches_b: kapture.Matches) -> bool:
"""
Compare two instances of kapture.Matches.
:param matches_a: first set of matches
:param matches_b: second set of matches
:return: True if they are identical, False otherwise.
"""
assert isinstance(matches_a, kapture.Matches)
assert isinstance(matches_b, kapture.Matches)
current_function_name = inspect.getframeinfo(inspect.currentframe()).function
return equal_sets(matches_a, matches_b, current_function_name)
|
57efdd63a56f4e94afc9a57e05f4e4f726ce7b44
| 3,645,183
|
import tensorflow as tf
def convert_to_example(img_data, target_data, img_shape, target_shape, dltile):
""" Converts image and target data into TFRecords example.
Parameters
----------
img_data: ndarray
Image data
target_data: ndarray
Target data
img_shape: tuple
Shape of the image data (h, w, c)
target_shape: tuple
Shape of the target data (h, w, c)
dltile: str
DLTile key
Returns
-------
Example: TFRecords example
TFRecords example
"""
if len(target_shape) == 2:
target_shape = (*target_shape, 1)
features = {
"image/image_data": _float64_feature(img_data),
"image/height": _int64_feature(img_shape[0]),
"image/width": _int64_feature(img_shape[1]),
"image/channels": _int64_feature(img_shape[2]),
"target/target_data": _float64_feature(target_data),
"target/height": _int64_feature(target_shape[0]),
"target/width": _int64_feature(target_shape[1]),
"target/channels": _int64_feature(target_shape[2]),
"dltile": _bytes_feature(tf.compat.as_bytes(dltile)),
}
return tf.train.Example(features=tf.train.Features(feature=features))
|
d8dd2b78a85d2e34d657aa36bfe3515ef1dd5418
| 3,645,184
|
import tensorflow as tf
def linear(args, output_size, bias, bias_start=0.0, scope=None, var_on_cpu=True, wd=0.0):
"""Linear map: sum_i(args[i] * W[i]), where W[i] is a variable.
Args:
args: a 2D Tensor or a list of 2D, batch x n, Tensors.
output_size: int, second dimension of W[i].
bias: boolean, whether to add a bias term or not.
bias_start: starting value to initialize the bias; 0 by default.
scope: VariableScope for the created subgraph; defaults to "Linear".
    var_on_cpu: if True, put the variables on /cpu:0.
    wd: weight decay coefficient; if non-zero, an L2 loss on the weight matrix
      is added to the 'losses' collection.
Returns:
A 2D Tensor with shape [batch x output_size] equal to
sum_i(args[i] * W[i]), where W[i]s are newly created matrices.
Raises:
    ValueError: if any of the arguments has an unspecified or wrong shape.
"""
assert args
if not isinstance(args, (list, tuple)):
args = [args]
# Calculate the total size of arguments on dimension 1.
total_arg_size = 0
shapes = [a.get_shape().as_list() for a in args]
for shape in shapes:
if len(shape) != 2:
raise ValueError("Linear is expecting 2D arguments: %s" % str(shapes))
if not shape[1]:
raise ValueError("Linear expects shape[1] of arguments: %s" % str(shapes))
else:
total_arg_size += shape[1]
# Now the computation.
with tf.variable_scope(scope or "Linear"):
if var_on_cpu:
with tf.device("/cpu:0"):
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
else:
matrix = tf.get_variable("Matrix", [total_arg_size, output_size])
if wd:
weight_decay = tf.mul(tf.nn.l2_loss(matrix), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
if len(args) == 1:
res = tf.matmul(args[0], matrix)
else:
res = tf.matmul(tf.concat(1, args), matrix)
if not bias:
return res
if var_on_cpu:
with tf.device("/cpu:0"):
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
else:
bias_term = tf.get_variable(
"Bias", [output_size],
initializer=tf.constant_initializer(bias_start))
return res + bias_term
|
1072115bc42b3d2acb3d41cd0116a636f6bb7804
| 3,645,185
|
def _start_job(rule, settings, urls=None):
"""Start a new job for an InfernoRule
Note that the output of this function is a tuple of (InfernoJob, DiscoJob)
    If this InfernoJob fails to start for some reason (e.g. not enough blobs),
    the DiscoJob will be None.
"""
job = InfernoJob(rule, settings, urls)
return job, job.start()
|
5eab30433004240d79f1ec4eeac868b40632cdde
| 3,645,186
|
def mult(dic,data,r=1.0,i=1.0,c=1.0,inv=False,hdr=False,x1=1.0,xn='default'):
"""
    Multiply by a Constant
Parameter c is used even when r and i are defined. NMRPipe ignores c when
r or i are defined.
Parameters:
* dic Dictionary of NMRPipe parameters.
* data array of spectral data.
    * r Constant to multiply real data by.
* i Constant to multiply imaginary data by.
* c Constant to multiply both real and imaginary data by.
* inv Multiply by inverse of Constant (both real and imaginary)
* hdr Use constant value from header.
* x1 First point of region to multiply constant by.
* xn Last point of region to multiply constant by. 'default' specifies
the end of the vector.
"""
mn = x1 - 1
if xn == 'default':
mx = data.shape[-1]
else:
mx = xn
if hdr: # read in C from header
fn = "FDF"+str(int(dic["FDDIMORDER"][0])) # F1, F2, etc
c = dic[fn+"C1"]
r = 1.0
i = 1.0
rf = (r*c) # real factor
cf = (i*c) # complex factor
if inv:
rf = 1/rf
cf = 1/cf
data[...,mn:mx] = p.mult(data[...,mn:mx],r=rf,i=cf,c=1.0)
dic = update_minmax(dic,data)
return dic,data
|
833ab3784dc9210aa6be0f968f9115982ea93753
| 3,645,187
|
import re
def navigation_target(m) -> re.Pattern:
"""A target to navigate to. Returns a regular expression."""
if hasattr(m, 'any_alphanumeric_key'):
return re.compile(re.escape(m.any_alphanumeric_key), re.IGNORECASE)
if hasattr(m, 'navigation_target_name'):
return re.compile(m.navigation_target_name)
return re.compile(re.escape(m.text), re.IGNORECASE)
|
62cc847f5454e76afb128fd752b7fa83fd2e167e
| 3,645,189
|
def hammingDistance(strA, strB):
""" Determines the bitwise Hamming Distance between two strings. Used to
determine the fitness of a mutating string against the input.
Example:
bin(ord('a')) == '0b1100001'
bin(ord('9')) == '0b0111001'
bin(ord('a') ^ ord('9')) == '0b1011000'
bin(ord('a') ^ ord('9')).count('1') == 3
hammingDistance('a', '9') == 3
hammingDistance('a', '9') * 4 == 12
hammingDistance('aaaa', '9999') == 12
Args:
strA: A string
strB: A string
Returns:
Returns an integer that represents the Hamming Distance from a to b.
Raises:
ValueError: If the two strings are unequal in length or if one input is
not a string.
"""
    if (not isinstance(strA, str) or not isinstance(strB, str)):
raise ValueError('Input is not a string', strA, strB)
if len(strA) != len(strB):
raise ValueError('The two strings are unequal in length', strA, strB)
# base case, hamming distance of nothing and nothing is 0
if (len(strA) == 0) and (len(strB) == 0):
return 0
# XOR both first characters, count the 1s, remaining is recursive case
return (
bin(ord(strA[0]) ^ ord(strB[0])).count('1') +
hammingDistance(strA[1:], strB[1:])
)
|
d417ac22a1abd4c2df0f274d809096f354ac4150
| 3,645,190
|
import regex
def convert(s, syntax=None):
"""Convert a regex regular expression to re syntax.
The first argument is the regular expression, as a string object,
just like it would be passed to regex.compile(). (I.e., pass the
actual string object -- string quotes must already have been
removed and the standard escape processing has already been done,
e.g. by eval().)
The optional second argument is the regex syntax variant to be
used. This is an integer mask as passed to regex.set_syntax();
the flag bits are defined in regex_syntax. When not specified, or
when None is given, the current regex syntax mask (as retrieved by
regex.get_syntax()) is used -- which is 0 by default.
The return value is a regular expression, as a string object that
could be passed to re.compile(). (I.e., no string quotes have
been added -- use quote() below, or repr().)
The conversion is not always guaranteed to be correct. More
syntactical analysis should be performed to detect borderline
cases and decide what to do with them. For example, 'x*?' is not
translated correctly.
"""
table = mastertable.copy()
if syntax is None:
syntax = regex.get_syntax()
if syntax & RE_NO_BK_PARENS:
del table[r'\('], table[r'\)']
del table['('], table[')']
if syntax & RE_NO_BK_VBAR:
del table[r'\|']
del table['|']
if syntax & RE_BK_PLUS_QM:
table['+'] = r'\+'
table['?'] = r'\?'
table[r'\+'] = '+'
table[r'\?'] = '?'
if syntax & RE_NEWLINE_OR:
table['\n'] = '|'
res = ""
i = 0
end = len(s)
while i < end:
c = s[i]
i = i+1
if c == '\\':
c = s[i]
i = i+1
key = '\\' + c
key = table.get(key, key)
res = res + key
else:
c = table.get(c, c)
res = res + c
return res
|
e61a9d555008c9b36b579f6eb1e32e1e9fa0e983
| 3,645,191
|
from sqlalchemy import select
def current_user(request):
"""Return the list of all the users with their ids.
"""
query = select([
User.id.label('PK_id'),
User.Login.label('fullname')
]).where(User.id == request.authenticated_userid)
return dict(DBSession.execute(query).fetchone())
|
6951a6c638886d773a9e92e161d9aa2b166b17b3
| 3,645,192
|
import numpy as np
def get_test_examples_labels(dev_example_list, batch_size):
"""
:param dev_example_list: list of filenames containing dev examples
:param batch_size: int
:return: list of nlplingo dev examples, dev labels
"""
dev_chunk_generator = divide_chunks(dev_example_list, NUM_BIG_CHUNKS)
test_examples = []
# dev_chunk_generator yields lists, each of len == NUM_BIG_CHUNKS
for big_chunk in dev_chunk_generator:
        chunk_lst = load_big_chunk(big_chunk)  # big_chunk is a list of .npz filepaths
example_lst = []
for chunk in chunk_lst:
example_lst.extend(chunk)
example_generator = divide_chunks(example_lst, batch_size)
for example_chunk in example_generator:
test_examples.extend(example_chunk)
labels = [example.label for example in test_examples]
test_label = np.asarray(labels)
return test_examples, test_label
|
e46a3c1d9780c8b74fcef3311267ad87f5938a66
| 3,645,193
|
import uuid
from operator import getitem
from typing import Iterator
from typing import OrderedDict
from dataclasses import fields, is_dataclass
from dask.base import is_dask_collection, tokenize
def unpack_collections(*args, **kwargs):
"""Extract collections in preparation for compute/persist/etc...
Intended use is to find all collections in a set of (possibly nested)
python objects, do something to them (compute, etc...), then repackage them
in equivalent python objects.
Parameters
----------
*args
Any number of objects. If it is a dask collection, it's extracted and
added to the list of collections returned. By default, python builtin
collections are also traversed to look for dask collections (for more
information see the ``traverse`` keyword).
traverse : bool, optional
If True (default), builtin python collections are traversed looking for
any dask collections they might contain.
Returns
-------
collections : list
A list of all dask collections contained in ``args``
repack : callable
A function to call on the transformed collections to repackage them as
they were in the original ``args``.
"""
traverse = kwargs.pop("traverse", True)
collections = []
repack_dsk = {}
collections_token = uuid.uuid4().hex
def _unpack(expr):
if is_dask_collection(expr):
tok = tokenize(expr)
if tok not in repack_dsk:
repack_dsk[tok] = (getitem, collections_token, len(collections))
collections.append(expr)
return tok
tok = uuid.uuid4().hex
if not traverse:
tsk = quote(expr)
else:
# Treat iterators like lists
typ = list if isinstance(expr, Iterator) else type(expr)
if typ in (list, tuple, set):
tsk = (typ, [_unpack(i) for i in expr])
elif typ in (dict, OrderedDict):
tsk = (typ, [[_unpack(k), _unpack(v)] for k, v in expr.items()])
elif is_dataclass(expr) and not isinstance(expr, type):
tsk = (
apply,
typ,
(),
(
dict,
[
[f.name, _unpack(getattr(expr, f.name))]
for f in fields(expr)
],
),
)
else:
return expr
repack_dsk[tok] = tsk
return tok
out = uuid.uuid4().hex
repack_dsk[out] = (tuple, [_unpack(i) for i in args])
def repack(results):
dsk = repack_dsk.copy()
dsk[collections_token] = quote(results)
return simple_get(dsk, out)
return collections, repack
|
d855a4cea5cb16a5863d670edb4bda6d15e2b371
| 3,645,194
|
def get_usernames(joomlasession):
"""Get list of usernames on the homepage."""
users = joomlasession.query(Jos_Users).all()
return [user.username for user in users]
|
b866cdd8fb47d12b7b79291f3335ded217fa8a1d
| 3,645,195
|
def minor_block_encoder(block, include_transactions=False, extra_info=None):
"""Encode a block as JSON object.
:param block: a :class:`ethereum.block.Block`
:param include_transactions: if true transaction details are included, otherwise
only their hashes
:param extra_info: MinorBlockExtraInfo
:returns: a json encodable dictionary
"""
header = block.header
meta = block.meta
header_info = minor_block_header_encoder(header)
d = {
**header_info,
"hashMerkleRoot": data_encoder(meta.hash_merkle_root),
"hashEvmStateRoot": data_encoder(meta.hash_evm_state_root),
"gasUsed": quantity_encoder(meta.evm_gas_used),
"size": quantity_encoder(len(block.serialize())),
}
if include_transactions:
d["transactions"] = []
for i, _ in enumerate(block.tx_list):
d["transactions"].append(tx_encoder(block, i))
else:
d["transactions"] = [
id_encoder(tx.get_hash(), block.header.branch.get_full_shard_id())
for tx in block.tx_list
]
if extra_info:
_add_posw_info_to_resp(d, header.difficulty, extra_info)
return d
|
53b0eb26e7a8ef0c05f4149c71b09ff6505f85d0
| 3,645,196
|
import numpy as np
def heaviside(x):
    """Implementation of the Heaviside step function (https://en.wikipedia.org/wiki/Heaviside_step_function)
    Args:
        x: Numpy-Array or single Scalar
    Returns:
        x with step values
    """
    # np.where evaluates elementwise, so this works for arrays as well as scalars
    return np.where(x <= 0, 0, 1)
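A quick check of the element-wise behavior:
heaviside(np.array([-2.0, 0.0, 3.5]))  # -> array([0, 0, 1])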
|
5ef05263637501f82cea3befe897cd60ec39994d
| 3,645,197
|
import codecs
def FallbackReader(fname):
    """Guess the encoding of a file by brute force by trying one
    encoding after the next until something succeeds.
    @param fname: file path to read from
    """
    for enc in GetEncodings():
        handle = open(fname, 'rb')
        try:
            reader = codecs.getreader(enc)(handle)
            txt = reader.read()
            reader.close()
        except Exception:
            handle.close()
            continue
        else:
            return (enc, txt)
    return (None, None)
|
d9f4235df9f472c7584192e920980f5f2668202a
| 3,645,198
|
import plotly.graph_objects as go
def graph_3D(data, col="category", list_=[None], game=None, extents=None):
"""
3D t-sne graph data output
:param data: a pandas df generated from app_wrangling.call_boardgame_data()
:param col: string indicating which column (default 'category')
:param list_: list of elements in column (default [None])
:param game: string of board game name (default None)
    :param extents: dict of axis limits with keys 'min_x'/'max_x', 'min_y'/'max_y', 'min_z'/'max_z' (default None)
:return fig_out: 3D plotly figure
"""
# layout for the 3D plot:
axis_x = dict(
title="",
showgrid=True,
zeroline=False,
showticklabels=False,
showspikes=False,
range=[extents["min_x"], extents["max_x"]],
)
axis_y = axis_x.copy()
axis_y["range"] = [extents["min_y"], extents["max_y"]]
axis_z = axis_x.copy()
axis_z["range"] = [extents["min_z"], extents["max_z"]]
layout_out = go.Layout(
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(xaxis=axis_x, yaxis=axis_y, zaxis=axis_z),
legend=dict(yanchor="top", y=0.93, xanchor="right", x=0.99),
paper_bgcolor="rgba(0,0,0,0)",
plot_bgcolor="rgba(0,0,0,0)",
)
# plotting data:
if (list_ == [None]) or (not list_):
set_data = data.copy(deep=True)
set_data["group"] = "none"
else:
set_data = app_wr.call_boardgame_radio(data, col, list_).explode("group")
data_out = []
# corresponds with dark2 palette:
# had trouble manually setting color palette for graph_object:
color_list = [
"#1b9e77",
"#d95f02",
"#7570b3",
"#e7298a",
"#66a61e",
"#e6ab02",
"#a6761d",
"#666666",
]
i = 0
for idx, val in set_data.groupby(set_data.group):
if idx == "none":
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.1,
color="grey",
)
legend_show = False
else:
marker_style = dict(
size=val["average_rating"] * 1.6,
symbol="circle",
opacity=0.4,
color=color_list[i],
)
legend_show = True
i += 1
scatter = go.Scatter3d(
name=idx,
x=val["x"],
y=val["y"],
z=val["z"],
mode="markers",
marker=marker_style,
text=val["name"],
hoverinfo="text+name",
showlegend=legend_show,
)
data_out.append(scatter)
if game:
game_data = data[data["name"] == game]
marker_style = dict(
size=game_data["average_rating"] * 1.6,
symbol="circle",
opacity=1.0,
color="purple",
)
scatter = go.Scatter3d(
name=game,
x=game_data["x"],
y=game_data["y"],
z=game_data["z"],
mode="markers",
marker=marker_style,
text=game_data["name"],
hoverinfo="text",
)
data_out.append(scatter)
fig_out = {"data": data_out, "layout": layout_out}
return fig_out
|
17ad05f2c7cc3413c145009fb72aed366ec9ab49
| 3,645,199
|
def get_selector(info, mode="advanced"):
"""
The selector that decides the scope of the dashboard. It MUST have the keywords
?work and ?author.
You can override everything here by adapting the query on WDQS:
https://w.wiki/3Cmd
Args:
info: either a dict containing complex information for the selector or a list of QIDs
        mode: a string representing the mode. If "advanced", a config dict is expected for the
            info parameter. If "basic", a list of QIDs is expected. Defaults to "advanced".
"""
if mode == "advanced":
fields_of_work = info["restriction"]["author_area"]
if fields_of_work is not None:
field_of_work_selector = (
"""
VALUES ?field_of_work """
+ format_with_prefix(fields_of_work)
+ """
?author wdt:P101 ?field_of_work.
"""
)
else:
field_of_work_selector = ""
topic_of_work = info["restriction"]["topic_of_work"]
if topic_of_work is not None:
topic_of_work_selector = (
"""
VALUES ?topics """
+ format_with_prefix(topic_of_work)
+ """
?work wdt:P921/wdt:P279* ?topics.
"""
)
else:
topic_of_work_selector = ""
region = info["restriction"]["institution_region"]
if region is not None:
region_selector = (
"""
VALUES ?regions """
+ format_with_prefix(region)
+ """
?country wdt:P361* ?regions.
?author ( wdt:P108 | wdt:P463 | wdt:P1416 ) / wdt:P361* ?organization .
?organization wdt:P17 ?country.
"""
)
else:
region_selector = ""
gender = info["restriction"]["gender"]
if gender is not None:
gender_selector = (
"""
VALUES ?gender """
+ format_with_prefix(gender)
+ """
?author wdt:P21 ?gender.
"""
)
else:
gender_selector = ""
event = info["restriction"]["event"]
if event is not None:
# P823 - speaker
# P664 - organizer
            # P1344 - has participant
# ^P710 - inverse of (participated in)
event_selector = (
"""
VALUES ?event """
+ format_with_prefix(event)
+ """
?event wdt:P823 | wdt:P664 | wdt:P1344 | ^wdt:P710 ?author.
"""
)
else:
event_selector = ""
author_is_topic_of = info["restriction"]["author_is_topic_of"]
if author_is_topic_of is not None:
author_is_topic_of_selector = (
"""
VALUES ?biographical_work """
+ format_with_prefix(author_is_topic_of)
+ """
?biographical_work wdt:P921 ?author.
"""
)
else:
author_is_topic_of_selector = ""
selector = (
field_of_work_selector
+ topic_of_work_selector
+ region_selector
+ gender_selector
+ event_selector
+ author_is_topic_of_selector
+ """
?work wdt:P50 ?author.
"""
)
else:
selector = f"""
VALUES ?work {format_with_prefix(info)} .
?work wdt:P50 ?author .
"""
return selector
|
c499a2b4e29a1afc91e6d40563a71ad5e71b7724
| 3,645,200
|
def get_table_names(self, connection, schema=None, **kw):
"""
Get table names
Args:
connection ():
schema ():
**kw:
    Returns:
        list of table names
"""
return self._get_table_or_view_names(
["r", "e"], connection, schema, **kw
)
|
e66ae9eb284e10785c7172ab36c79b25a48dce47
| 3,645,201
|
def get_general(prefix, generator, pars, **kwargs):
""" A general getter function that either gets the asked-for data
from a file or generates it with the given generator function. """
pars = get_pars(pars, **kwargs)
id_pars, pars = get_id_pars_and_set_default_pars(pars)
try:
result = read_tensor_file(prefix=prefix, pars=id_pars,
filename=filename)
except RuntimeError:
result = generator(pars, id_pars)
return result
|
a27e25e0ec992f8faa13b167dafd38edd6eb6a1d
| 3,645,202
|
def gen_case(test):
"""Generates an OK test case for a test
Args:
test (``Test``): OK test for this test case
Returns:
``dict``: the OK test case
"""
code_lines = str_to_doctest(test.input.split('\n'), [])
for i in range(len(code_lines) - 1):
if code_lines[i+1].startswith('>>>') and len(code_lines[i].strip()) > 3 and not code_lines[i].strip().endswith("\\"):
code_lines[i] += ';'
code_lines.append(test.output)
return {
'code': '\n'.join(code_lines),
'hidden': test.hidden,
'locked': False
}
|
586a5436442172d43a022e33185bd84c302fdb9c
| 3,645,203
|
def get_user_list_view(request):
"""
render user admin view
Arguments:
request {object} -- wsgi http request object
Returns:
html -- render html template
"""
if request.user.has_perm('auth.view_user'):
user_list = User.objects.all()
temp_name = 'admin/list_users.html'
context = {
'user_url_path': '用户',
'obj': user_list
}
else:
temp_name = 'admin/error.html'
context = {}
return render(
request,
temp_name,
context=context
)
|
f0ee280ac60a48f61f5da0d7d63b050e16ea6696
| 3,645,204
|
import numpy as np
def to_odds(p):
"""
Converts a probability to odds
"""
with np.errstate(divide='ignore'):
return p / (1 - p)
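For example:
to_odds(0.75)                  # -> 3.0
to_odds(np.array([0.5, 1.0]))  # -> array([ 1., inf]) with the divide-by-zero warning suppressed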
|
b468b75ad736ca67e1fb39fd231bc185d851fbdf
| 3,645,205
|
import numpy as np
def step_euler(last, dt, drift, volatility, noise):
"""Approximate SDE in one time step with Euler scheme"""
return last + drift * dt + np.dot(volatility, noise)
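A minimal sketch iterating the scheme for a 1-D geometric Brownian motion (all parameters below are illustrative):
rng = np.random.default_rng(0)
dt, mu, sigma = 0.01, 0.05, 0.2
x = np.array([1.0])
for _ in range(1000):
    noise = rng.normal(0.0, np.sqrt(dt), size=1)
    x = step_euler(x, dt, drift=mu * x, volatility=np.diag(sigma * x), noise=noise)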
|
e9f58425f696316679730397168c7965b3faadd5
| 3,645,206
|
def KK_RC66_fit(params, w, t_values):
    """
    Kramers-Kronig Function: -RC-
    Kristian B. Knudsen (kknu@berkeley.edu / kristianbknudsen@gmail.com)
    """
    # Rs in series with 66 parallel RC elements: Rs + sum_n R_n / (1 + j*w*tau_n)
    Rs = params["Rs"]
    return Rs + sum(
        params["R%d" % n] / (1 + w * 1j * t_values[n - 1]) for n in range(1, 67)
    )
|
eb64f86bc0a8a7ff0d88a1246a754563a955c61f
| 3,645,207
|
from typing import List
def similar_in_manner(manner_1: UnmarkableManner) -> List[Manner]:
"""
If the value is a wildcard value, return
    all possible manner of articulation values; otherwise
return the single corresponding manner of articulation value.
"""
if isinstance(manner_1, MarkedManner):
return manner_1.manner
return manner_states
|
fea0c78c93a5f80e4f2bba6eac7f628106fba796
| 3,645,208
|
import re
def wikify(value):
"""Converts value to wikipedia "style" of URLS, removes non-word characters
    and converts spaces to underscores, leaving the case of value intact.
"""
value = re.sub(r'[^\w\s-]', '', value).strip()
return re.sub(r'[-\s]+', '_', value)
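For example:
wikify("Guido van Rossum (BDFL)")  # -> 'Guido_van_Rossum_BDFL'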
|
dc4504ea6eb7905b5e18a1d1f473a4f337697b26
| 3,645,209
|
def build_model(name, num_classes, loss='softmax', pretrained=True,
use_gpu=True, dropout_prob=0.0, feature_dim=512, fpn=True, fpn_dim=256,
gap_as_conv=False, input_size=(256, 128), IN_first=False):
"""A function wrapper for building a model.
"""
avai_models = list(__model_factory.keys())
if name not in avai_models:
raise KeyError('Unknown model: {}. Must be one of {}'.format(name, avai_models))
return __model_factory[name](
num_classes=num_classes,
loss=loss,
pretrained=pretrained,
use_gpu=use_gpu,
dropout_prob=dropout_prob,
feature_dim=feature_dim,
fpn=fpn,
fpn_dim=fpn_dim,
gap_as_conv=gap_as_conv,
input_size=input_size,
IN_first=IN_first
)
|
1278e5c30ebdb73e011b0630de3738936e87dc93
| 3,645,210
|
def policy_absent(name):
"""
Ensure that the named policy is not present
:param name: The name of the policy to be deleted
:returns: The result of the state execution
:rtype: dict
"""
current_policy = __salt__['mdl_vault.get_policy'](name)
ret = {'name': name,
'comment': '',
'result': False,
'changes': {}}
if not current_policy:
ret['result'] = True
ret['comment'] = ('The {policy_name} policy is not present.'.format(
policy_name=name))
elif __opts__['test']:
ret['result'] = None
if current_policy:
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
ret['comment'] = ('The {policy_name} policy {suffix}.'.format(
policy_name=name,
suffix='will be deleted' if current_policy else 'is not present'))
else:
try:
__salt__['mdl_vault.delete_policy'](name)
ret['result'] = True
            ret['comment'] = ('The {policy_name} policy was successfully '
                              'deleted.'.format(policy_name=name))
ret['changes']['old'] = current_policy
ret['changes']['new'] = {}
except __utils__['mdl_vault.vault_error']() as e:
log.exception(e)
ret['comment'] = ('The {policy_name} policy failed to be '
'created/updated'.format(policy_name=name))
return ret
|
6f1498f07a8e14f2e7668d7d5cc8d68128cb6004
| 3,645,211
|
def _tolist(arg):
"""
Assure that *arg* is a list, e.g. if string or None are given.
Parameters
----------
arg :
Argument to make list
Returns
-------
list
list(arg)
Examples
--------
>>> _tolist('string')
['string']
>>> _tolist([1,2,3])
[1, 2, 3]
>>> _tolist(None)
[None]
"""
if isinstance(arg, str):
return [arg]
try:
return list(arg)
except TypeError:
return [arg]
|
e4293991eeb6d15470511281680af44353232c37
| 3,645,212
|
import numpy as np
def calc_Qhs_sys(bpr, tsd):
"""
it calculates final loads
"""
    # GET SYSTEMS EFFICIENCIES
energy_source = bpr.supply['source_hs']
scale_technology = bpr.supply['scale_hs']
efficiency_average_year = bpr.supply['eff_hs']
if scale_technology == "BUILDING":
if energy_source == "GRID":
tsd['E_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NATURALGAS":
tsd['NG_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "OIL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "COAL":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['SOLAR_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "WOOD":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
elif scale_technology == "DISTRICT":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = tsd['Qhs_sys'] / efficiency_average_year
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
elif scale_technology == "NONE":
tsd['NG_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['COAL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['OIL_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['WOOD_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['DH_hs'] = np.zeros(HOURS_IN_YEAR)
tsd['E_hs'] = np.zeros(HOURS_IN_YEAR)
else:
raise Exception('check potential error in input database of LCA infrastructure / HEATING')
return tsd
|
6016e2061b441248606b5a3ea95930d38b678525
| 3,645,213
|
import socket
from time import time as now
def wait_net_service(server, port, timeout=None):
""" Wait for network service to appear
@param timeout: in seconds, if None or 0 wait forever
    @return: True or False, if timeout is None may return only True or
throw unhandled network exception
"""
    if timeout:
        # time module is needed to calc timeout shared between two exceptions
        end = now() + timeout
    while True:
        # create a fresh socket each attempt: a socket whose connect() failed
        # cannot reliably be reused
        s = socket.socket()
        try:
            if timeout:
                next_timeout = end - now()
                if next_timeout < 0:
                    return False
                else:
                    s.settimeout(next_timeout)
            s.connect((server, port))
        except (socket.timeout, socket.error):
            s.close()
        else:
            s.close()
            return True
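For example, to block for at most five seconds while a local service comes up (host and port are illustrative):
if wait_net_service("127.0.0.1", 8080, timeout=5):
    print("service is up")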
|
f8089ed335c783140ab4b334c44c534ee5c48121
| 3,645,214
|
import re
def latex_nucleus(nucleus):
"""Creates a isotope symbol string for processing by LaTeX.
Parameters
----------
nucleus : str
Of the form `'<mass><sym>'`, where `'<mass>'` is the nuceleus'
mass number and `'<sym>'` is its chemical symbol. I.e. for
lead-207, `nucleus` would be `'207Pb'`.
Returns
-------
latex_nucleus : str
Of the form ``$^{<mass>}$<sym>`` i.e. given `'207Pb'`, the
return value would be ``$^{207}$Pb``
Raises
------
ValueError
If `nucleus` does not match the regex ``^[0-9]+[a-zA-Z]+$``
"""
    if re.fullmatch(r'\d+[a-zA-Z]+', nucleus):
mass = re.search(r'\d+', nucleus).group()
sym = re.search(r'[a-zA-Z]+', nucleus).group()
return f'$^{{{mass}}}${sym}'
else:
raise ValueError(
f'{cols.R}`nucleus` is invalid. Should match the regex'
f' \\d+[a-zA-Z]+{cols.END}'
)
|
e2a2c04a63284cdd8f06fdd556306149e7092703
| 3,645,215
|
def ConvertToFloat(line, colnam_list):
"""
Convert some columns (in colnam_list) to float, and round by 3 decimal.
:param line: a dictionary from DictReader.
:param colnam_list: float columns
:return: a new dictionary
"""
for name in colnam_list:
line[name] = round(float(line[name]), 3)
return line
|
e95fd6cfa9bb57060fdd835eea139fd9c67bc211
| 3,645,216
|
import numpy as np
def rnn_step(x, prev_h, Wx, Wh, b):
"""
Run the forward pass for a single timestep of a vanilla RNN that uses a tanh
activation function.
The input data has dimension D, the hidden state has dimension H, and we use
a minibatch size of N.
Inputs:
- x: Input data for this timestep, of shape (N, D).
- prev_h: Hidden state from previous timestep, of shape (N, H)
- Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
- Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
- b: Biases of shape (H,)
Returns a tuple of:
- next_h: Next hidden state, of shape (N, H)
"""
next_h = np.tanh(x.dot(Wx) + prev_h.dot(Wh) + b)
return next_h
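A minimal shape check (dimensions are arbitrary):
N, D, H = 4, 3, 5
x = np.random.randn(N, D)
prev_h = np.zeros((N, H))
Wx = np.random.randn(D, H)
Wh = np.random.randn(H, H)
b = np.zeros(H)
assert rnn_step(x, prev_h, Wx, Wh, b).shape == (N, H)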
|
78fef10a4c23b7a33a60829e1ad02a2e0381b834
| 3,645,217
|
from typing import List
import json
def transform_application_assigned_users(json_app_data: str) -> List[str]:
"""
Transform application users data for graph consumption
:param json_app_data: raw json application data
:return: individual user id
"""
users: List[str] = []
app_data = json.loads(json_app_data)
for user in app_data:
users.append(user["id"])
return users
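For example, with an illustrative payload:
transform_application_assigned_users('[{"id": "u1"}, {"id": "u2"}]')  # -> ['u1', 'u2']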
|
625c8f662b364bb3fe63bb26b06eaca57ae8be79
| 3,645,218
|
def testapp(app):
"""Create Webtest app."""
return TestApp(app)
|
68a46e993d75fc44de5a9063b409e89647a2738b
| 3,645,219
|
def to_y_channel(img):
"""Change to Y channel of YCbCr.
Args:
img (ndarray): Images with range [0, 255].
Returns:
(ndarray): Images with range [0, 255] (float type) without round.
"""
img = img.astype(np.float32) / 255.
if img.ndim == 3 and img.shape[2] == 3:
img = bgr2ycbcr(img, y_only=True)
img = img[..., None]
return img * 255.
|
64873fabbe6cd12db8c9bc96e4e93f7181c7742d
| 3,645,220
|
def get_user_signatures(user_id):
"""
Given a user ID, returns the user's signatures.
:param user_id: The user's ID.
:type user_id: string
:return: list of signature data for this user.
:rtype: [dict]
"""
user = get_user_instance()
try:
user.load(user_id)
except DoesNotExist as err:
return {'errors': {'user_id': str(err)}}
signatures = user.get_user_signatures()
return [agr.to_dict() for agr in signatures]
|
8c099f84c9a9f383b56019f985f1be63e315503a
| 3,645,221
|
import json
from flask import request
def post_vehicle_action():
""" Add vehicle
:return:
"""
output = JsonOutput()
try:
if not request.is_json:
raise TypeError('Payload is not json')
payload = request.json
usecases.SetVehicleUsecase(db=db, vehicle=payload).execute()
output.add(status=200, response=json.dumps({'data': request.json}))
except Exception as error:
db.session.rollback()
app.logger.critical(str(error))
output.add(status=400, response=json.dumps({'error': str(error)}))
return output.show()
|
a24b4e326fe12c7d2a7054ca4b89245924a96d60
| 3,645,222
|
def get_day_suffix(day):
"""
Returns the suffix of the day, such as in 1st, 2nd, ...
"""
    # 12 takes 'th' (12th), like 11 and 13; only 2 and 22 take 'nd'
    if day in (1, 21, 31):
        return 'st'
    elif day in (2, 22):
        return 'nd'
    elif day in (3, 23):
        return 'rd'
    else:
        return 'th'
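A few sample ordinals:
[str(d) + get_day_suffix(d) for d in (1, 2, 3, 11, 12, 13, 21, 22, 23)]
# -> ['1st', '2nd', '3rd', '11th', '12th', '13th', '21st', '22nd', '23rd']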
|
7d9277303357de5405b3f6894cda24726d60ad47
| 3,645,223
|
def retain_images(image_dir,xml_file, annotation=''):
"""Deprecated"""
image_in_boxes_dict=return_image_in_boxes_dict(image_dir,xml_file, annotation)
return [img for img in image_in_boxes_dict if image_in_boxes_dict[img]]
|
54efc8c8b3c31f8466a0044c2e679cbf5a5545ff
| 3,645,224
|
from typing import List
from typing import Tuple
from typing import Dict
from pathlib import Path
from typing import cast
import numpy as np
def compute_owa(
metrics: List[Tuple[float, float]],
datasets: Dict[K, DatasetSplit],
metadata: List[MetaData],
) -> float:
"""
Computes the OWA metric from the M4 competition, using a weighted average of the relative
MASE and sMAPE metrics depending on the size of the datasets.
Args:
metrics: The forecast's metrics (MASE and sMAPE).
datasets: The datasets for which the forecasts have been generated, mapped from a hashable
so that computations do not have to be repeated.
metadata: Metadata available for the dataset.
Returns:
The OWA metric value.
"""
assert (
len(metrics) == len(datasets) == len(metadata)
), "The lengths of the provided lists must be equal."
dataset_weights = np.array([len(d.gluonts()) for d in datasets.values()])
dataset_weights = dataset_weights / dataset_weights.sum()
naive_mase = 0
naive_smape = 0
actual_mase = 0
actual_smape = 0
for metric, (dataset_key, split), meta, weight in zip(
metrics, datasets.items(), metadata, dataset_weights
):
cache_file = Path.home() / ".cache" / "naive2" / f"{dataset_key}"
if cache_file.exists():
naive_forecast = QuantileForecasts.load(cache_file)
else:
naive_forecast = _naive_2_forecasts(
split.gluonts(), meta.freq, cast(int, meta.prediction_length)
)
cache_file.parent.mkdir(parents=True, exist_ok=True)
naive_forecast.save(cache_file)
data = split.evaluation()
seasonal_error = naive_error(data.past, get_seasonality(meta.freq))
naive_mase += (
mase(naive_forecast.median, data.future, seasonal_error) * weight
)
naive_smape += smape(naive_forecast.median, data.future) * weight
actual_mase += metric[0] * weight
actual_smape += metric[1] * weight
return 0.5 * (actual_smape / naive_smape + actual_mase / naive_mase)
|
2f4d19eecf85a9be720fb705eafe83c2ac1bced1
| 3,645,225
|
def parse(cell, config):
"""Extract connection info and result variable from SQL
Please don't add any more syntax requiring
special parsing.
Instead, add @arguments to SqlMagic.execute.
We're grandfathering the
connection string and `<<` operator in.
"""
result = {"connection": "", "sql": "", "result_var": None}
pieces = cell.split(None, 3)
if not pieces:
return result
result["connection"] = _connection_string(pieces[0], config)
if result["connection"]:
pieces.pop(0)
if len(pieces) > 1 and pieces[1] == "<<":
result["result_var"] = pieces.pop(0)
pieces.pop(0) # discard << operator
result["sql"] = (" ".join(pieces)).strip()
return result
|
4711b5f873281db520ff4d91646412ca08f7cbb7
| 3,645,226
|
import requests
import json
from requests.auth import HTTPBasicAuth
def createResource(url, user, pWd, resourceName, resourceJson):
"""
create a new resource based on the provided JSON
returns rc=200 (valid) & other rc's from the put
resourceDef (json)
"""
# create a new resource
apiURL = url + "/access/1/catalog/resources/"
header = {"content-type": "application/json"}
print("\tcreating resource: " + resourceName)
newResourceResp = requests.post(
apiURL,
data=json.dumps(resourceJson),
headers=header,
auth=HTTPBasicAuth(user, pWd),
verify=False,
)
print("\trc=" + str(newResourceResp.status_code))
print("\tbody=" + str(newResourceResp.text))
return newResourceResp.status_code
|
71257041a7bf098edd0668de6026539e554baff4
| 3,645,227
|
import string
import numpy as np
def generate_invalid_sequence():
"""Generates an invalid sequence of length 10"""
return ''.join(np.random.choice(list(string.ascii_uppercase + string.digits), size=10))
|
33639bc0c97710c411b2bfd0033ed15200c8edff
| 3,645,228
|
import numpy as np
from input_surface import circle
def transform_bcs_profile(T, axis, Nc):
""" Translates the profile to body cs and then transforms it for rotation.
"""
profile = circle(Nc, radius = 1, flag = 0)
Pb_new = np.zeros((Nc, 3), dtype = float)
ind_p = np.arange(0, 3*Nc, step = 3, dtype = int)
p_new = np.zeros(3*Nc, dtype = float)
p_new[ind_p] = profile[:, 0] + axis[0]
p_new[ind_p + 1] = profile[:, 1] + axis[1]
p_new[ind_p + 2] = axis[2]
P_new = np.dot(T.toarray(), p_new)
Pb_new[:, 0] = P_new[ind_p]
Pb_new[:, 1] = P_new[ind_p + 1]
Pb_new[:, 2] = P_new[ind_p + 2]
return Pb_new
|
c3cacd480cba73c7a3ce1b6f5e90582fc93e2a4b
| 3,645,229
|
def count_configuration(config, root=True, num_samples_per_dist=1):
"""Recursively count configuration."""
count = 1
if isinstance(config, dict):
for _, v in sorted(config.items()):
count *= count_configuration(
v, root=False, num_samples_per_dist=num_samples_per_dist)
elif callable(config):
assert num_samples_per_dist > 0, ('callable not allowed in config with '
'num_samples_per_dist < 1')
count *= num_samples_per_dist
elif isinstance(config, list):
if root:
count = ()
for c in config:
count += (count_configuration(
c, root=False, num_samples_per_dist=num_samples_per_dist),)
else:
count *= len(config)
return count
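A small illustration (hypothetical config; the lambda stands in for a sampling distribution drawn num_samples_per_dist times):
config = [{"lr": [0.1, 0.01], "act": ["relu", "tanh", "gelu"], "init": lambda: 0.0}]
count_configuration(config, num_samples_per_dist=2)  # -> (12,): 2 * 3 * 2 for the single root entry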
|
f7709bfd18744355f5739c4d0e4b52b952f6c8c7
| 3,645,230
|
from functools import wraps
def transition(state_table):
"""Decorator used to set up methods which cause transitions between states.
The decorator is applied to methods of the context (state machine) class.
Invoking the method may cause a transition to another state. To define
what the transitions are, the nextStates method of the TransitionTable class
is used.
"""
stVarName = state_table.inst_state_name
def wrapper(func):
state_table._addEventHandler(func.__name__)
@wraps(func)
def objCall(self, *args, **kwargs):
state_var = getattr(self, stVarName)
state_var.setXition(func)
rtn = func(self, *args, **kwargs)
state_var.toNextState(self)
return rtn
objCall.wrapping = stVarName
return objCall
return wrapper
|
675a6afe6abd068027892be561c2d032d13be52a
| 3,645,231
|
import ivy
from typing import Union
def isfinite(x: Union[ivy.Array, ivy.NativeArray], f: ivy.Framework = None)\
-> Union[ivy.Array, ivy.NativeArray]:
"""
Tests each element x_i of the input array x to determine if finite (i.e., not NaN and not equal to positive
or negative infinity).
:param x: Input array.
:type x: array
:param f: Machine learning framework. Inferred from inputs if None.
:type f: ml_framework, optional
:return: an array containing test results. An element out_i is True if x_i is finite and False otherwise.
The returned array must have a data type of bool.
"""
return _cur_framework(x, f=f).isfinite(x)
|
7326343319dae8bb8681a9612caaaddfc947f8c1
| 3,645,232
|
from pathlib import Path
from typing import Optional
def form_overwrite_file(
PATH: Path, QUESTION: Optional[str] = None, DEFAULT_NO: bool = True
) -> bool:
"""Yes/no form to ask whether file should be overwritten if already existing."""
if QUESTION is None:
QUESTION = "Overwrite {PATH}?"
save = True
if PATH.is_file():
save = form_yes_or_no(QUESTION, DEFAULT_NO=DEFAULT_NO)
return save
|
51de7eb948af9e0b6cd354e8e72815e16955200c
| 3,645,233
|
def dist(integer):
"""
Return the distance from center.
"""
if integer == 1:
return 0
c = which_layer(integer)
rows = layer_rows(c)
l = len(rows[0])
mid = (l / 2) - 1
for r in rows:
if integer in r:
list_pos = r.index(integer)
return c + abs(mid - list_pos) - 1
|
8fd27978058ac0d038836bd88dc2c7c590fec6b7
| 3,645,234
|
import datetime
def get_market_fundamental_by_ticker(date: str, market: str="KOSPI", prev=False) -> DataFrame:
"""특정 일자의 전종목 PER/PBR/배당수익률 조회
Args:
date (str ): 조회 일자 (YYMMDD)
market (str, optional): 조회 시장 (KOSPI/KOSDAQ/KONEX/ALL)
prev (bool, optional): 조회 일자가 휴일일 경우 이전 영업일 혹은 이후 영업일 선택
Returns:
DataFrame:
>> get_market_fundamental_by_ticker("20210104")
BPS PER PBR EPS DIV DPS
티커
095570 6802 4.660156 0.669922 982 6.550781 300
006840 62448 11.648438 0.399902 2168 2.970703 750
027410 15699 17.765625 0.320068 281 2.199219 110
282330 36022 15.062500 3.660156 8763 2.050781 2700
138930 25415 3.380859 0.219971 1647 6.468750 360
"""
if isinstance(date, datetime.datetime):
date = _datetime2string(date)
date = date.replace("-", "")
df = krx.get_market_fundamental_by_ticker(date, market)
holiday = (df[['BPS', 'PER', 'PBR', 'EPS', 'DIV', 'DPS']] == 0).all(axis=None)
if holiday:
target_date = get_nearest_business_day_in_a_week(date=date, prev=prev)
df = krx.get_market_fundamental_by_ticker(target_date, market)
# print(f"The date you entered {date} seems to be a holiday. PYKRX changes the date parameter to {target_date}.")
return df
|
ac13ef09867b69f354b75f1e1bd98f46baf995fc
| 3,645,235
|
def get_all(request):
""" Gets all tags in the db with counts of use """
tags = []
for tag in Tag.objects.all():
tag_data = {
'name': tag.name,
'count': tag.facebookimage_set.distinct().count()
}
if tag_data['count'] > 0:
tags.append(tag_data)
return JsonResponse({'data': tags})
|
bee0d6afd4ea8afeda0001d090cf0d9156249cce
| 3,645,236
|
import numpy as np
def quadratic_crop(x, bbox, alpha=1.0):
"""bbox is xmin, ymin, xmax, ymax"""
im_h, im_w = x.shape[:2]
bbox = np.array(bbox, dtype=np.float32)
bbox = np.clip(bbox, 0, max(im_h, im_w))
center = 0.5 * (bbox[0] + bbox[2]), 0.5 * (bbox[1] + bbox[3])
w = bbox[2] - bbox[0]
h = bbox[3] - bbox[1]
l = int(alpha * max(w, h))
l = max(l, 2)
required_padding = -1 * min(center[0] - l, center[1] - l, im_w -
(center[0] + l), im_h - (center[1] + l))
required_padding = int(np.ceil(required_padding))
if required_padding > 0:
padding = [
[required_padding, required_padding],
[required_padding, required_padding],
]
padding += [[0, 0]] * (len(x.shape) - 2)
x = np.pad(x, padding, "reflect")
center = center[0] + required_padding, center[1] + required_padding
xmin = int(center[0] - l / 2)
ymin = int(center[1] - l / 2)
return np.array(x[ymin:ymin + l, xmin:xmin + l, ...])
|
53e9acf58cf743a89a4bfaafb9211abbbb9d57ec
| 3,645,237
|
import pandas as pd
import talib as t  # TA-Lib provides CDLBREAKAWAY
def cdlbreakaway(
client,
symbol,
timeframe="6m",
opencol="open",
highcol="high",
lowcol="low",
closecol="close",
):
"""This will return a dataframe of breakaway for the given symbol across
the given timeframe
Args:
client (pyEX.Client): Client
symbol (string): Ticker
timeframe (string): timeframe to use, for pyEX.chart
opencol (string): column to use to calculate
highcol (string): column to use to calculate
lowcol (string): column to use to calculate
closecol (string): column to use to calculate
Returns:
DataFrame: result
"""
df = client.chartDF(symbol, timeframe)
val = t.CDLBREAKAWAY(
df[opencol].values.astype(float),
df[highcol].values.astype(float),
df[lowcol].values.astype(float),
df[closecol].values.astype(float),
)
return pd.DataFrame(
{
opencol: df[opencol].values,
highcol: df[highcol].values,
lowcol: df[lowcol].values,
closecol: df[closecol].values,
"cdlbreakaway": val,
}
)
|
c8bbc5adbbf742daabe39feaaa2dec01790297fe
| 3,645,238
|
def depends_on(*args):
"""Caches a `Model` parameter based on its dependencies.
Example
-------
>>> @property
>>> @depends_on('x', 'y')
>>> def param(self):
>>> return self.x * self.y
Parameters
----------
args : list of str
List of parameters this parameter depends on.
"""
cache = {}
def _wrapper(fn):
def _fn(self):
key = tuple(getattr(self, arg) for arg in args)
if key not in cache:
cache[key] = fn(self)
return cache[key]
return _fn
return _wrapper
|
09cdb0ad7601a953eafd01e3e19c0bdfb10dccb2
| 3,645,239
|
import numpy as np
def l96(x, t, f):
    """This describes the derivative for the non-linear Lorenz 96 Model of arbitrary dimension n.
This will take the state vector x and return the equation for dxdt"""
# shift minus and plus indices
x_m_2 = np.concatenate([x[-2:], x[:-2]])
x_m_1 = np.concatenate([x[-1:], x[:-1]])
x_p_1 = np.append(x[1:], x[0])
dxdt = (x_p_1-x_m_2)*x_m_1 - x + f
return dxdt
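A minimal integration sketch (assumes scipy; forcing f = 8 and a perturbed uniform initial state are the conventional choice):
from scipy.integrate import odeint
n, f = 40, 8.0
x0 = f * np.ones(n)
x0[0] += 0.01                     # nudge off the fixed point
t = np.linspace(0.0, 10.0, 1001)
trajectory = odeint(l96, x0, t, args=(f,))  # shape (1001, 40)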
|
0db03ed3a8923d18b50095852d17f9213e9f1f0f
| 3,645,240
|
def translation(im0, im1, filter_pcorr=0, odds=1, constraints=None,
reports=None):
"""
Return translation vector to register images.
It tells how to translate the im1 to get im0.
Args:
im0 (2D numpy array): The first (template) image
im1 (2D numpy array): The second (subject) image
filter_pcorr (int): Radius of the minimum spectrum filter
for translation detection, use the filter when detection fails.
Values > 3 are likely not useful.
        constraints (dict or None): Specify preference of sought values.
For more detailed documentation, refer to :func:`similarity`.
The only difference is that here, only keys ``tx`` and/or ``ty``
(i.e. both or any of them or none of them) are used.
        odds (float): The greater the odds are, the higher is the preference
            of the angle + 180 over the original angle. Odds of -1 are the same
            as infinity.
The value 1 is neutral, the converse of 2 is 1 / 2 etc.
Returns:
dict: Contains following keys: ``angle``, ``tvec`` (Y, X),
and ``success``.
"""
angle = 0
report_one = report_two = None
if reports is not None and reports.show("translation"):
report_one = reports.copy_empty()
report_two = reports.copy_empty()
# We estimate translation for the original image...
tvec, succ = _translation(im0, im1, filter_pcorr, constraints, report_one)
# ... and for the 180-degrees rotated image (the rotation estimation
# doesn't distinguish rotation of x vs x + 180deg).
tvec2, succ2 = _translation(im0, utils.rot180(im1), filter_pcorr,
constraints, report_two)
pick_rotated = False
if succ2 * odds > succ or odds == -1:
pick_rotated = True
if reports is not None and reports.show("translation"):
reports["t0-orig"] = report_one["amt-orig"]
reports["t0-postproc"] = report_one["amt-postproc"]
reports["t0-success"] = succ
reports["t0-tvec"] = tuple(tvec)
reports["t1-orig"] = report_two["amt-orig"]
reports["t1-postproc"] = report_two["amt-postproc"]
reports["t1-success"] = succ2
reports["t1-tvec"] = tuple(tvec2)
if reports is not None and reports.show("transformed"):
toapp = [
transform_img(utils.rot180(im1), tvec=tvec2, mode="wrap", order=3),
transform_img(im1, tvec=tvec, mode="wrap", order=3),
]
if pick_rotated:
toapp = toapp[::-1]
reports["after_tform"].extend(toapp)
if pick_rotated:
tvec = tvec2
succ = succ2
angle += 180
ret = dict(tvec=tvec, success=succ, angle=angle)
return ret
|
40e15b6154569d6bb13c7a38d595b603e9421d04
| 3,645,241
|
def CalculateConjointTriad(proteinsequence):
"""
Calculate the conjoint triad features from protein sequence.
    Usage:
res = CalculateConjointTriad(protein)
Input: protein is a pure protein sequence.
    Output is a dict form containing all 512 (8**3) conjoint triad features.
"""
res = {}
proteinnum = _Str2Num(proteinsequence)
for i in range(8):
for j in range(8):
for k in range(8):
temp = str(i) + str(j) + str(k)
res[temp] = proteinnum.count(temp)
return res
|
1ed73c1aa78c5360715eb71c9d56594d028cf6d3
| 3,645,242
|
def extract_url(url):
"""Creates a short version of the URL to work with. Also returns None if its not a valid adress.
Args:
url (str): The long version of the URL to shorten
Returns:
str: The short version of the URL
"""
if url.find("www.amazon.de") != -1:
index = url.find("/dp/")
if index != -1:
index2 = index + 14
url = "https://www.amazon.de" + url[index:index2]
else:
index = url.find("/gp/")
if index != -1:
index2 = index + 22
url = "https://www.amazon.de" + url[index:index2]
else:
url = None
else:
url = None
return url
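For example (the ASIN below is a made-up placeholder):
extract_url("https://www.amazon.de/Some-Product/dp/B000000000/ref=sr_1_1")
# -> 'https://www.amazon.de/dp/B000000000'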
|
85421799b601c89aa54fdce6e98c003ea80111eb
| 3,645,244
|
import unicodedata
from typing import List
def match_name(own: str, other: str) -> bool:
"""
compares 2 medic names (respects missing middle names, or abbrev. name parts)
Args:
        own: the first name to compare
        other: the second name to compare
Returns: True if both names match
"""
    # a missing name is treated as a match
if own is None or other is None:
return True
own = unicodedata.normalize('NFKD', _remove_umlaut(own)).encode('ASCII', 'ignore').decode("utf-8").lower()
other = unicodedata.normalize('NFKD', _remove_umlaut(other)).encode('ASCII', 'ignore').decode("utf-8").lower()
if own == other:
return True
hn_other = parse_name(other)
hn_own = parse_name(own)
def _remove_surname_titles(surnames: List[str]) -> List[str]:
def _remove(s: str) -> str:
for t in _surname_titles:
s = s.replace(t, "")
return s
return list(map(_remove, surnames))
if hn_own is None or hn_other is None:
return False
# remove surname titles like "von" from surnames
hn_other.last_list = _remove_surname_titles(hn_other.last_list)
hn_other.last = " ".join(hn_other.last_list)
hn_own.last_list = _remove_surname_titles(hn_own.last_list)
hn_own.last = " ".join(hn_own.last_list)
    # if the last names don't match, we skip here
own_lasts = " ".join([on.lower() for on in hn_own.last_list])
other_lasts = " ".join([on.lower() for on in hn_other.last_list])
# compound surnames
if "-" in own_lasts or "-" in other_lasts:
own_lasts_splitted = own_lasts.split("-")
other_lasts_splitted = other_lasts.split("-")
matches = 0
for o in own_lasts_splitted:
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
for o in reversed(own_lasts_splitted):
for ot in other_lasts_splitted:
if o == ot or distance(o, ot) <= 1 and (len(o) >= 5 or len(ot) >= 5):
matches += 1
if matches < 2:
return False
elif own_lasts[0] != other_lasts[0] or (own_lasts != other_lasts and distance(own_lasts, other_lasts) > 1):
return False
def _match_name_list(name: str, other: List[str]):
if name in other:
# full name match
return True
elif name.endswith(".") and name in ["{}.".format(f[0:len(name) - 1]) for f in other]:
# A. name match
return True
elif len(name) == 1 and name in [f[0] for f in other]:
# A name match
return True
return False
def _compare_names(a: List[str], b: List[str]) -> bool:
m_a = list(map(lambda n: _match_name_list(n, b), a))
m_b = list(map(lambda n: _match_name_list(n, a), b))
return m_a.count(True) >= m_a.count(False) or m_b.count(
True) >= m_b.count(False)
    # check if the first names match (if one side has no first name we assume a match)
first_name_matches = True if (hn_own.first == "" or hn_other.first == "") else _compare_names(hn_own.first_list,
hn_other.first_list)
own_first_middles = hn_own.first + hn_own.middle
other_first_middles = hn_other.first + hn_other.middle
    # check if the first name + middle names match (if one side has no first name we assume a match)
first_name_matches_fuzzy = own_first_middles.lower() == other_first_middles.lower() or (
own_first_middles.startswith(other_first_middles) or other_first_middles.startswith(own_first_middles))
if first_name_matches is False or first_name_matches_fuzzy is False:
        # if the initials don't match, it is not a match
        if (len(hn_own.first) > 0 and len(hn_other.first) > 0) and hn_own.first[0] != hn_other.first[0]:
return False
# if the names are longer than 5 and start with the same letter we allow tiny typos
l_distance = distance(hn_own.first, hn_other.first)
if l_distance < 2 and (len(hn_other.first) >= 5 or len(hn_own.first) >= 5):
first_name_matches = True
    # if neither has a middle name it's a match
if len(hn_own.middle_list) == 0 and len(hn_other.middle_list) == 0:
return first_name_matches or first_name_matches_fuzzy
    # if only one side has a middle name it's a match
    if (len(hn_own.middle_list) == 0) != (len(hn_other.middle_list) == 0):
return first_name_matches or first_name_matches_fuzzy
return _compare_names(hn_own.middle_list, hn_other.middle_list)
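
# The function above relies on several module-level helpers that are not shown.
# Below is a minimal sketch of plausible definitions, assuming nameparser's
# HumanName for parsing and python-Levenshtein for the edit distance (both
# assumptions, not confirmed by the source):
from nameparser import HumanName  # exposes first_list / middle_list / last_list
from Levenshtein import distance  # edit distance used by the fuzzy comparisons

_surname_titles = ["von ", "van ", "de ", "zu "]  # hypothetical particle list

def _remove_umlaut(s: str) -> str:
    """Replace German umlauts with their ASCII transliterations."""
    for src, dst in (("ä", "ae"), ("ö", "oe"), ("ü", "ue"), ("ß", "ss"),
                     ("Ä", "Ae"), ("Ö", "Oe"), ("Ü", "Ue")):
        s = s.replace(src, dst)
    return s

def parse_name(name: str):
    """Parse a raw name string into a HumanName, or None for empty input."""
    return HumanName(name) if name else None

# e.g. match_name("Dr. Hans A. Müller", "Hans Mueller") is expected to return
# True under these assumptions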
|
6987deb8695f823cd5e1e0948bd2a21dc33759bd
| 3,645,245
|
def price_setting():
""" Sets prices """
purchasing_price = float(input("enter purchasing price: "))
new_supplier = str(input("First time user(Y/N)?: ")).lower()
if new_supplier not in ['n', 'y']:
return True, {"errorMsg": f"{new_supplier} not a valid response"}, None
if new_supplier == 'y':
days_since_reg = int(input("Enter the days since you registered?: "))
if days_since_reg < 60:
list_price = purchasing_price*2
discount_percent = 0
profit_percent = ((list_price - purchasing_price)
* 100)/purchasing_price
return False, discount_percent, profit_percent
product_reg_days = int(
input("Enter the days you had registered this product?: "))
if product_reg_days < 0:
return True, {
"errorMsg": f"{product_reg_days} is not an acceptable value"
}, None
if product_reg_days > 30:
list_price = purchasing_price*2
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
list_price = purchasing_price*2
discount_percent = 0
profit_percent = (list_price - purchasing_price)*100/purchasing_price
return False, discount_percent, profit_percent
discount_percent, profit_percent = decide_discount(
purchasing_price*2, purchasing_price)
return False, discount_percent, profit_percent
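
# price_setting depends on a decide_discount helper that is not shown. This is
# a hypothetical sketch consistent with the call sites (the flat 10% discount
# is an assumption, not taken from the source):
def decide_discount(list_price, purchasing_price):
    discount_percent = 10  # assumed discount for established suppliers/products
    discounted_price = list_price * (1 - discount_percent / 100)
    profit_percent = (discounted_price - purchasing_price) * 100 / purchasing_price
    return discount_percent, profit_percent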
|
5d6b98b62ff7b6c8c3cc4278c3a4f2eb7c5f0f27
| 3,645,246
|
import numpy as np
from landlab import RasterModelGrid
from landlab.components import DepressionFinderAndRouter, FlowRouter

def d4_grid():
"""Test functionality of routing when D4 is specified.
The elevation field in this test looks like::
1 2 3 4 5 6 7
1 2 3 0 5 0 7
1 2 3 4 0 0 7
1 2 3 0 5 6 7
1 2 0 0 0 6 7
1 2 3 0 5 6 7
1 2 3 4 5 6 7
"""
mg1 = RasterModelGrid(7, 7, 1.)
mg2 = RasterModelGrid(7, 7, 1.)
z = mg1.node_x.copy() + 1.
lake_nodes = np.array([10, 16, 17, 18, 24, 32, 33, 38, 40])
z[lake_nodes] = 0.
mg1.add_field("node", "topographic__elevation", z, units="-")
mg2.add_field("node", "topographic__elevation", z, units="-")
frD8 = FlowRouter(mg1, method="D8")
frD4 = FlowRouter(mg2, method="D4")
lfD8 = DepressionFinderAndRouter(mg1, routing="D8")
lfD4 = DepressionFinderAndRouter(mg2, routing="D4")
class DansGrid(object):
pass
d4_grid = DansGrid()
d4_grid.mg1 = mg1
d4_grid.mg2 = mg2
d4_grid.z = z
d4_grid.lake_nodes = lake_nodes
d4_grid.frD8 = frD8
d4_grid.frD4 = frD4
d4_grid.lfD8 = lfD8
d4_grid.lfD4 = lfD4
return d4_grid
|
ed285c91cc4cda270a469a0271d1b935f1043d32
| 3,645,247
|
import inspect
def pass_complex_ins(mqc):
"""
The number of PASS complex insertions.
Source: count_variants.py (bcftools view)
"""
k = inspect.currentframe().f_code.co_name
try:
d = next(iter(mqc["multiqc_npm_count_variants"].values()))
v = d["pass_complex_ins"]
v = int(v)
except KeyError:
v = "NA"
return k, v
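
# A quick usage sketch; the multiqc dict shape is inferred from the lookups above:
mqc = {"multiqc_npm_count_variants": {"sample1": {"pass_complex_ins": "7"}}}
print(pass_complex_ins(mqc))  # -> ('pass_complex_ins', 7)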
|
df7177b126829dca4a71252e797a2cb7d7d24ee3
| 3,645,248
|
def get_jwt():
"""
Get authorization token and validate its signature against the public key
from /.well-known/jwks endpoint
"""
expected_errors = {
KeyError: WRONG_PAYLOAD_STRUCTURE,
AssertionError: JWK_HOST_MISSING,
InvalidSignatureError: WRONG_KEY,
DecodeError: WRONG_JWT_STRUCTURE,
InvalidAudienceError: WRONG_AUDIENCE,
TypeError: KID_NOT_FOUND
}
token = get_auth_token()
try:
jwks_payload = jwt.decode(token, options={'verify_signature': False})
assert 'jwks_host' in jwks_payload
jwks_host = jwks_payload.get('jwks_host')
key = get_public_key(jwks_host, token)
aud = request.url_root
payload = jwt.decode(
token, key=key, algorithms=['RS256'], audience=[aud.rstrip('/')]
)
set_ctr_entities_limit(payload)
return payload['key']
except tuple(expected_errors) as error:
message = expected_errors[error.__class__]
raise AuthorizationError(message)
|
92b757e3fa9774ac7e93fc0f89c446efefc47b33
| 3,645,249
|
import numpy as np

def convert_categorical(df, col_old, conversion, col_new=None):
    """Convert categorical values in col_old to integer codes via the
    conversion mapping, dropping rows whose value has no mapping."""
if col_new is None:
col_new = col_old
orig_values = df[col_old].values
good_rows = np.isin(orig_values, list(conversion))
df = df.iloc[good_rows]
orig_values = df[col_old].values
cat_values = np.zeros(len(df), dtype=int)
for src, dest in conversion.items():
cat_values[orig_values == src] = dest
df.loc[:, col_new] = cat_values
return df
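
# Example with a small hypothetical frame; unmapped rows are dropped:
import pandas as pd
df_example = pd.DataFrame({"color": ["red", "blue", "green"]})
out = convert_categorical(df_example, "color", {"red": 0, "blue": 1})
print(out["color"].tolist())  # -> [0, 1]; the "green" row was dropped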
|
db07bb08f302edf615cab96b04b263f66fa9b8b1
| 3,645,251
|
def _get_pipeline_configs(force=False):
"""
Connects to Shotgun and retrieves information about all projects
and all pipeline configurations in Shotgun. Adds this to the disk cache.
If a cache already exists, this is used instead of talking to Shotgun.
To force a re-cache, set the force flag to True.
Returns a complex data structure with the following fields
local_storages:
- id
- code
- windows_path
- mac_path
- linux_path
pipeline_configurations:
- id
- code
- windows_path
- linux_path
- mac_path
- project
- project.Project.tank_name
:param force: set this to true to force a cache refresh
:returns: dictionary with keys local_storages and pipeline_configurations.
"""
CACHE_KEY = "paths"
    if not force:
# try to load cache first
# if that doesn't work, fall back on shotgun
cache = _load_lookup_cache()
if cache and cache.get(CACHE_KEY):
# cache hit!
return cache.get(CACHE_KEY)
# ok, so either we are force recomputing the cache or the cache wasn't there
sg = shotgun.get_sg_connection()
# get all local storages for this site
local_storages = sg.find("LocalStorage",
[],
["id", "code", "windows_path", "mac_path", "linux_path"])
# get all pipeline configurations (and their associated projects) for this site
pipeline_configs = sg.find("PipelineConfiguration",
[["project.Project.tank_name", "is_not", None]],
["id",
"code",
"windows_path",
"linux_path",
"mac_path",
"project",
"project.Project.tank_name"])
# cache this data
data = {"local_storages": local_storages, "pipeline_configurations": pipeline_configs}
_add_to_lookup_cache(CACHE_KEY, data)
return data
|
3c504606e5a751e0015abbdcf74a3e2513d4d280
| 3,645,252
|
def info(parentwindow, message, buttons, *,
title=None, defaultbutton=None):
"""Display an information message."""
return _message('info', parentwindow, message, title, buttons,
defaultbutton)
|
060e41cde2e83bdeab3fa3147caebabb3292923a
| 3,645,254
|
import jax
import jax.numpy as jnp
from jax import nn

def gru(xs, lengths, init_hidden, params):
"""RNN with GRU. Based on https://github.com/google/jax/pull/2298"""
def apply_fun_single(state, inputs):
i, x = inputs
inp_update = jnp.matmul(x, params["update_in"])
hidden_update = jnp.dot(state, params["update_weight"])
update_gate = nn.sigmoid(inp_update + hidden_update)
reset_gate = nn.sigmoid(
jnp.matmul(x, params["reset_in"]) + jnp.dot(state, params["reset_weight"])
)
output_gate = update_gate * state + (1 - update_gate) * jnp.tanh(
jnp.matmul(x, params["out_in"])
+ jnp.dot(reset_gate * state, params["out_weight"])
)
hidden = jnp.where((i < lengths)[:, None], output_gate, jnp.zeros_like(state))
return hidden, hidden
init_hidden = jnp.broadcast_to(init_hidden, (xs.shape[1], init_hidden.shape[1]))
return jax.lax.scan(apply_fun_single, init_hidden, (jnp.arange(xs.shape[0]), xs))
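
# Minimal usage sketch with assumed shapes (time-major input [T, batch, features],
# hidden size 4); the parameter names follow the lookups inside gru:
import numpy as np
rng = np.random.default_rng(0)
shapes = {"update_in": (3, 4), "update_weight": (4, 4),
          "reset_in": (3, 4), "reset_weight": (4, 4),
          "out_in": (3, 4), "out_weight": (4, 4)}
params = {k: jnp.asarray(rng.normal(size=s)) for k, s in shapes.items()}
xs = jnp.asarray(rng.normal(size=(5, 2, 3)))  # [time, batch, features]
final_state, all_states = gru(xs, jnp.array([5, 3]), jnp.zeros((1, 4)), params)
print(all_states.shape)  # (5, 2, 4)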
|
76c4ca1f90ba5cefc4227197d70c93c358d5f1d1
| 3,645,256
|
def get_strings_in_flattened_sequence(p):
"""
Traverses nested sequence and for each element, returns first string encountered
"""
if p is None:
return []
#
# string is returned as list of single string
#
if isinstance(p, path_str_type):
return [p]
#
# Get all strings flattened into list
#
return get_strings_in_flattened_sequence_aux(p)
|
3de6829386d7877b745277cae88e3b3e6ac889a3
| 3,645,257
|
def kansuji2arabic(string, sep=False):
"""漢数字をアラビア数字に変換"""
def _transvalue(sj, re_obj=re_kunit, transdic=TRANSUNIT):
unit = 1
result = 0
for piece in reversed(re_obj.findall(sj)):
if piece in transdic:
if unit > 1:
result += unit
unit = transdic[piece]
else:
val = int(piece) if piece.isdecimal() else _transvalue(piece)
result += val * unit
unit = 1
if unit > 1:
result += unit
return result
transuji = string.translate(tt_ksuji)
for suji in sorted(set(re_suji.findall(transuji)), key=lambda s: len(s),
reverse=True):
if not suji.isdecimal():
arabic = _transvalue(suji, re_manshin, TRANSMANS)
arabic = '{:,}'.format(arabic) if sep else str(arabic)
transuji = transuji.replace(suji, arabic)
return transuji
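
# Expected behaviour, assuming the module-level tables (tt_ksuji, re_suji,
# re_kunit, re_manshin, TRANSUNIT, TRANSMANS) carry the usual kanji-numeral
# mappings:
# kansuji2arabic("千二百三十四")            -> "1234"
# kansuji2arabic("千二百三十四", sep=True)  -> "1,234"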
|
4787618585baf660164d1c676ac7cae2750fe239
| 3,645,258
|
import pulumi

def get_image(member_status=None, most_recent=None, name=None, owner=None, properties=None, region=None, size_max=None, size_min=None, sort_direction=None, sort_key=None, tag=None, visibility=None):
"""
Use this data source to get the ID of an available OpenStack image.
"""
__args__ = dict()
__args__['memberStatus'] = member_status
__args__['mostRecent'] = most_recent
__args__['name'] = name
__args__['owner'] = owner
__args__['properties'] = properties
__args__['region'] = region
__args__['sizeMax'] = size_max
__args__['sizeMin'] = size_min
__args__['sortDirection'] = sort_direction
__args__['sortKey'] = sort_key
__args__['tag'] = tag
__args__['visibility'] = visibility
__ret__ = pulumi.runtime.invoke('openstack:images/getImage:getImage', __args__)
return GetImageResult(
checksum=__ret__.get('checksum'),
container_format=__ret__.get('containerFormat'),
disk_format=__ret__.get('diskFormat'),
file=__ret__.get('file'),
metadata=__ret__.get('metadata'),
min_disk_gb=__ret__.get('minDiskGb'),
min_ram_mb=__ret__.get('minRamMb'),
protected=__ret__.get('protected'),
region=__ret__.get('region'),
schema=__ret__.get('schema'),
size_bytes=__ret__.get('sizeBytes'),
updated_at=__ret__.get('updatedAt'),
id=__ret__.get('id'))
|
60fc60c558ac3c2e60fcdb73bf72a9d2bbc20855
| 3,645,259
|
def sort_course_dicts(courses):
""" Sorts course dictionaries
@courses: iterable object containing dictionaries representing courses.
Each course must have a course_number and abbreviation key
@return: returns a new list containing the given courses, in naturally sorted order.
"""
detailed_courses = [{
"course": course,
"numeric_course_number": int(extract_numeric_component(course["course_number"])),
"prefix": extract_prefix(course["course_number"]),
"suffix": extract_suffix(course["course_number"])
} for course in courses]
detailed_courses.sort(key=lambda course: course["suffix"])
detailed_courses.sort(key=lambda course: course["prefix"])
detailed_courses.sort(key=lambda course: course["numeric_course_number"])
detailed_courses.sort(key=lambda course: course["course"]["abbreviation"])
return [course_detail["course"] for course_detail in detailed_courses]
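
# The extract_* helpers are not shown; these are hypothetical regex-based
# definitions consistent with how they are used above:
import re

def extract_numeric_component(course_number: str) -> str:
    """Return the first run of digits, or "0" if there is none."""
    match = re.search(r"\d+", course_number)
    return match.group(0) if match else "0"

def extract_prefix(course_number: str) -> str:
    """Return the non-digit characters before the number (e.g. "C" in "C100A")."""
    return re.match(r"\D*", course_number).group(0)

def extract_suffix(course_number: str) -> str:
    """Return the non-digit characters after the number (e.g. "A" in "C100A")."""
    return re.search(r"\D*$", course_number).group(0)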
|
f715cf1db4c77bd3412290bc870731e0d923871b
| 3,645,260
|
import glob
import re
import numpy
def read_folder(filepath):
"""
Reads multiple image files from a folder and returns the resulting stack.
To find the images in the right order, a regex is used which will search
for files with the following pattern:
[prefix]_p[Nr][suffix]. The start number doesn't need to be 0.
The files are sorted with a natural sort, meaning that files like
0002, 1, 004, 3 will be sorted as 1, 0002, 3, 004.
    The following regex is used to find the measurements:
".*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)"
Supported file formats for the image file equal the supported formats of
SLIX.imread.
Args:
filepath: Path to folder
Returns:
numpy.array: Image with shape [x, y, z] where [x, y] is the size
of a single image and z specifies the number of measurements
"""
files_in_folder = glob.glob(filepath + '/*')
matching_files = []
for file in files_in_folder:
if re.match(_fileregex, file) is not None:
matching_files.append(file)
matching_files.sort(key=__natural_sort_filenames_key)
image = None
# Check if files contain the needed regex for our measurements
for file in matching_files:
measurement_image = imread(file)
if image is None:
image = measurement_image
elif len(image.shape) == 2:
image = numpy.stack((image, measurement_image), axis=-1)
else:
image = numpy.concatenate((image,
measurement_image
[:, :, numpy.newaxis]), axis=-1)
return image
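
# _fileregex and the natural-sort key are module-level helpers. The regex is
# given verbatim in the docstring; the sort key below is a hypothetical
# natural-sort implementation:
import re

_fileregex = r".*_+p[0-9]+_?.*\.(tif{1,2}|jpe*g|nii|h5|png)"

def __natural_sort_filenames_key(filename):
    # split out digit runs so that "p2" sorts before "p10"
    return [int(part) if part.isdigit() else part.lower()
            for part in re.split(r"(\d+)", filename)]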
|
d50dc5ef09931b7950c91b5ea2f07eaa0d90cba1
| 3,645,261
|
from presqt.targets.zenodo.utilities.helpers.get_zenodo_children import zenodo_get_children
def zenodo_fetch_resource_helper(zenodo_project, resource_id, is_record=False, is_file=False):
"""
Takes a Zenodo deposition/record and builds a Zenodo PresQT resource.
Parameters
----------
    zenodo_project : dict
        The requested Zenodo project.
    resource_id : str
        The ID of the requested resource.
is_record : boolean
Flag for if the resource is a published record
is_file : boolean
Flag for if the resource is a file
Returns
-------
PresQT Zenodo Resource (dict).
"""
identifier = None
if is_file is False:
if is_record is True:
kind_name = zenodo_project['metadata']['resource_type']['type']
date_modified = zenodo_project['updated']
identifier = zenodo_project['doi']
else:
kind_name = zenodo_project['metadata']['upload_type']
date_modified = zenodo_project['modified']
kind = 'container'
title = zenodo_project['metadata']['title']
hashes = {}
extra = {}
for key, value in zenodo_project['metadata'].items():
if key != 'doi':
extra[key] = value
children = zenodo_get_children(zenodo_project, resource_id, is_record)
else:
kind = 'item'
kind_name = 'file'
title = zenodo_project['key']
date_modified = zenodo_project['updated']
hashes = {'md5': zenodo_project['checksum'].partition(':')[2]}
extra = {}
children = []
return {
"kind": kind,
"kind_name": kind_name,
"id": resource_id,
"identifier": identifier,
"title": title,
"date_created": zenodo_project['created'],
"date_modified": date_modified,
"hashes": hashes,
"extra": extra,
"children": children}
|
8427456ea648d1a5f4b5a0ee3baffc28649184aa
| 3,645,262
|
import json
from django.http import HttpResponse

def add_role_menu(request):
    """Menu authorization: grant the selected menus to a role."""
menu_nums = request.POST.get("node_id_json")
role_id = request.POST.get("role_id")
role_obj = auth_db.Role.objects.get(id=role_id)
menu_nums = json.loads(menu_nums)
role_obj.menu.clear()
for i in menu_nums:
menu_obj = auth_db.Menus.objects.get(menu_num=i)
role_obj.menu.add(menu_obj)
data = "授权已更新,重新登录即生效!"
return HttpResponse(data)
|
31b8d2eb62ad105c4e44f6af7fa75cde2746d2f0
| 3,645,263
|
from urllib.parse import urljoin, urlencode

def oauth_url(auth_base, country, language):
"""Construct the URL for users to log in (in a browser) to start an
authenticated session.
"""
url = urljoin(auth_base, 'login/sign_in')
query = urlencode({
'country': country,
'language': language,
'svcCode': SVC_CODE,
'authSvr': 'oauth2',
'client_id': CLIENT_ID,
'division': 'ha',
'grant_type': 'password',
})
return '{}?{}'.format(url, query)
|
d932d161ead93510e2a4b05c20f87982c726e158
| 3,645,265
|
def teammsg(self: Client, message: str) -> str:
"""Sends a team message."""
return self.run('teammsg', message)
|
97a3a9c99af17fcf183d72158ec5dc9b5ad3689d
| 3,645,266
|
import panflute as pf

def h_html_footnote(e, doc):
"""Handle footnotes with bigfoot"""
if not isinstance(e, pf.Note) or doc.format != "html":
return None
htmlref = rf'<sup id="fnref:{doc.footnotecounter}"><a href="#fn:{doc.footnotecounter}" rel="footnote">{doc.footnotecounter}</a></sup>'
htmlcontent_before = rf'<li class="footnote" id="fn:{doc.footnotecounter}"><p>'
htmlcontent_after = rf'<a href="#fnref:{doc.footnotecounter}" title="return to article"> ↩</a><p></li>'
doc.footnotecounter += 1
conts = pf.Div(*e.content)
doc.footnotecontents += (
[pf.RawBlock(htmlcontent_before, format="html")]
+ [conts]
+ [pf.RawBlock(htmlcontent_after, format="html")]
)
return pf.RawInline(htmlref, format="html")
|
462f4886cc7b4be46b3904abc3396096f36d7938
| 3,645,268
|
def segment(x,u1,u2):
""" given a figure x, create a new figure spanning the specified interval in the original figure
"""
if not (isgoodnum(u1) and isgoodnum(u2)) or close(u1,u2) or u1<0 or u2 < 0 or u1 > 1 or u2 > 1:
raise ValueError('bad parameter arguments passed to segment: '+str(u1)+', '+str(u2))
if ispoint(x):
return deepcopy(x)
elif isline(x):
return segmentline(x,u1,u2)
elif isarc(x):
return segmentarc(x,u1,u2)
elif ispoly(x):
return segmentpoly(x,u1,u2)
elif isgeomlist(x):
return segmentgeomlist(x,u1,u2)
else:
raise ValueError("inappropriate figure type for segment(): "+str(x))
|
291ddfb011ece20840a4a56fdc7bc87f2187625f
| 3,645,269
|
import tensorflow as tf

def box_from_anchor_and_target(anchors, regressed_targets):
    """
    Get bounding box from anchor and target through the transformation provided in the paper.
    :param anchors: Nx4 anchor boxes
    :param regressed_targets: Nx4 regression targets
    :return: Nx4 decoded boxes stacked as (v, u, h, w)
    """
boxes_v = anchors[:, 2] * regressed_targets[:, 0] / 10.0 + anchors[:, 0]
boxes_u = anchors[:, 3] * regressed_targets[:, 1] / 10.0 + anchors[:, 1]
boxes_h = anchors[:, 2] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 2] / 5.0), 1e-4, 1e4)
boxes_w = anchors[:, 3] * \
tf.clip_by_value(tf.exp(regressed_targets[:, 3] / 5.0), 1e-4, 1e4)
return tf.stack([boxes_v,
boxes_u,
boxes_h,
boxes_w], axis=1)
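
# Quick numeric sanity check (float tensors assumed): zero regression targets
# should decode back to the anchors themselves, since exp(0) == 1.
anchors = tf.constant([[10.0, 10.0, 4.0, 4.0]])
targets = tf.zeros((1, 4))
print(box_from_anchor_and_target(anchors, targets))  # -> [[10. 10.  4.  4.]]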
|
c55a54535fbfa67502c1b65ec71c23d772dedd7e
| 3,645,271
|
def block_device_mapping_update(context, bdm_id, values, legacy=True):
"""Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
|
7959cc6c849cb599c719e21f8c8315a7bc7ddd09
| 3,645,272
|
def consumer(address,callback,message_type):
"""
Creates a consumer binding to the given address pull messages.
The callback is invoked for every reply received.
Args:
- address: the address to bind the PULL socket to.
- callback: the callback to invoke for every message. Must accept 1 variables - the message
- message_type: the type of message to receive
"""
return Consumer(address,callback,message_type)
|
a1c01bafa4f65ba0a0a212916556c36315fe2c88
| 3,645,273
|
def _parse_continuous_records(prepared_page, section_dict):
"""Handle parsing a continuous list of records."""
columns = 6
start = prepared_page.index('Date and time')
for i, column in enumerate(prepared_page[start:start + columns]):
column_index = start + i
values = prepared_page[column_index + columns::columns]
if column in section_dict:
section_dict[column] = section_dict[column] + values
else:
section_dict[column] = values
return section_dict
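
# Example: a flattened page where the six column headers are followed by rows
# of six values each (illustrative data, not from the source):
page = ['Date and time', 'A', 'B', 'C', 'D', 'E',
        '2020-01-01 00:00', '1', '2', '3', '4', '5']
print(_parse_continuous_records(page, {}))
# -> {'Date and time': ['2020-01-01 00:00'], 'A': ['1'], ..., 'E': ['5']}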
|
7ddcb52433828d37ce6e0cac5d51d8fcfb249296
| 3,645,274
|
def power_law_at_2500(x, amp, slope, z):
""" Power law model anchored at 2500 AA
This model is defined for a spectral dispersion axis in Angstroem.
:param x: Dispersion of the power law
:type x: np.ndarray
:param amp: Amplitude of the power law (at 2500 A)
:type amp: float
:param slope: Slope of the power law
:type slope: float
:param z: Redshift
:type z: float
:return: Power law model
:rtype: np.ndarray
"""
return amp * (x / (2500. * (z+1.))) ** slope
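
# Quick check: at observed-frame 2500 * (1 + z) Angstroem the model returns
# amp by construction (illustrative values):
import numpy as np
x = np.array([2500.0 * (1 + 2.0)])  # rest-frame 2500 A at z = 2
print(power_law_at_2500(x, amp=1e-17, slope=-1.5, z=2.0))  # -> [1.e-17]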
|
508227f332f652d00c785074c20f9acefbce9258
| 3,645,275
|
def map2alm(
maps,
lmax=None,
mmax=None,
iter=3,
pol=True,
use_weights=False,
datapath=None,
gal_cut=0,
use_pixel_weights=False,
):
"""Computes the alm of a Healpix map. The input maps must all be
in ring ordering.
Parameters
----------
maps : array-like, shape (Npix,) or (n, Npix)
The input map or a list of n input maps. Must be in ring ordering.
lmax : int, scalar, optional
Maximum l of the power spectrum. Default: 3*nside-1
mmax : int, scalar, optional
Maximum m of the alm. Default: lmax
iter : int, scalar, optional
        Number of iterations (default: 3)
pol : bool, optional
If True, assumes input maps are TQU. Output will be TEB alm's.
(input must be 1 or 3 maps)
If False, apply spin 0 harmonic transform to each map.
(input can be any number of maps)
If there is only one input map, it has no effect. Default: True.
use_weights: bool, scalar, optional
If True, use the ring weighting. Default: False.
datapath : None or str, optional
If given, the directory where to find the weights data.
gal_cut : float [degrees]
pixels at latitude in [-gal_cut;+gal_cut] are not taken into account
use_pixel_weights: bool, optional
If True, use pixel by pixel weighting, healpy will automatically download the weights, if needed
Returns
-------
alms : array or tuple of array
alm or a tuple of 3 alm (almT, almE, almB) if polarized input.
Notes
-----
The pixels which have the special `UNSEEN` value are replaced by zeros
before spherical harmonic transform. They are converted back to `UNSEEN`
    value, so that the input maps are not modified. Each map has its own,
independent mask.
"""
maps = ma_to_array(maps)
info = maptype(maps)
nside = pixelfunc.get_nside(maps)
check_max_nside(nside)
if use_pixel_weights:
if use_weights:
raise RuntimeError("Either use pixel or ring weights")
with data.conf.set_temp("dataurl", DATAURL), data.conf.set_temp(
"remote_timeout", 30
):
pixel_weights_filename = data.get_pkg_data_filename(
"full_weights/healpix_full_weights_nside_%04d.fits" % nside,
package="healpy",
)
else:
pixel_weights_filename = None
if pol or info in (0, 1):
alms = _sphtools.map2alm(
maps,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
else:
# info >= 2 and pol is False : spin 0 spht for each map
alms = [
_sphtools.map2alm(
mm,
niter=iter,
datapath=datapath,
use_weights=use_weights,
lmax=lmax,
mmax=mmax,
gal_cut=gal_cut,
pixel_weights_filename=pixel_weights_filename,
)
for mm in maps
]
return np.array(alms)
|
9312a6c5ee40fe9a3ef3f6057ee5964d200f9732
| 3,645,276
|
def credits():
"""
Credits Page
"""
return render_template("credits.html")
|
00fdc0be4c3abd3df21993b271977208252123df
| 3,645,277
|
def register_view(request):
"""Register a new user."""
if request.method == "POST":
user_form = UserRegistrationForm(request.POST)
if user_form.is_valid():
# Create a new User object but don't save it yet.
new_user = user_form.save(commit=False)
# Set the chosen password
new_user.set_password(
user_form.cleaned_data["password"]
)
# Save the User object
new_user.save()
# Create the user profile
Profile.objects.create(user=new_user)
context = {"user_form": user_form}
return render(request, "account/register_done.html", context)
else:
user_form = UserRegistrationForm()
return render(request, "account/register.html", {"user_form": user_form})
|
19fa639603c7ea67a76696e4b88aae376461988c
| 3,645,278
|
def menu(request):
"""
A View to return the menu.html
where all menu images are returned
in a carousel.
"""
menus = MenuImages.objects.all()
context = {
'menus': menus
}
return render(request, 'menu/menu.html', context)
|
9491d9e1d4084ed78d4aadfd867910e2d0511704
| 3,645,280
|
def wav_process(PATH, i):
"""
音频处理,在路径下读取指定序号的文件进行处理
Args:
PATH (str): 音频文件路径
i (int): 指定序号
Returns:
float: 计算得到的音源角度(单位:°)
"""
# 读取数据
wav, sr = read_wav(PATH, i + 1)
    # apply noise reduction
wav_rn = reduce_noise(wav)
    # estimate the angles
angle_list = estimate_angle(wav_rn, sr)
    # determine the reference direction
angle_13, angle_24 = angle_list[1], angle_list[4]
theta13p, theta13n = (180 + angle_13) % 360, 180 - angle_13
theta24p, theta24n = (270 + angle_24) % 360, 270 - angle_24
if angle_13 > 15 and angle_13 < 165:
if ((theta24p > theta13p - 10 and theta24p < theta13p + 10) or
(theta24p + 360 > theta13p - 10 and theta24p + 360 < theta13p + 10)
or (theta24n > theta13p - 10 and theta24n < theta13p + 10)
or (theta24n + 360 > theta13p - 10
and theta24n + 360 < theta13p + 10)):
scope_mid = theta13p
else:
scope_mid = theta13n
else:
if ((theta13p > theta24p - 10 and theta13p < theta24p + 10) or
(theta13p + 360 > theta24p - 10 and theta13p + 360 < theta24p + 10)
or (theta13n > theta24p - 10 and theta13n < theta24p + 10)
or (theta13n + 360 > theta24p - 10
and theta13n + 360 < theta24p + 10)):
scope_mid = theta24p
else:
scope_mid = theta24n
angle_base = [135, 180, 225, 225, 270, 315]
processed_angle = []
    angle_sum = 0  # renamed from "sum" to avoid shadowing the builtin
weights = 0
for i, elem in enumerate(angle_list):
if elem > 15 and elem < 165:
            # weighted average for the final angle
if elem > 65 and elem < 115:
weight = 100
else:
weight = 1
ap = (angle_base[i] + elem + 360) % 360
an = (angle_base[i] - elem + 360) % 360
if ap > scope_mid - 10 and ap < scope_mid + 10:
processed_angle.append(ap)
                angle_sum = angle_sum + ap * weight
weights = weights + weight
else:
processed_angle.append(an)
                angle_sum = angle_sum + an * weight
weights = weights + weight
    return angle_sum / weights
|
ee336928b4b5e221a72ba8b6509555666ff3b763
| 3,645,281
|
def extract_vuln_id(input_string):
"""
Function to extract a vulnerability ID from a message
"""
if 'fp' in input_string.lower():
wordlist = input_string.split()
vuln_id = wordlist[-1]
return vuln_id
else:
return None
|
06673f2b401472185c8a3e6fc373d39c171791db
| 3,645,282
|
from PIL import Image, ImageChops

def compare_images(img1, img2):
    """Expects paths to two image files. Returns an integer difference score
    (0 means the images are pixel-identical)."""
with Image.open(img1) as img1, Image.open(img2) as img2:
# Calculate a difference image that is the difference between the two images.
diff = ImageChops.difference(img1, img2)
return sum(_unpack_image(diff.getdata())[1])
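
# Usage sketch (hypothetical file names; relies on the unshown _unpack_image helper):
# score = compare_images("before.png", "after.png")
# score == 0 when the images are pixel-identical (same size and mode assumed)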
|
bc94987785a5731e71a1e25daae51179c415eda6
| 3,645,284
|
def bytes_to_nodes(buf):
""" Return a list of ReadNodes corresponding to the bytes in buf.
@param bytes buf: a bytes object
@rtype: list[ReadNode]
>>> bytes_to_nodes(bytes([0, 1, 0, 2]))
[ReadNode(0, 1, 0, 2)]
"""
lst = []
for i in range(0, len(buf), 4):
l_type = buf[i]
l_data = buf[i+1]
r_type = buf[i+2]
r_data = buf[i+3]
lst.append(ReadNode(l_type, l_data, r_type, r_data))
return lst
|
1296b49f5d76605d4408eddf21b76f286dfc5f5b
| 3,645,285
|
def list_unnecessary_loads(app_label=None):
"""
Scan the project directory tree for template files and process each and
every one of them.
:app_label: String; app label supplied by the user
:returns: None (outputs to the console)
"""
if app_label:
app = get_app(app_label)
else:
app = None
    dt_engines = get_djangotemplates_engines()
    # ensure a defined return value even when no template engines are configured
    has_issues = False
    for dt_engine in dt_engines:
        has_issues = False
templates = []
# Get the locations of installed packages
pkg_locations = get_package_locations()
# Get template directories located within the project
for directory in dt_engine.template_dirs:
templates += get_templates(directory, pkg_locations, app)
if templates:
for template in templates:
status = process_template(template, dt_engine.engine)
if status:
has_issues = status
if not has_issues:
output_message(reason=3)
else:
output_message(reason=1)
return has_issues
|
d78aeb6132e4f79f4458454f6107f9003db37999
| 3,645,287
|
from typing import Any
def _element(
html_element: str,
html_class: str,
value: Any,
is_visible: bool,
**kwargs,
) -> dict:
"""
Template to return container with information for a <td></td> or <th></th> element.
"""
if "display_value" not in kwargs:
kwargs["display_value"] = value
return {
"type": html_element,
"value": value,
"class": html_class,
"is_visible": is_visible,
**kwargs,
}
|
4ce4d2ff9f547470d4a875508c40d3ae2a927ba0
| 3,645,288
|
import torch
def lovasz_hinge_loss(pred, target, crop_masks, activation='relu', map2inf=False):
"""
Binary Lovasz hinge loss
pred: [P] Variable, logits at each prediction (between -\infty and +\infty)
target: [P] Tensor, binary ground truth labels (0 or 1)
"""
losses = []
for m, p, t in zip(crop_masks, pred, target): # > imgs
num_objs = t.size()[0]
loss = t.new_tensor(0.0)
for i in range(num_objs):
if len(p[i]) > 0:
loss += lovasz_hinge_loss_single(p[i][m[i]].view(-1),
t[i][m[i]].view(-1),
activation=activation,
map2inf=map2inf)
if num_objs > 0:
loss /= num_objs
losses.append(loss)
losses = torch.stack(losses)
return losses
|
c1d7ce49feda1a2ba1116d03de3ba8a5b9ad65a9
| 3,645,290
|
def get_gene_summary(gene):
"""Gets gene summary from a model's gene."""
return {
gene.id: {
"name": gene.name,
"is_functional": gene.functional,
"reactions": [{rxn.id: rxn.name} for rxn in gene.reactions],
"annotation": gene.annotation,
"notes": gene.notes,
}
}
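
# Usage sketch, assuming a cobrapy-style model whose genes expose name,
# functional, reactions, annotation and notes:
# from cobra.io import load_model
# model = load_model("textbook")   # bundled demo model in recent cobrapy
# summary = get_gene_summary(model.genes.get_by_id("b0008"))
# summary["b0008"]["name"]         # -> the gene's display name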
|
dd9cb3f8e9841a558898c67a16a02da1b39479d2
| 3,645,291
|
def prompt_choice_list(msg, a_list, default=1, help_string=None):
"""Prompt user to select from a list of possible choices.
    :param msg: A message displayed to the user before the choice list
    :type msg: str
    :param a_list: The list of choices (list of strings or list of dicts with 'name' & 'desc')
    :type a_list: list
    :param default: The default option chosen if the user doesn't enter a choice
    :type default: int
    :param help_string: Optional help text printed when the user enters '?'
    :returns: The list index of the item chosen.
"""
verify_is_a_tty()
options = "\n".join(
[
" [{}] {}{}".format(
i + 1,
x["name"] if isinstance(x, dict) and "name" in x else x,
" - " + x["desc"] if isinstance(x, dict) and "desc" in x else "",
)
for i, x in enumerate(a_list)
]
)
allowed_vals = list(range(1, len(a_list) + 1))
while True:
val = _input(
"{}\n{}\nPlease enter a choice [Default choice({})]: ".format(msg, options, default)
)
if val == "?" and help_string is not None:
print(help_string)
continue
if not val:
val = "{}".format(default)
try:
ans = int(val)
if ans in allowed_vals:
# array index is 0-based, user input is 1-based
return ans - 1
raise ValueError
except ValueError:
logger.warning("Valid values are %s", allowed_vals)
|
dc5f077d3710420b9d9b26032ee340c0671d009d
| 3,645,292
|