content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
from typing import Sequence
def flatten(nested: Sequence) -> list:
    """
    Return vectorized (1D) list from nested Sequence ``nested``.

    Args:
        nested: Sequence
            Iterable sequence containing multiple other nested sequences.

    Returns: list
        Vectorized (unidimensional) version of ``nested``.
    """
    # Strings are Sequences too but are treated as atomic leaves.
    # Removed the redundant bool() around the isinstance test.
    return [
        leaf
        for item in nested
        for leaf in (
            flatten(item)
            if isinstance(item, Sequence) and not isinstance(item, str)
            else [item]
        )
    ]
import os
def get_envvar(prefix="PF_"):
    """Collect environment variables whose names start with ``prefix``.

    Returns a dict mapping the lower-cased remainder of each matching
    variable name to its whitespace-stripped value.
    """
    matched = {}
    for name, value in os.environ.items():
        if name.startswith(prefix):
            matched[name[len(prefix):].lower()] = value.strip()
    return matched
import os
import tempfile
def create_local_temp_dir(name, directory=None):
    r"""
    Create a directory for storing temporary files needed for testing neo.

    If ``directory`` is None or not specified, automatically create the
    directory in {tempdir}/files_for_testing_neo on linux/unix/mac or
    {tempdir}\files_for_testing_neo on windows, where {tempdir} is the
    system temporary directory returned by tempfile.gettempdir().
    (Raw docstring: the original non-raw string contained the escape "\f".)

    Returns the full path of the created directory.
    """
    if directory is None:
        directory = os.path.join(tempfile.gettempdir(),
                                 'files_for_testing_neo')
    directory = os.path.join(directory, name)
    # makedirs(exist_ok=True) replaces the exists()/mkdir() pairs, avoiding
    # the check-then-create race and creating intermediate dirs in one call.
    os.makedirs(directory, exist_ok=True)
    return directory
def split_dataset(X, Y, validation = True, supertest = True, train_cut = 0.7, validation_cut = 0.8, test_cut = 0.9):
    """
    Split a dataset into train, validation, test and supertest slices.

    Args:
        X, Y: indexable arrays with a ``shape[0]`` (e.g. numpy arrays).
        validation: include a validation slice (else None is returned for it).
        supertest: include a supertest slice (else None is returned for it).
        train_cut, validation_cut, test_cut: cumulative split fractions.

    Returns:
        (x_train, y_train, x_validation, y_validation,
         x_test, y_test, x_supertest, y_supertest)
    """
    n = X.shape[0]
    # BUG FIX: the original misspelled `validaiton_cut` and, when both
    # validation and supertest were requested, overwrote the test slice so
    # that it overlapped the validation slice.
    train_end = round(n * train_cut)
    validation_end = round(n * validation_cut)
    test_end = round(n * test_cut)
    x_train, y_train = X[:train_end], Y[:train_end]
    if validation:
        x_validation = X[train_end:validation_end]
        y_validation = Y[train_end:validation_end]
        test_start = validation_end
    else:
        x_validation = None
        y_validation = None
        test_start = train_end
    if supertest:
        x_test, y_test = X[test_start:test_end], Y[test_start:test_end]
        x_supertest, y_supertest = X[test_end:], Y[test_end:]
    else:
        x_test, y_test = X[test_start:], Y[test_start:]
        x_supertest = None
        y_supertest = None
    return x_train, y_train, x_validation, y_validation, x_test, y_test, x_supertest, y_supertest
import subprocess
def run_command_with_stderr(command):
    """Run ``command`` in a shell via subprocess.Popen.

    Returns the captured (stdout, stderr) byte strings.
    """
    proc = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
    )
    out, err = proc.communicate()
    return out, err
import time
def utcut():
    """Return the current Unix time (seconds since the epoch, UTC)."""
    now = time.time()
    return now
import math
def calc_conv_dBZlim(coef):
    """Calculate the dBZ limit for convective/frontal-case Z-R calculation.

    With default coefficient values the limit is 23.48 dBZ.

    Keyword arguments:
    coef -- dictionary with Z(R) A and B coefficients zr_a, zr_b, zr_a_c,
            zr_b_c (the _c variants are for convective rain)

    Return:
    conv_dbzlim -- limit dBZ value for convective rain-rate calculation
    """
    zr_a, zr_b = coef['zr_a'], coef['zr_b']
    zr_a_c, zr_b_c = coef['zr_a_c'], coef['zr_b_c']
    if zr_a == zr_a_c:
        return 10.0 * math.log10(zr_a)
    # Rain rate where the two Z-R relations intersect.
    rain_rate = (zr_a / zr_a_c) ** (1.0 / (zr_b_c - zr_b))
    return 10.0 * math.log10(zr_a * rain_rate ** zr_b)
import csv
def get_rows(filepath):
    """Read a space-delimited csv file and return its rows reversed.

    However, this will need processing.
    :returns rows: list of lists of strings
    """
    with open(filepath) as csvfile:
        reader = csv.reader(csvfile, delimiter=" ", quotechar="|")
        rows = [row for row in reader]
    return rows[::-1]
from typing import Dict
from typing import Any
from typing import Optional
import requests
def post_request(url : str, data : Dict[str, Any], session : Optional[requests.Session] = None) -> requests.Response:
    """
    Post a request to the url with the given data,
    optionally using a provided session.

    Parameters
    ----------
    url: str
        The url to post to.
    data: dict[str, Any]
        The json data to include in the post request.
    session: requests.Session, optional
        The persistent session to use, if None is provided
        a new one will be created and destroyed for the
        individual call.
    """
    import json  # local import keeps the module-level import block untouched
    headers = {
        'Content-Type': 'application/json'
    }
    # BUG FIX: passing a dict via `data=` makes requests form-encode the
    # body, contradicting the declared application/json content type.
    # Serialize explicitly so the body really is JSON.
    payload = json.dumps(data)
    if session is not None:
        return session.post(url, headers=headers, data=payload)
    return requests.post(url, headers=headers, data=payload)
def gauss_kl_white_diag(q_mu, q_sqrt):
    """
    Compute KL(q || p) where q(x) = N(q_mu, q_sqrt^2) and p(x) = N(0, I).

    Multiple independent distributions are assumed, one per column:
    each column of ``q_mu`` is a mean and each column of ``q_sqrt`` is the
    diagonal of a square-root covariance matrix.
    """
    mahalanobis = 0.5 * (q_mu ** 2).sum()
    constant = -0.5 * q_sqrt.numel()
    log_det = -q_sqrt.abs().log().sum()  # log-det of q's covariance
    trace = 0.5 * (q_sqrt ** 2).sum()
    return mahalanobis + constant + log_det + trace
def get_venue_response(meetup_id: int = 1, content: bool = False) -> dict:
    """Build a Venue response fixture.

    Keyword arguments:
    meetup_id -- meetup id
    content -- if True, also include the optional address/location fields
    return -> venue dict
    """
    response: dict = {"id": meetup_id}
    if not content:
        return response
    response.update(
        {
            "address_1": "Berlinerstr. 1",
            "address_2": "oben",
            "address_3": "unten",
            "city": "Berlin",
            "country": "Germany",
            "lat": 52.520008,
            "lon": 13.404954,
            "localized_country_name": "Deutschland",
            "name": "Meetup Place",
            "phone": "030 123 456 789",
            "zip_code": "10101",
        }
    )
    return response
def solution2array(solution):
    """Rewrite a solution of the form {(1,1): [4], (1,2): [5], ..., (9,9): [1]}
    into a 9x9 two-dimensional array.

    Useful for human-readable output and as an intermediate step when
    rewriting the sudoku back to the original format. Cells whose domain
    still holds more than one value are left as 0.
    """
    sudoku_array = [[0] * 9 for _ in range(9)]
    # BUG FIX: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for (row, col), assignment in solution.items():
        if len(assignment) == 1:
            sudoku_array[row - 1][col - 1] = assignment[0]
    return sudoku_array
def dmp_copy(f, u):
    """Create a new copy of a polynomial `f` in `K[X]`.

    ``u`` is the nesting level; at level 0 a shallow list copy suffices,
    otherwise each coefficient is copied recursively.
    """
    if not u:
        return list(f)
    return [dmp_copy(coeff, u - 1) for coeff in f]
def locToLatLong(location):
    """
    :param location: location in string format (two ':'-prefixed numbers
        separated by a comma, longitude first, with a trailing delimiter)
    :return: latitude and longitude in float format
    """
    first, second = str(location).split(',')
    longitude = float(first.split(':')[-1])
    # the final character (closing delimiter) is stripped before parsing
    latitude = float(second.split(':')[-1][:-1])
    return latitude, longitude
def judge_category(request_categories, article_category):
    """Check whether an article's category matches any requested category.

    (Docstring translated from the original Chinese.)

    Args:
        request_categories (list): requested category names; may contain the
            special value 'other'.
        article_categories (string): the article's category field —
            presumably a space-separated list of dotted tags such as
            "cs.AI math.CO"; TODO confirm against the caller.

    Returns:
        boolean: True when the article matches at least one request.
    """
    # suggested by @canuse
    # default setting: an empty request list matches nothing
    if len(request_categories) == 0:
        return False
    for req_category in request_categories:
        # plain substring match, e.g. request 'cs' matches 'cs.AI'
        if req_category in article_category:
            return True
        # 'other' matches when not every whitespace-separated tag contains
        # exactly one dot (dot count != tag count).
        if (req_category == 'other') and (article_category.count('.') != len(article_category.split())):
            return True
    return False
def output_requirements(requirements):
    """Render pinned requirements, one "name==version" line per project.

    :param dict requirements: mapping from a project to its pinned version
    """
    lines = [f'{name}=={version}'
             for name, version in sorted(requirements.items())]
    return '\n'.join(lines)
import os
def master_file_name(full_file):
    """Create the master file name by stripping the extension from a file name."""
    base, _extension = os.path.splitext(full_file)
    return base
def get_ax_size(fig, ax):
    """Get the size of an axis in display pixels.

    Args:
        - fig: figure handle (presumably matplotlib — provides ``dpi`` and
          ``dpi_scale_trans``; TODO confirm)
        - ax: the axis handle
    Returns:
        - tuple: width, height (pixels)
    Scraped from stackoverflow, noref.
    """
    # window extent transformed to inches, then scaled by dpi to pixels
    bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
    width, height = bbox.width, bbox.height
    width *= fig.dpi
    height *= fig.dpi
    return width, height
def render_core(url_prefix, auth_header, resources):
    """Generate res.core.js"""
    # Build the JS module line by line, then join once at the end.
    parts = ["function(root, init) {\n"]
    parts.append(f" var q = init('{auth_header}', '{url_prefix}');\n")
    parts.append(" var r = null;\n")
    for key in resources:
        parts.append(f" r = root.{key} = {{}};\n")
        for action, item in resources[key].items():
            parts.append(f" r.{action} = q('{item['url']}', '{item['method']}');\n")
    parts.append("}")
    return ''.join(parts)
def camel_case(text):
    """Camel-cases text.

    Parameters
    ----------
    text : str
        Text to be camel-cased.

    Returns
    -------
    str
        A camel-cased string.
    """
    words = text.title().split()
    return ''.join(words)
import os
def read_main_conf() -> dict:
    """Reads config from env vars."""
    return {key: value for key, value in os.environ.items()}
from typing import Counter
import math
import itertools
def control_smiles_duplication(random_smiles, duplicate_control=lambda x: 1):
    """Return augmented SMILES with duplicates controlled by ``duplicate_control``.

    Parameters
    ----------
    random_smiles : list
        A list of random SMILES, can be obtained by `smiles_to_random()`.
    duplicate_control : func, Optional, default: 1
        Number of times a SMILES is duplicated, as a function of how often
        it occurred in `random_smiles`. Rounded up to the nearest integer.

    Returns
    -------
    list
        A list of random SMILES with duplicates.

    Notes
    -----
    With the default `duplicate_control=lambda x: 1` the returned list
    contains only unique SMILES.
    """
    occurrences = Counter(random_smiles)
    duplicated = []
    for smiles, count in occurrences.items():
        copies = math.ceil(duplicate_control(count))
        duplicated.extend([smiles] * copies)
    return duplicated
def find_closest_ops(point, ops):
    """Find the ops in ``ops`` closest (Manhattan distance) to ``point``.

    Returns a list of all ops within the minimum distance found, where the
    search threshold never drops below 1 (so an exact match also returns
    ops at distance 1, matching the original incremental search).

    BUG FIX: the original `while` loop never terminated when ``ops`` was
    empty; an empty list is now returned. The distances are also computed
    in a single pass instead of re-scanning at every candidate distance.
    """
    if not ops:
        return []

    def _manhattan(op):
        # distance summed over every coordinate of the op
        return sum(abs(p - o) for p, o in zip(point, op))

    best = min(_manhattan(op) for op in ops)
    threshold = max(best, 1)  # original search started at distance 1
    return [op for op in ops if _manhattan(op) <= threshold]
def pick_day(demand, date):
    """
    Select the rows of ``demand`` for exactly the day ``date``
    (format YYYY-MM-DD), indexed by their 'time' column.

    Returns the day frame together with the date itself.
    """
    mask = demand['date'] == date
    day = demand.loc[mask]
    day.set_index('time', inplace=True)
    return day, date
def offset(self, arrslice):
    """
    Returns index in dbarray object along all dimensions of the [0,...,0] element of the slice.
    """
    # NOTE(review): the assignment `self.offset = offset` below shadows this
    # method on the instance after the first uncached call — looks
    # unintentional; verify against callers.
    if not self.bounds is None:
        # cached
        return self.bounds
    else:
        # needs to compute
        # NOTE(review): this branch is a stub — it always yields 1 and
        # ignores `arrslice`; presumably a placeholder. TODO confirm.
        offset = 1
        self.offset = offset
        return offset
def aftype_sort(afl, ic):
    """
    Helper function to sort a list of aftype objects in place according to
    the internal-coordinate type (ic).

    ic values: "bnd" (bond), "ang" (angle), "dih" (dihedral),
    "oop" (out-of-plane), "vdwpr" (van der Waals pair, sorted like a bond).

    Returns the (mutated) input list.
    """
    if ic == "bnd":
        afl.sort()
    elif ic == "ang":
        # canonical order for an angle: outer atoms ordered, apex in the middle
        if afl[0] > afl[2]: afl.reverse()
    elif ic == "dih":
        # order by the two central atoms; ties broken by the outer atoms
        if afl[1] > afl[2]:
            afl.reverse()
        elif afl[1] == afl[2]:
            if afl[0] > afl[3]: afl.reverse()
    elif ic == "oop":
        # first atom is the center; only the plane atoms are sorted
        plane = afl[1:]
        plane.sort()
        afl[1:] = plane
    elif ic == "vdwpr":
        return aftype_sort(afl, "bnd")
    return afl
def is_valid(host_port):
    """Checks if there are a host and a port (exactly one ':' separator)."""
    return len(host_port.split(":")) == 2
def valid_alternative_image_text(arch):
    """An `img` tag must have an alt value.

    Args:
        arch: parsed view architecture — presumably an lxml element
            supporting ``xpath``; TODO confirm.

    Returns:
        "Warning" when some <img> lacks alt/t-att-alt/t-attf-alt,
        True otherwise.
    """
    if arch.xpath('//img[not(@alt or @t-att-alt or @t-attf-alt)]'):
        return "Warning"
    return True
def get_check_result(result, numvals):
    """Check the result of a 'get' operation.

    Returns None when the result is well-formed (or is an error-only
    result), otherwise a string describing the first problem found.
    """
    if not isinstance(result, dict):
        return "pwrcmd output is not a dict"
    # Some errors return only the error itself.
    has_vals = "attr_vals" in result
    has_stamps = "timestamps" in result
    if result['PWR_ReturnCode'] != 0 and not has_vals and not has_stamps:
        return None
    # Both lists must exist with exactly numvals elements.
    for field in ("attr_vals", "timestamps"):
        if field not in result:
            return "'{}' not found".format(field)
        if not isinstance(result[field], list):
            return "'{}' is not a list".format(field)
        if len(result[field]) != numvals:
            return "expected {} {}".format(numvals, field)
    if "status" not in result:
        return "'status' not found"
    return None
import random
import socket
def get_unused_port(port=None):
    """Return a free TCP port, preferring ``port`` when it is usable.

    Args:
        port: candidate port; when None or outside 1024-65535 a random
            candidate is drawn instead.

    Returns:
        A port number in [1024, 65535] that could be bound at check time.

    Fixes vs. the original: the socket is always closed (it leaked on
    unexpected bind errors), the already-bound case retries in a loop
    instead of recursing, and the `assert` (stripped under -O) is gone.
    """
    while True:
        if port is None or port < 1024 or port > 65535:
            port = random.randint(1024, 65535)
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            try:
                s.bind(('', port))  # try to open the port
            except socket.error as e:
                # 98 (Linux) / 10048 (Windows): address already in use
                if e.errno in (98, 10048):
                    port = None  # draw a fresh random port and retry
                    continue
                raise
        return port
def getx(data, keys, default=None, validator=None):
    """Extended get of an attribute of the cluster API, with recursion
    (deep get), defaulting & validation.

    Args:
        data: nested mapping to look into.
        keys: dot-separated path, e.g. "a.b.c".
        default: value returned when the path is missing (None disables
            defaulting and makes a missing path raise).
        validator: optional callable applied to the found value.

    Raises:
        KeyError: when the path is missing and no default was given.
    """
    for key in keys.split('.'):
        try:
            data = data[key]
        except KeyError:
            # `is not None` instead of `!= None` — identity test cannot be
            # hijacked by a custom __eq__ on the default value.
            if default is not None:
                return default
            raise KeyError("invalid cluster API definition. Key '%s' does not exist" % (keys))
    if validator is not None:
        validator(data)
    return data
from typing import Tuple
def ntp2parts(ntp: int) -> Tuple[int, int]:
    """Split NTP time into seconds and fraction."""
    seconds = ntp >> 32
    fraction = ntp & 0xFFFFFFFF
    return seconds, fraction
def create_declarations_list_from_raw_data(raw_declarations):
    """
    Create a list of declaration groups, one per blank-line-separated block.

    Each group is a dict with the concatenated 'declarations' string and the
    number of 'persons' (non-blank lines) in the block.
    """
    declarations_list = []
    new_group = {'declarations': "", 'persons': 0}
    for line in raw_declarations:
        stripped = line.strip()
        if stripped != '':
            new_group['declarations'] += stripped
            new_group['persons'] += 1
        else:
            declarations_list.append(new_group)
            new_group = {'declarations': "", 'persons': 0}
    # In case the data does not end with a blank line there may be an
    # unprocessed group. BUG FIX: the original compared the dict against ""
    # (always unequal), so an empty trailing group was appended whenever the
    # input ended with a blank line; only append a non-empty group.
    if new_group['persons'] > 0:
        declarations_list.append(new_group)
    return declarations_list
def crop_specified_axis(ctvol, max_dim, axis): #Done testing
    """Crop 3D volume <ctvol> to <max_dim> along <axis> (0, 1 or 2).

    Excess is removed symmetrically (extra element from the end when odd).
    Volumes already within the limit are returned unchanged.
    """
    dim = ctvol.shape[axis]
    if dim <= max_dim:
        return ctvol
    excess = dim - max_dim
    start = int(excess / 2.0)
    stop = dim - (excess - start)
    if axis == 0:
        return ctvol[start:stop, :, :]
    if axis == 1:
        return ctvol[:, start:stop, :]
    if axis == 2:
        return ctvol[:, :, start:stop]
def linear(input, weight, bias):
    """
    Applies a linear transformation of `X @ W.t() + b`.

    Inputs:
    - input: mini-batch input X with dim (N, D1)
    - weight: weights matrix with dim (D2, D1)
    - bias: bias with dim (D2,), or None to skip the bias term
    Output:
    - output: transformed tensor with dim (N, D2)
    """
    transformed = input.mm(weight.t())
    if bias is None:
        return transformed
    return transformed + bias
import os
def parseFile(filename):
    """(str)->(str, str)
    Parse the file name and extract the two types to be classified.

    input: ~/dataset/low_high.xlsx
    output: ('low', 'high')
    """
    stem = os.path.basename(filename).split(".")[0]
    parts = stem.split("_")[:2]
    try:
        type1, type2 = parts
    except ValueError:
        # fewer than two underscore-separated pieces
        type1 = type2 = None
    return type1, type2
import re
def get_seat_id(seat_code):
    """
    Decode a boarding-pass seat code (B/F pick the row, R/L the column).

    >>> get_seat_id('BFFFBBFRRR')
    567
    >>> get_seat_id('FFFBBBFRRR')
    119
    >>> get_seat_id('BBFFBBFRLL')
    820
    """
    # B and R mean the upper half (bit 1); F and L the lower half (bit 0).
    bits = seat_code.translate(str.maketrans("BRFL", "1100"))
    return int(bits[:7], 2) * 8 + int(bits[7:], 2)
import socket
def _get_ip(remote):
    """Get the local IP of a connection to a remote host.

    "Connects" a UDP socket to ``remote`` (no traffic is sent for UDP
    connect) and reads back the local address the OS routed through.

    Returns:
        The local IPv4 address as a list of four ints, e.g. [192, 168, 1, 2].
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # 1883 is the MQTT port; any port would do for route selection
        sock.connect((remote, 1883))
        address = sock.getsockname()[0]
    finally:
        sock.close()
    return list(map(int, address.split(".")))
from typing import List
def _interpolation_search(arr: List[int], x: int) -> int:
    """
    Return the position i of a sorted array so that arr[i] <= x < arr[i+1]
    Args:
        arr (:obj:`List[int]`): non-empty sorted list of integers
        x (:obj:`int`): query
    Returns:
        `int`: the position i so that arr[i] <= x < arr[i+1]
    Raises:
        `IndexError`: if the array is empty or if the query is outside the array values
    """
    i, j = 0, len(arr) - 1
    while i < j and arr[i] <= x < arr[j]:
        # probe position interpolated assuming roughly uniform value spacing
        k = i + ((j - i) * (x - arr[i]) // (arr[j] - arr[i]))
        if arr[k] <= x < arr[k + 1]:
            return k
        elif arr[k] < x:
            # x lies strictly right of the probe: narrow to (k, j]
            i, j = k + 1, j
        else:
            # x lies left of the probe: narrow to [i, k)
            i, j = i, k
    raise IndexError(f"Invalid query '{x}' for size {arr[-1] if len(arr) else 'none'}.")
def compare_with_leanix(policies: dict):
    """
    Compare specified policy permissions with LeanIX's prescribed policy
    permission set under requirements.py.

    Args:
        policies: mapping of policy name to a dict with keys 'exists',
            'req_permissions', 'mandatory' and (optionally) 'aws_permission'.

    Returns:
        dict keyed by policy name with uniform entries: 'exists',
        'aws_permission', 'req_permission', 'permission_check', 'mandatory'.

    The three near-identical branches of the original were collapsed; the
    output entries now also share one consistent key order.
    """
    output = dict()
    for policy, data in policies.items():
        if data['exists']:
            aws_permission = data['aws_permission']
            # dict items views compare set-like, order-independently
            permission_check = data['req_permissions'].items() == data['aws_permission'].items()
        elif ('aws_permission' in data) and (data['req_permissions'] is not None):
            aws_permission = data['aws_permission']
            permission_check = False
        else:
            aws_permission = None
            permission_check = False
        output[policy] = {
            'exists': data['exists'],
            'aws_permission': aws_permission,
            'req_permission': data['req_permissions'],
            'permission_check': permission_check,
            'mandatory': data['mandatory'],
        }
    return output
import os
def get_url_filename(url):
    """Return the file name component of a URL address.

    (Docstring translated from the original Chinese; author: jhuang.)
    """
    return str(os.path.basename(url))
import shutil
import os
def file_transfer_handler(default_dir, target_dir):
    """
    Move a directory, e.g. file_transfer_handler('../SaveFile', '../targetFile').

    (Docstring translated from the original Chinese.)

    :param default_dir: default storage path (the source to move)
    :param target_dir: target path; created (with parents) if missing
    :return: True on success, False on any failure (the error is printed)
    """
    try:
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        shutil.move(default_dir, target_dir)
        # user-facing message ("move complete") left unchanged — runtime output
        print('移动完成: ', target_dir)
        return True
    except Exception as e:
        # deliberately best-effort: any failure is reported, not raised
        print(e)
        return False
import sqlite3
def db_connection(database):
    """
    Open and return a connection to the given SQLite database.
    """
    connection = sqlite3.connect(database)
    return connection
def intersect_lists(l1, l2):
    """Returns the intersection of two lists. The result will not contain
    duplicate elements and list order is not preserved."""
    return list(set(l1) & set(l2))
def _adaptive_order_1(q, i, j, recons):
"""
First-order reconstruction.
First-order reconstruction is given by
.. math::
\hat{q}_{i + 1/2} = q_i
"""
recons[2 * i + 1] = q[j]
recons[2 * i + 2] = q[j]
return True | 1a49bed58094988b6e884427c63d3baf5daf1ae8 | 691,891 |
def Permutacao(num):
    """
    Number of permutations (factorial) of ``num`` distinct elements.

    param num: number of distinct elements.
    return: -1 signals an error (negative input); otherwise num!.
    """
    if num < 0:
        return -1
    resultado = 1
    for fator in range(2, num + 1):
        resultado *= fator
    return resultado
from enum import Enum
def VictoryEnum(ctx):
    """Victory Type Enumeration.

    NOTE(review): `Enum` is imported here from the stdlib `enum` module,
    whose functional API does not accept a subconstruct plus keyword
    mappings — this call signature matches `construct.Enum(subcon,
    **mapping)` instead; the import presumably should come from the
    `construct` parsing library. TODO confirm.
    """
    return Enum(
        ctx,
        standard=0,
        conquest=1,
        exploration=2,
        ruins=3,
        artifacts=4,
        discoveries=5,
        gold=6,
        time_limit=7,
        score=8,
        standard2=9,
        regicide=10,
        last_man=11
    )
def str_to_bool(str):
    """Convert a unicode string to the equivalent boolean, case-insensitively.

    Raises ValueError for anything other than 'true'/'false'.
    """
    lowered = str.lower()  # lower-case once instead of per comparison
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    raise ValueError("{} in the app.config file is not a boolean value".format(str))
import textwrap
def get_notifiers_provider_config(message, subtitle, title) -> dict:
    """
    Return kwargs that will be passed to `notifiers.notify` method.

    Providers differ in which parameters `notify` accepts and how messages
    must be formatted, so a per-provider kwargs map is built here.
    """
    pushover_message = textwrap.dedent(
        f"""
        <i>{subtitle}</i>
        {message}
        """
    )
    return {
        "pushover": {
            "message": pushover_message,
            "title": title,
            "html": True,
        },
        "slack": {"message": message if message else "task complete"},
    }
def rgb2hex(pix):
    """Given a tuple of r, g, b, return the hex value """
    r, g, b = pix[:3]  # extra channels (e.g. alpha) are ignored
    return f"#{r:02x}{g:02x}{b:02x}"
def markdown_link(desc: str, link: str, t: str = "") -> str:
    """Build a markdown link, optionally wrapping the text in emphasis ``t``."""
    label = f"{t}{desc}{t}"
    return f"[{label}]({link})"
def decode_uint256(s: bytes) -> int:
    """Decode 256-bit integer from little-endian buffer."""
    assert len(s) == 32
    value = int.from_bytes(s, byteorder='little')
    return value
def definition():
    """Return the SQL adding on-costs (NI, pension, travel) to v_calc_staff_period.

    NI is computed on the staff member's total pay across all areas, then
    apportioned to each line by that line's share of the total; pension is a
    flat rate on the line value; travel is the annual scheme amount / 12.
    """
    # The SQL text below is returned verbatim — do not reformat.
    sql = """
    SELECT v.*,
    --NI
    --Rate, given pay in all areas
    ISNULL(dbo.udfNI(v_total.value, ni.threshold, ni.rate),0)
    -- Multiplied by the proportion of pay accounted for on this line
    *v.value/v_total.value as ni,
    --Pension
    v.value*pension.rate as pension,
    --Travel
    v.travel_scheme/12 as travel
    FROM v_calc_staff_period AS v
    INNER JOIN (
    SELECT staff_id, period, set_cat_id, acad_year, SUM(ISNULL(value,0)) as value
    FROM v_calc_staff_period
    WHERE value > 0
    GROUP BY staff_id, period, set_cat_id, acad_year
    ) AS v_total ON v.staff_id = v_total.staff_id
    AND v.period = v_total.period
    AND v.set_cat_id = v_total.set_cat_id
    AND v.acad_year = v_total.acad_year
    INNER JOIN staff_pension_contrib_normal pension ON pension.pension_id = v.pension_id
    AND pension.acad_year = v.acad_year
    AND pension.period = v.period
    INNER JOIN staff_ni_normal ni ON ni.acad_year = v.acad_year
    AND ni.period = v.period
    """
    return sql
import os
import json
def load_fixture(filename, asjson=False):
    """Load a fixture from the tests/fixtures directory next to this module.

    Args:
        filename: fixture file name; ".json" is appended when no extension
            is present.
        asjson: when True, parse and return the file content as JSON.

    Returns:
        The parsed JSON object, or the raw file text.
    """
    # BUG FIX: the extensionless branch produced a constant name instead of
    # deriving it from `filename` (a corrupted format placeholder).
    filename = f"{filename}.json" if "." not in filename else filename
    path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
    with open(path, encoding="utf-8") as fptr:
        if asjson:
            return json.loads(fptr.read())
        return fptr.read()
from typing import Dict
def A(b: int, c: int) -> Dict:
    """Function that outputs a typing type."""
    total = b + c
    return {'a': total}
def contains_whitespace(row):
    """Count how many string columns of ``row`` contain a space character."""
    return sum(1 for cell in row if isinstance(cell, str) and " " in cell)
def bg(text, color):
    """Set text to background color (256-color ANSI escape)."""
    return f"\33[48;5;{color}m{text}\33[0m"
import torch
def reshape_and_split_tensor(tensor, n_splits):
    """Reshape and split a 2D tensor along the last dimension.

    Args:
        tensor: a [num_examples, feature_dim] tensor. num_examples must be a
            multiple of `n_splits`.
        n_splits: int, number of splits to split the tensor into.

    Returns:
        splits: a list of `n_splits` tensors; split k contains rows
        k, k + n_splits, k + 2*n_splits, ... of the input.
    """
    feature_dim = tensor.shape[-1]
    # Pack n_splits consecutive rows into one wide row, then slice columns.
    stacked = torch.reshape(tensor, [-1, feature_dim * n_splits])
    return [
        stacked[:, feature_dim * k: feature_dim * (k + 1)]
        for k in range(n_splits)
    ]
def GetDatabaseFallthrough():
    """Python hook to get the value for the default database.

    Firestore currently only supports one database called '(default)'.

    Returns:
        The name of the default database.
    """
    default_database = '(default)'
    return default_database
def default_stko_MD_settings():
    """
    Default settings from stk source code as of 26/04/19.
    """
    return {
        'output_dir': None,
        'timeout': None,
        'force_field': 16,
        'temperature': 300,  # K
        'conformers': 50,
        'time_step': 1.0,  # fs
        'eq_time': 10,  # ps
        'simulation_time': 200,  # ps
        'maximum_iterations': 2500,
        'minimum_gradient': 0.05,
        'use_cache': False
    }
def _join(words):
"""Join words into single line.
Args:
words: List of words.
Returns:
String with space separated words.
"""
return u' '.join(words) if words else u'' | 21707a9abebf03afc1cf81c7454ee172f1b40d04 | 691,908 |
from typing import Counter
def make_summary(tests, **kwargs):
    """Return JSON-serializable test result summary.

    Counts tests per outcome, adds a 'total', and merges any extra
    (numeric) keyword fields into the summary.
    """
    outcome_counts = Counter(t['outcome'] for t in tests.values())
    outcome_counts['total'] = sum(outcome_counts.values())
    outcome_counts.update(kwargs)
    return outcome_counts
def lam(request):
    """
    Pytest fixture helper: return the current parametrized value.

    NB listmode is too slow, so the original test of 10k+ counts is too big
    """
    return request.param
def event_rank(count):
    """Determine event ranking from an event count."""
    # (upper bound, rank) pairs, ascending; first match wins
    thresholds = (
        (10, 'constable'),
        (100, 'sergeant'),
        (250, 'inspector'),
        (500, 'superintendent'),
        (1000, 'commander'),
    )
    for limit, rank in thresholds:
        if count < limit:
            return rank
    return 'commissioner'
import random
def mutations(children, mutate_odds, mutate_min, mutate_max):
    """Mutate each child in place with probability ``mutate_odds``.

    A mutated child becomes round(child * uniform(mutate_min, mutate_max)).
    Most mutations are bad, but the good ones can be very good — keep the
    factors near 1.0 for sensible results. Returns the (mutated) list.
    """
    for index, person in enumerate(children):
        if mutate_odds >= random.random():
            factor = random.uniform(mutate_min, mutate_max)
            children[index] = round(person * factor)
    return children
from typing import Counter
def get_vocab(training_set, vocab_size_threshold=5):
    """Get the vocabulary from the training set.

    Words occurring fewer than ``vocab_size_threshold`` times are dropped.
    Returns (word2id, id2word) maps seeded with the four special tokens.
    """
    word_counts = Counter(
        word
        for sample in training_set
        for sentence in sample
        for word in sentence
    )
    kept_words = [w for w in word_counts if word_counts[w] >= vocab_size_threshold]
    word2id = {"_GOO": 0, "_EOS": 1, "_PAD": 2, "_UNK": 3}
    id2word = {0: "_GOO", 1: "_EOS", 2: "_PAD", 3: "_UNK"}
    for next_id, word in enumerate(kept_words, start=len(word2id)):
        word2id[word] = next_id
        id2word[next_id] = word
    assert(len(word2id) == len(id2word))
    print("vocabulary size: %d" % len(word2id))
    return word2id, id2word
def get_encoded_and_regular_cols(cols, onehot_dict):
    """Return a list of onehot encoded cols and a list of remainder cols.

    Only groups with more than one encoded column count as encoded.
    """
    encoded_cols = [
        col
        for group in onehot_dict.values()
        if len(group) > 1
        for col in group
    ]
    encoded_set = set(encoded_cols)
    regular_cols = [col for col in cols if col not in encoded_set]
    return encoded_cols, regular_cols
import random
def get_random_edit_script(old_sequence, new_sequence):
    """
    Used for testing. The Myers algorithms should never produce an edit script
    that is longer than the random version.

    Walks both sequences with positions x (old) and y (new): matching runs
    are skipped, and at each mismatch a delete or insert is chosen at random.
    Returns the list of edit operations; D counts the edits made.
    """
    es = []
    N = len(old_sequence)
    M = len(new_sequence)
    x = 0
    y = 0
    D = 0
    while not (x == N and y == M):
        # consume the common prefix from the current positions
        while (x < N) and (y < M) and (old_sequence[x] == new_sequence[y]):
            x = x + 1
            y = y + 1
        if (x < N) and (y < M):
            # both sequences have material left: pick delete or insert randomly
            if random.randint(0, 1):
                es.append({"operation": "delete", "position_old": x})
                x = x + 1
            else:
                es.append({"operation": "insert", "position_old": x, "position_new": y})
                y = y + 1
            D = D + 1
        elif x < N:
            # only old material remains: must delete
            es.append({"operation": "delete", "position_old": x})
            x = x + 1
            D = D + 1
        elif y < M:
            # only new material remains: must insert
            es.append({"operation": "insert", "position_old": x, "position_new": y})
            y = y + 1
            D = D + 1
    return es
def tracking(gfftracking):
    """
    Read the data from the GffCompare gffcmp.tracking file and
    format as a dictionary.
    Only takes three-way matches from the file.
    :return: tcons_XXXX (key) : [[transcript_id 1],
                                 [transcript_id 2],
                                 [transcript_id 3]] (value)
    """
    tcons = {}
    with open(gfftracking) as file:
        for line in file:
            line = line.split()
            # columns 5+ hold one transcript entry per compared pipeline
            transcripts = line[4::]
            temp_list = []
            # '-' means that at least one pipeline did not have a matching transcript
            # only take lines with three transcripts
            if '-' not in transcripts:
                for transcript in transcripts:
                    # entry format is like q1:gene|transcript|...; keep the transcript id
                    temp_list.append(transcript.split('|')[1])
                tcons[line[0]] = temp_list
    return tcons
def CreateLookUpTable(numStrands, lengthStrands):
    """
    Return a look-up table for the scaffold in string format,
    initialized with empty initial values = ''.
    """
    # [''] * n is safe here because strings are immutable; each row is
    # created independently so rows do not alias each other.
    return [[''] * lengthStrands for _ in range(numStrands)]
from typing import Any
def _remove_names(obj: Any) -> Any:
"""
dict-based representations may have additional names
:param obj:
:return:
"""
if isinstance(obj, dict):
return {k: _remove_names(v) for k, v in obj.items() if k != 'name'}
elif isinstance(obj, list):
return [_remove_names(x) for x in obj]
else:
return obj | 22a737df39f18331dfa66fd14f3fa0ad16237ade | 691,919 |
import codecs
import os
def get_names():
    """
    Return a list of names.

    Reads data/names.txt (relative to the working directory, UTF-8),
    strips surrounding whitespace and replaces apostrophes with hyphens
    in each line.
    """
    return [n.strip().replace("'", "-") for n in codecs.open(os.path.join("data", "names.txt"), "rb", 'utf8').readlines()]
def scan_aggs(search, source_aggs, inner_aggs={}, size=10):
    """
    Helper function used to iterate over all possible bucket combinations of
    ``source_aggs``, returning results of ``inner_aggs`` for each. Uses the
    ``composite`` aggregation under the hood to perform this.

    Yields one composite bucket at a time; ``size`` buckets are fetched per
    request and pagination continues via the composite ``after`` cursor.
    """
    def run_search(**kwargs):
        # empty slice: we only want aggregations, no hits
        s = search[:0]
        s.aggs.bucket('comp', 'composite', sources=source_aggs, size=size, **kwargs)
        for agg_name, agg in inner_aggs.items():
            s.aggs['comp'][agg_name] = agg
        return s.execute()
    response = run_search()
    while response.aggregations.comp.buckets:
        for b in response.aggregations.comp.buckets:
            yield b
        # prefer the server-provided cursor; fall back to the last bucket key
        if 'after_key' in response.aggregations.comp:
            after = response.aggregations.comp.after_key
        else:
            after= response.aggregations.comp.buckets[-1].key
        response = run_search(after=after)
def method4all(f):
    """
    Decorator: mark ``f`` so that, when defined under a Loop class, the
    decorated function can also be used on the outer layer.
    """
    # Plain attribute assignment is equivalent to setattr with a literal name.
    f.forall = True
    return f
def bfs(graph, start):
    """
    Breadth-first search over ``graph`` (a mapping node -> iterable of
    neighbours) starting from ``start``.

    :return: list of nodes in the order they were first visited
    """
    from collections import deque

    visited = []
    seen = set()            # O(1) membership test instead of scanning `visited`
    queue = deque([start])  # deque.popleft() is O(1); list.pop(0) was O(n)
    while queue:
        vertex = queue.popleft()
        if vertex not in seen:
            seen.add(vertex)
            visited.append(vertex)
            # Neighbours may include already-seen nodes; duplicates in the
            # queue are filtered on pop, matching the original behaviour.
            queue.extend(graph[vertex])
    return visited
def rwrap(some_string):
    """Return ``some_string`` wrapped in ANSI escape codes that render it red."""
    return f"\033[91m{some_string}\033[0m"
async def healthCheck():
    """
    Health-check endpoint, e.g. polled by an AWS load balancer.

    Returns the set ``{'ok'}``; the web framework serializes it into the
    body of a 200 response — presumably FastAPI, TODO confirm.
    """
    return {'ok'}
def _to_int(timestamp):
"""Return the integral part of a timestamp.
Parameters:
timestamp -- NTP timestamp
Retuns:
integral part
"""
return int(timestamp) | f5e8dd7d58228a5d22d2a736341daf74fb020f79 | 691,927 |
def _ww3_prnc_current_contents(run_date, run_type):
    """
    Build the contents of a WAVEWATCH III ``ww3_prnc_current.inp``
    field-preprocessor input file for the given run date.

    :param :py:class:`arrow.Arrow` run_date:
    :param str run_type: not referenced in this function's body — kept for
        signature consistency with sibling helpers, presumably; TODO confirm
    :return: ww3_prnc_current.inp file contents
    :rtype: str
    """
    # Date stamp used to locate the current-field NetCDF file below.
    start_date = run_date.format("YYYYMMDD")
    contents = f"""$ WAVEWATCH III NETCDF Field preprocessor input \
ww3_prnc_current.inp
$
$ Forcing type, grid type, time in file, header
'CUR' 'LL' T T
$ Name of dimensions
$
x y
$
$ Sea water current component variable names
u_current v_current
$
$ Forcing source file path/name
$ File is produced by make_ww3_current_file worker
'current/SoG_current_{start_date}.nc'
"""
    return contents
def gft(s, psi):
    """gft: Graph Fourier Transform (GFT)
    Args:
        s (N x d np.ndarray): Matrix of graph signals; one signal per column.
        psi (N x N np.ndarray): graph Laplacian eigenvectors
    Returns:
        s_hat (N x d np.ndarray): GFT of the data, i.e. ``psi.T @ s``
    """
    return psi.T @ s
import traceback
import sys
def report_thread_error(fn):
    """Decorator that prints exceptions raised in worker threads
    (which Qt would otherwise swallow silently) before re-raising them.
    """
    import functools

    @functools.wraps(fn)  # preserve fn's __name__/__doc__ on the wrapper
    def report_thread_error_wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as ex:
            print('\n\n *!!* Thread Raised Exception: ' + str(ex))
            print('\n\n *!!* Thread Exception Traceback: \n\n' + traceback.format_exc())
            sys.stdout.flush()
            # (removed the dead `et, ei, tb = sys.exc_info()` assignment)
            raise
    return report_thread_error_wrapper
def CtoK(T_C):
    """Convert a temperature from degrees Celsius to Kelvin."""
    return 273.15 + T_C
def ensureUtf(s, encoding='utf8'):
    """Converts input to unicode if necessary.
    If `s` is bytes, it will be decoded using the `encoding` parameter,
    ignoring undecodable byte sequences.
    This function is used for preprocessing /source/ and /filename/ arguments
    to the builtin function `compile`.
    """
    # isinstance (rather than `type(s) == bytes`) also accepts bytes
    # subclasses, matching how the rest of Python treats bytes-like text.
    if isinstance(s, bytes):
        return s.decode(encoding, 'ignore')
    return s
import requests
def get_vsummary_protein(base_url, id):
    """Return the HTTP response to a protein summary query.

    :param base_url: service root URL; 'vsummary/protein' is appended to it
    :param id: identifier sent as the ``id`` query parameter
    """
    endpoint = base_url + 'vsummary/protein'
    return requests.get(endpoint, params={"id": id})
def pow(rv, k):
    """
    Return the random variable ``rv`` raised to the power ``k``.

    NOTE(review): the original docstring said "square", but the code computes
    ``rv**k`` for any exponent; ``pow(rv, 2)`` gives the square.
    Also shadows the builtin ``pow`` within this module.
    """
    return rv**k
from datetime import datetime
def check(str1, str2, format_str):
    """Return True if the two date strings are equal when parsed with
    ``format_str``, or — when parsing fails (e.g. redacted values) —
    if the raw strings are identical."""
    try:
        parsed1 = datetime.strptime(str1, format_str)
        parsed2 = datetime.strptime(str2, format_str)
    except ValueError:
        # Unparseable (redacted) values: correct only if redacted identically.
        return str1 == str2
    return parsed1 == parsed2
def last_kstp_from_kper(hds, kper):
    """ function to find the last time step (kstp) for a
    given stress period (kper) in a modflow head save file.
    Args:
        hds (`flopy.utils.HeadFile`): head save file
        kper (`int`): the zero-index stress period number
    Returns:
        **int**: the zero-based last time step during stress period
        kper in the head save file
    """
    # kstpkper entries are one-based; collect the steps of the requested period.
    steps = [kkstp for kkstp, kkper in hds.kstpkper if kkper == kper + 1]
    if not steps:
        raise Exception("kstp not found for kper {0}".format(kper))
    # Convert the (one-based) largest step number to a zero-based index.
    return max(steps) - 1
def RSI(df, base="Close", period=21):
    """
    Compute the Relative Strength Index (RSI).
    Args :
        df : Pandas DataFrame which contains ['date', 'open', 'high', 'low', 'close', 'volume'] columns
        base : Column name the RSI is computed from (Default Close)
        period : Look-back window, in number of candles
    Returns :
        df : the same DataFrame with a new column added for
             Relative Strength Index (RSI_$period)
    """
    col = 'RSI_' + str(period)
    change = df[base].diff()
    gains, losses = change.copy(), change.copy()
    gains[gains < 0] = 0
    losses[losses > 0] = 0
    # Wilder-style smoothing via an exponentially weighted mean.
    avg_gain = gains.ewm(com=period - 1, adjust=False).mean()
    avg_loss = losses.ewm(com=period - 1, adjust=False).mean().abs()
    df[col] = 100 - 100 / (1 + avg_gain / avg_loss)
    df[col].fillna(0, inplace=True)
    return df
import argparse
def arg_parser(desc):
    """
    Create an empty argument parser that callers can then add arguments to.
    Parameters
    ----------
    desc : str
        Program description shown in the parser's help output.
    Returns
    -------
    ArgumentParser
        An empty parser with raw-text help formatting and the description.
    """
    return argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter,
        description=desc,
    )
def instance_module_vhdl_style(module_name, entity_name, ports):
    """
    Render a VHDL component instantiation (port map) as a string.
    :param module_name: name of the module being instantiated
    :param entity_name: instance label
    :param ports: iterable of objects exposing a ``name`` attribute
    :return: formatted instantiation text
    """
    header = "\n{} : {} port map (\n".format(entity_name, module_name)
    body = "".join("    {0:42} => {0},".format(port.name) + '\n' for port in ports)
    # Strip only the final newline — the trailing comma is preserved,
    # matching the original generator's output.
    return (header + body)[:-1] + "\n  );\n"
def describe_element(name, df):
    """ Takes the columns of the dataframe and builds a ply-like description
    Parameters
    ----------
    name: str
        ply element name; 'face' gets a fixed vertex-index list property
    df: pandas DataFrame
    Returns
    -------
    element: list[str]
    """
    property_formats = {'f': 'float', 'u': 'uchar', 'i': 'int'}
    element = ['element ' + name + ' ' + str(len(df))]
    if name == 'face':
        element.append("property list uchar int points_indices")
    else:
        # Iterate (column, dtype) pairs directly instead of `df.dtypes[i]`
        # with an integer key, which pandas treats as a deprecated/ambiguous
        # label lookup on a string-indexed Series.
        for column, dtype in df.dtypes.items():
            # first letter of the dtype name selects the ply format
            fmt = property_formats[str(dtype)[0]]
            element.append('property ' + fmt + ' ' + column)
    return element
import requests
def get_routes_from_microsoft(url):
    """
    Fetch the JSON route list from the Microsoft endpoint at ``url``,
    extract the IP addresses, and return them as a de-duplicated list.
    """
    entries = requests.get(url).json()
    unique_routes = set()
    for entry in entries:
        unique_routes.update(entry.get('ips', []))
    return list(unique_routes)
def tri_to_dec(n):
    """
    :param n: string representation of a trinary (base-3) number
    :returns: decimal integer value of n
    """
    value = 0
    for char in n:
        # Horner's rule: shift the accumulated value one base-3 digit
        # left, then add the new digit.
        value = value * 3 + int(char)
    return value
import os
import requests
import shutil
def download_file(client, url, filename=None):
    """Download a file at a given URL
    Code inspired by https://stackoverflow.com/a/39217788.
    :param client: A :class:`activityinfo.client.Client` object.
    :param url: The URL where the file is located.
    :param filename: The name to be given to the file when written to the local file system. If this is not provided,
    then the name is taken from the end of ``url``.
    :return: The name of the file on the local file system.
    """
    if filename is None:
        # Default to the last path component of the URL.
        local_filename = url.split('/')[-1]
    else:
        local_filename = filename
    # Store downloads under an AIReports/ folder next to this module.
    path = os.path.dirname(os.path.abspath(__file__))
    local_filename = '{}/AIReports/{}'.format(path, local_filename)
    # Stream the response body straight to disk to avoid buffering it in memory.
    with requests.get(url, auth=client.auth, stream=True) as r:
        # NOTE(review): r.raw bypasses requests' content decoding
        # (gzip/deflate) — assumes the server sends the file unencoded;
        # confirm if transfers arrive compressed.
        with open(local_filename, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    return local_filename
def get_sex(sex):
    """Return a consistent sex notation: 'm'/'f' expand to 'male'/'female';
    anything else is returned lower-cased."""
    normalized = sex.lower()
    if normalized == 'm':
        return 'male'
    if normalized == 'f':
        return 'female'
    return normalized
import re
import os
def file_fetch(dirRoot, gameName):
    """Collect files in ``dirRoot`` whose names start with a single-letter
    prefix drawn from the characters of ``gameName``, grouped by prefix.

    :return: dict mapping prefix letter -> list of [0, full_path] entries
    """
    # gameName's characters become a regex character class for the prefix.
    pattern = re.compile(r"^([{:}])_.*".format(gameName))
    grouped = {}
    for entry in os.listdir(dirRoot):
        if not os.path.isfile(os.path.join(dirRoot, entry)):
            continue
        match = pattern.search(entry)
        if match is None:
            continue
        grouped.setdefault(match.group(1), []).append(
            [0, os.path.join(dirRoot, match.group(0))])
    return grouped
def policy_rollout(agent, env, num_traj, horizon):
    """Rollout an agent to collect trajectories.
    Args:
        agent: an agent to rollout.
        env: an environment to perform rollouts.
        num_traj: the number of trajectories to collect.
        horizon: the maximal number of steps for each trajectory.
    Returns:
        states, actions, rewards and observations from rollout trajectories.
    """
    traj_states = []
    traj_actions = []
    traj_rewards = []
    traj_observations = []
    for _ in range(num_traj):
        time_step = env.reset()
        states = []
        rewards = []
        actions = []
        observations = []
        for _ in range(horizon):
            # MuJoCo specific operations.
            # Record the simulator state *before* acting, paired with the
            # observation the agent sees at this step.
            states.append(env._gym_env.get_state())  # pylint: disable=protected-access
            observations.append(time_step)
            action = agent.action(time_step.observation)
            actions.append(action)
            time_step = env.step(action)
            rewards.append(float(time_step.reward))
            # Stop early if the episode terminates before `horizon` steps.
            if time_step.is_last():
                break
        traj_states.append(states)
        traj_actions.append(actions)
        traj_rewards.append(rewards)
        traj_observations.append(observations)
    return traj_states, traj_actions, traj_rewards, traj_observations
def pick_files(profile_dir, **kwargs):
    """
    Return paths to the files from the profile that should be backed up.
    There are 17 files that can be backed up. They have been organized into 11
    categories for your convenience:
    - autocomplete
    - bookmarks
    - certificates
    - cookies
    - dictionary
    - download_actions
    - passwords
    - preferences
    - search_engines
    - site_settings
    - styles
    By default all 17 files will be backed up, but you can prune any of the
    above categories by passing it as a keyword argument set to False, i.e.
    ``cookies=False''.
    """
    profile_files = {  # (no fold)
        'autocomplete': [
            'formhistory.sqlite',
        ],
        'bookmarks': [
            'places.sqlite',
            'bookmarkbackups',
        ],
        'certificates': [
            'cert8.db',
        ],
        'cookies': [
            'cookies.sqlite',
        ],
        'dictionary': [
            'persdict.dat',
        ],
        'download_actions': [
            'mimeTypes.rdf',
        ],
        'passwords': [
            'key3.db',
            'logins.json',
        ],
        'preferences': [
            'prefs.js',
            'user.js',
        ],
        'search_engines': [
            'search.json',
            'searchplugins',
        ],
        'site_settings': [
            'permissions.sqlite',
            'content-prefs.sqlite',
        ],
        'styles': [
            'chrome/userChrome.css',
            'chrome/userContent.css',
        ],
    }
    selected = []
    for category, filenames in profile_files.items():
        # A category is included unless explicitly disabled via kwargs.
        if kwargs.get(category, True):
            selected.extend(profile_dir / name for name in filenames)
    # Only report paths that actually exist in this profile.
    return [path for path in selected if path.exists()]
def file_extensions():
    """Return the list of file extensions associated with MALA equipment output."""
    return ['.cor', '.mrk', '.rad', '.rd3']
import requests
def get_local_page(manga_id):
    """Download the MangaUpdates series page for ``manga_id`` into
    ``raw_data/<manga_id>.html`` and return that local file name."""
    page_url = f'https://www.mangaupdates.com/series.html?id={manga_id}'
    local_name = f'{manga_id}.html'
    page_bytes = requests.get(page_url).content
    with open(f'raw_data/{local_name}', 'wb') as out:
        out.write(page_bytes)
    return local_name
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.