content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def blosxom_sort_list_handler(args):
    """Sort entries so the most recently written come first.

    Sorts the list based on the ``_mtime`` attribute such that most
    recently written entries are at the beginning of the list and the
    oldest entries are at the end.

    :param args: args dict with ``request`` object and ``entry_list``
        list of entries
    :returns: the sorted ``entry_list``
    """
    # Sort by mtime only.  The original decorate-sort-undecorate compared
    # (mtime, entry) tuples, which raises TypeError when two entries share
    # an mtime and the entry objects themselves are not orderable.
    return sorted(args["entry_list"], key=lambda entry: entry._mtime,
                  reverse=True)
def notes(feature):
    """Return the list of "note" qualifiers attached to *feature*.

    Falls back to an empty list when the feature carries no notes.
    """
    qualifiers = feature.qualifiers
    return qualifiers.get("note", [])
import argparse
def parse_args(args):
    """Parse command line arguments and return the resulting namespace."""
    parser = argparse.ArgumentParser(
        description="XACC VQE Fermion Kernel Generator.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        fromfile_prefix_chars='@')
    parser.add_argument("-m", "--molecule", required=True)
    parser.add_argument(
        "-a", "--molecule-args", nargs='*', type=float,
        help="The arguments for the molecule generation source string.")
    parser.add_argument(
        "-r", "--args-range", nargs='*', type=float,
        help="The start, end, and step for a range of args")
    return parser.parse_args(args)
def text2float(txt: str) -> float:
    """Convert text to float, accepting a comma as the decimal separator.

    Returns ``0.0`` when *txt* is not a valid number.
    """
    try:
        return float(txt.replace(",", "."))
    except (ValueError, AttributeError, TypeError):
        # ValueError: text is not numeric; AttributeError/TypeError: txt is
        # not a string.  The original bare ``except`` would also swallow
        # KeyboardInterrupt/SystemExit, so catch only what can occur here.
        return 0.0
def drop_zeros(tseries, labels, as_vertices):
    """Drop the column extracted from label 0, when such a label is present.

    :param tseries: DataFrame of extracted data (one column per label)
    :param labels: mapping from label value to column identifier
    :param as_vertices: when True the data is per-vertex, so nothing is dropped
    :return: *tseries* without the label-0 column, or unchanged
    """
    if as_vertices or 0 not in labels:
        return tseries
    # The first column holds background (label 0) data; remove it.
    return tseries.drop(labels[0], axis=1)
import torch
def bbox_iof_overlaps(b1, b2):
    """Pair-wise intersection-over-former-box between two box sets.

    Arguments:
        b1: dts, [n, >=4] (x1, y1, x2, y2, ...)
        b2: gts, [m, >=4] (x1, y1, x2, y2, ...)
    Returns:
        [n, m] tensor of intersection(b1, b2) / area(b1) ratios
    """
    # Box areas use the inclusive-pixel convention (+1 on each side).
    former_area = (b1[:, 2] - b1[:, 0] + 1) * (b1[:, 3] - b1[:, 1] + 1)
    # Broadcast to all pairs: top-left is the max of the two corners,
    # bottom-right the min; negative extents mean no overlap.
    top_left = torch.max(b1[:, None, :2], b2[:, :2])
    bottom_right = torch.min(b1[:, None, 2:4], b2[:, 2:4])
    extents = (bottom_right - top_left + 1).clamp(min=0)
    intersection = extents[:, :, 0] * extents[:, :, 1]
    # clamp avoids division by zero for degenerate former boxes.
    return intersection / torch.clamp(former_area[:, None], min=1)
def get_atom_indices(labels, atom):
    """Return every index at which *atom* occurs in *labels*.

    labels - a list of coordinate labels ("Elements")
    atom - the atom whose indices in labels are sought
    Returns a list of all locations of [atom] in [labels]
    """
    # enumerate() replaces the index-based range(len(...)) loop.
    return [index for index, label in enumerate(labels) if label == atom]
def validate_manifest(manifest_data, validator):
    """Validate *manifest_data* against *validator*.

    :return: list of every validation error raised (empty when valid)
    """
    errors = validator.iter_errors(manifest_data)
    return list(errors)
def get_prop(obj, *prop_list):
    """Get a (possibly nested) property value if it exists.

    Works recursively with a list representation of the property hierarchy.
    E.g. with obj = {'a': 1, 'b': {'c': {'d': 2}}}, calling
    get_prop(obj, 'b', 'c', 'd') returns 2.

    :param obj: dictionary (or None)
    :param prop_list: keys describing the path into *obj*
    :return: the property value, *obj* itself when no keys are given,
        or None if the property doesn't exist
    """
    if obj is None:
        return None
    if not prop_list:
        # No path left to follow (the original raised IndexError here).
        return obj
    head, *rest = prop_list
    # Direct membership test instead of scanning obj.keys().
    if head not in obj:
        return None
    if not rest:
        return obj[head]
    return get_prop(obj[head], *rest)
def splitOut(aa):
    """Split interleaved coordinate data into x, y and z components.

    Args:
        aa - dictionary whose 'x' entry holds values interleaved as
             [x0, y0, z0, x1, y1, z1, ...]
    Returns:
        outkx, outky, outkz - arrays of the x, y and z values of the points
    """
    interleaved = aa['x']
    # Every third element, offset by the component index.
    return interleaved[0::3], interleaved[1::3], interleaved[2::3]
def unwrap_cdata(value):
    """Strip a CDATA wrapper from *value* if one is present."""
    prefix, suffix = "<![CDATA[", "]]>"
    if value.startswith(prefix) and value.endswith(suffix):
        return value[len(prefix):-len(suffix)]
    return value
def tab_of_dict_to_string(tab, new_line=True, title=True):
    """Render a list of dictionaries as a human-readable string.

    Scalar values are written one per line (prefixed by their key when
    *title* is true); list values are collected and appended at the end
    of each dictionary's section as indented bullet lines.

    :param tab: iterable of dictionaries
    :param new_line: append a blank line after each dictionary
    :param title: prefix scalar values with their key
    :return: the formatted string
    """
    string = ''
    for values in tab:
        to_end = ''
        for value in values:
            # list output always written at end
            if isinstance(values[value], list):
                # isinstance() replaces the fragile
                # "'list' in str(type(...))" substring test.
                to_end += '%s:\n' % str(value)
                for w in values[value]:
                    if w.strip():
                        to_end += '\t- %s\n' % w.strip()
            else:
                if title:
                    string += '%s: %s\n' % (value, str(values[value]))
                else:
                    string += '%s\n' % values[value]
        string += to_end
        if new_line:
            string += '\n'
    return string
def is_compatible_data_type(expected_data_type, actual_data_type):
    """
    Return whether *actual_data_type* (a ``str(type(...))`` string from the
    Jython runtime) is acceptable where *expected_data_type* (a model
    attribute type name) is required.

    :param expected_data_type: alias data type name, e.g. 'integer'
    :param actual_data_type: string form of the Python type of the value
    :return: True when the value's type is compatible with the expected type
    """
    str_like = ["<type 'str'>", "<type 'unicode'>"]
    # Branch order mirrors the original elif chain; the substring checks
    # ('list' in ..., 'delimited_' in ...) must stay in this position.
    if expected_data_type == 'string':
        acceptable = ["<type 'str'>", "<type 'long'>", "<type 'unicode'>"]
    elif expected_data_type in ('integer', 'long'):
        acceptable = ["<type 'int'>", "<type 'long'>"] + str_like
    elif expected_data_type in ('boolean', 'java.lang.Boolean'):
        acceptable = ["<type 'int'>", "<type 'long'>"] + str_like
    elif expected_data_type in ('float', 'double'):
        acceptable = ["<type 'float'>"] + str_like
    elif expected_data_type in ('properties', 'dict'):
        acceptable = ["<type 'PyOrderedDict'>",
                      "<type 'oracle.weblogic.deploy.util.PyOrderedDict'>",
                      "<type 'dict'>", "<type 'str'>"]
    elif 'list' in expected_data_type:
        acceptable = ["<type 'list'>"] + str_like
    elif expected_data_type in ('password', 'credential', 'jarray'):
        acceptable = str_like
    elif 'delimited_' in expected_data_type:
        acceptable = ["<type 'str'>", "<type 'list'>", "<type 'unicode'>"]
    else:
        acceptable = []
    return actual_data_type in acceptable
import pkgutil
def get_all_modules(package_path):
    """Return the names of every module found directly under *package_path*."""
    modules = pkgutil.iter_modules([package_path])
    return [info[1] for info in modules]
def GetDeviceNamesFromStatefulPolicy(stateful_policy):
    """Return the device names (disk keys) from a StatefulPolicy message.

    An empty list is returned when the policy carries no preserved disks.
    """
    preserved = stateful_policy and stateful_policy.preservedState
    if not preserved or not preserved.disks:
        return []
    return [disk.key for disk in preserved.disks.additionalProperties]
def get_ec2_instance_status(cloud_connection, aws_region, instance_ids):
    """Fetch the EC2 status list for the given instances.

    :param cloud_connection: The app Cloud Connection object
    :param aws_region: string: The region to use
    :param instance_ids: array[] string: Instance IDs to check
    :return: the 'InstanceStatuses' payload from describe_instance_status
    """
    ec2_client = cloud_connection.get_connection(
        aws_region, ["ec2"], boto_version='boto3')
    response = ec2_client.describe_instance_status(InstanceIds=instance_ids)
    return response['InstanceStatuses']
def score(a, b):
    """Heuristic score for a candidate letter split (a, b)."""
    total = 0
    # Boost a property whose value follows it (they are space separated).
    if a.endswith(' '):
        total += 3.0
    # Penalize a hyphen strictly inside either word.
    if '-' in a[1:-1] or '-' in b[1:-1]:
        total -= 2.0
    # Penalize a space strictly inside either word.
    if ' ' in a[1:-1] or ' ' in b[1:-1]:
        total -= 0.5
    # Letter at the start of a word right after a hyphen.
    if a.endswith('-'):
        total += 1.05
    # Consecutive letters.
    if len(a) == 1:
        total += 1.0
    return total
def replace_tabs(string: str, tab_width: int = 4) -> str:
    """Expand each ``\\t`` in *string* to *tab_width* spaces."""
    expansion = ' ' * tab_width
    return string.replace('\t', expansion)
def closestsites(struct_blk, struct_def, pos):
    """Return the site closest to *pos* in both bulk and defect structures.

    Args:
        struct_blk: Bulk structure
        struct_def: Defect structure
        pos: Position
    Return: ((site, dist, index) for bulk, (site, dist, index) for defect)
    """
    def _nearest(structure):
        # Candidate sites within a 5 Angstrom sphere; pick the closest
        # (entry[1] is the distance).
        candidates = structure.get_sites_in_sphere(pos, 5, include_index=True)
        return min(candidates, key=lambda entry: entry[1])

    return _nearest(struct_blk), _nearest(struct_def)
from typing import Any
import inspect
def compare_attributes(obj1: Any, obj2: Any, *, ignore_simple_underscore: bool = False) -> bool:
    """Compare all attributes of two objects, except for methods,
    __dunderattrs__, and, optionally, _private_attrs.

    Args:
        obj1 (Any): First object to compare.
        obj2 (Any): Second object to compare.
        ignore_simple_underscore (:class:`bool`, optional): If ``True``,
            attributes starting with a single `_` won't be compared.
            Defaults to ``False`` (compares attributes starting with a
            single `_`, but not with two).

    Returns:
        :class:`bool`: ``True`` if obj1.X == obj2.X for all X (according
        to the criteria in the description); ``False`` otherwise.
    """
    for name, value1 in inspect.getmembers(obj1):
        # Always skip dunders; skip _private attrs only when asked to.
        # (The original test was inverted: it skipped _attrs by default
        # and compared them when ignore_simple_underscore was True,
        # contradicting its own docstring.)
        if name.startswith("__") or (ignore_simple_underscore and name.startswith("_")):
            continue
        is_method1 = inspect.ismethod(value1)
        for _, value2 in filter(lambda member: member and member[0] == name,
                                inspect.getmembers(obj2)):
            if is_method1 != inspect.ismethod(value2):  # one is a method and the other isn't?
                return False
            if not is_method1 and value1 != value2:
                return False
    return True
import socket
def remote_os_type_windows(host='127.0.0.1'):
    """Guess whether *host* runs Windows by probing Windows-only ports.

    Tries to connect to SMB (445), RDP (3389) and PowerShell remoting
    (5985).  If any of them accepts a connection the host is assumed to
    be Windows, otherwise Linux.  This is NOT likely to be useful outside
    of the circumstances in which this specific code runs.

    :param host: host name or IP address to probe
    :return: True when the host looks like a Windows machine
    """
    ports = (445, 3389, 5985)  # SMB, RDP, PowerShell
    for port in ports:
        # Context manager closes the socket (the original leaked one per
        # probed port) and a timeout keeps us from hanging on hosts that
        # silently drop packets.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(5)
            try:
                s.connect((host, port))
                return True  # one open port is enough; stop probing
            except OSError:
                # Covers ConnectionRefusedError plus timeouts and
                # unreachable-network errors the original let escape.
                pass
    return False
def _updated_or_published(mf):
"""
get the updated date or the published date
Args:
mf: python dictionary of some microformats object
Return: string containing the updated date if it exists, or the published date if it exists or None
"""
props = mf['properties']
# construct updated/published date of mf
if 'updated' in props:
return props['updated'][0]
elif 'published' in props:
return props['published'][0]
else:
return None | e487c381ece11faf40c42c02454907cea5104d31 | 690,843 |
def decode(s):
    """
    Return a decoded unicode string from the byte string *s*, or None if
    the string cannot be decoded.

    An embedded NUL byte is taken as evidence of UTF-16-LE text (ASCII
    characters encoded as UTF-16-LE contain NULs); anything else is
    expected to be plain ASCII.
    """
    # Compare against a bytes literal: under Python 3 the original
    # "'\x00' in s" test raises TypeError for bytes input.
    if b'\x00' in s:
        try:
            return s.decode('utf-16-le')
        except UnicodeDecodeError:
            return None
    try:
        return s.decode('ascii')
    except UnicodeDecodeError:
        # The docstring promises None for undecodable input; the original
        # let this exception escape.
        return None
def procesar_matriz(matriz):
    """Parse a dimension string such as "3x4" into a list of ints [3, 4]."""
    return [int(parte) for parte in matriz.split('x')]
from pathlib import Path
def is_file(path: Path) -> bool:
    """
    Return True if *path* points at an existing regular file, False otherwise.

    :param path: a Path
    :return: a bool
    """
    result = path.is_file()
    return result
def dict2str(d):
    """Pretty one-line rendering of a dict (sorted items) for logging."""
    items = sorted(d.items())
    return str(items)
import os
def local_file(name):
    """
    Return the full path to filename *name* inside the plugin area.
    """
    plugin_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(plugin_dir, name)
def replace_field(field, value):
    """For the 'retard' field, collapse any non-empty value to the marker
    'retard :' and empty/None to ''; every other field passes through."""
    if field != 'retard':
        return value
    return '' if value is None or value == '' else 'retard :'
def retrieve_min_max_from_path(path):
    """Extract the trailing min and max floats from a folder path.

    Args:
        path (str): folder path whose name ends with ``..._<min>_<max>``
    Returns:
        (float, float): the retrieved min and max values
    """
    # Path separators are stripped before splitting on underscores.
    flattened = path.replace("\\", "").replace("/", "")
    parts = flattened.split("_")
    return float(parts[-2]), float(parts[-1])
def type_unpack(type):
    """Return the struct format string and byte length for a type name.

    :param type: case-insensitive type name ('short', 'int', 'float', ...)
    :return: ('<' + struct code, byte length), little-endian
    :raises TypeError: for an unrecognised type name
    """
    formats = {
        'short': ('h', 2),
        'bool': ('c', 1),
        'ushort': ('H', 2),
        'int': ('i', 4),
        'uint': ('I', 4),
        'long': ('l', 4),
        'ulong': ('L', 4),
        'float': ('f', 4),
        'double': ('d', 8),
    }
    type = type.lower()
    if type not in formats:
        raise TypeError('Unknown type %s' % type)
    code, length = formats[type]
    return ('<' + code, length)
def get_unicode_code_points(string):
    """Return a comma-delimited list of U+XXXX code points for the
    characters in *string*."""
    points = ('U+%04X' % ord(ch) for ch in string)
    return ', '.join(points)
import base64
import hmac
import hashlib
def _make_auth(
method,
date,
nonce,
path,
access_key,
secret_key,
query_string="",
ctype="application/json",
):
"""
Create the request signature to authenticate
Args:
- method (str): HTTP method
- date (str): HTTP date header string
- nonce (str): Cryptographic nonce
- path (str): URL pathname
- query (dict, default={}): URL query string in key-value pairs
- ctype (str, default='application/json'): HTTP Content-Type
"""
if isinstance(secret_key, str):
secret_key = secret_key.encode("utf-8")
if isinstance(access_key, str):
access_key = access_key.encode("utf-8")
# query = urlencode(query)
hmac_str = (
(
method
+ "\n"
+ nonce
+ "\n"
+ date
+ "\n"
+ ctype
+ "\n"
+ path
+ "\n"
+ query_string
+ "\n"
)
.lower()
.encode("UTF_8")
)
signature = base64.b64encode(
hmac.new(secret_key, hmac_str, digestmod=hashlib.sha256).digest()
)
auth = (
"On " + access_key.decode("UTF_8") + ":HmacSHA256:" + signature.decode("UTF_8")
)
return auth | 524ad6a0c7651fd3aef6501bda25a91cdc67cd47 | 690,855 |
import subprocess
def _get_deterministic_output(filename):
""" Get values that we can count on to be the same on repeated runs with the same seed. """
# pattern = ('best_01_loss\|best_2norm_loss\|best_reward_per_ep\|'
# 'best_reward_per_ep_avg\|test_01_loss\|test_2_norm\|'
# 'test_reward_per_ep\|constituting')
pattern = 'best_01_loss'
return subprocess.check_output(
'grep "{}" {} | cat -n'.format(pattern, filename),
shell=True).decode() | 67de0ae4b092427f30d4ba830a93d9ccd925b3fc | 690,856 |
def list_numbers():
    """
    076
    Create a list of three-digit numbers. Ask the user to enter a
    three-digit number. If the number they have typed in matches one in
    the list, return the position of that number in the list, otherwise
    return the message "Not in list".
    """
    known = [333, 777, 999]
    choice = int(input("Enter a three-digit number: "))
    if choice in known:
        return known.index(choice)
    return "Not in list"
def as_markdown(args):
    """Render an argparse namespace (or similar) as a markdown table."""
    rows = ["|name|value| \n|-|-| \n"]
    for attr, value in sorted(vars(args).items()):
        rows.append("|{}|{}| \n".format(attr, value))
    return "".join(rows)
def dijkstra(graph, start):
    """
    Find the shortest path from *start* to every reachable node.

    :param graph: adjacency mapping: graph[node][neighbour] -> edge weight.
        NOTE(review): node names are assumed to be single-character
        strings — result keys concatenate the two endpoint names and are
        later split with ``key[1:]``; multi-character names would break.
    :param start: the starting node (single-character string)
    :return: dict mapping "<start><node>" to a tuple
        ("<start>-> ... -><node>", total path weight)
    """
    # Path results: "<start><end>": ("path string", path weight).
    result = {start*2: (start, 0)}
    # Nodes already visited (traversal ends when no new node appears).
    visited = set()
    current = start
    while True:
        nodes = []
        # Collect the current node's unvisited neighbours.
        for node in graph[current].keys():
            if node not in visited:
                nodes.append(node)
        # No neighbours, or all of them visited: stop expanding this node.
        if not nodes:
            break
        if start == current:
            # Current node is the start: record the direct paths.
            for node in nodes:
                result[start + node] = (start + "->" + node, graph[start][node])
        else:
            # Path info (path, weight) from the start to the current node.
            current_path_weight = result[start+current]
            # Walk every unvisited neighbour and record the path and
            # weight from the start to it.
            for node in nodes:
                weight = graph[current][node]
                # Distance from the start to this neighbour.
                distance = current_path_weight[1] + weight
                key = start + node
                if key not in result:
                    # No path recorded yet for this end node: store it.
                    result[key] = (current_path_weight[0] + "->" + node, distance)
                else:
                    # A path already exists: keep the lighter of the two.
                    exists_distance = result[key][1]
                    if distance < exists_distance:
                        result[key] = (current_path_weight[0] + "->" + node, distance)
        # Mark the current node as visited.
        visited.add(current)
        # Pick the shortest recorded path whose end node is unvisited and
        # continue the traversal from that end node.
        min_weight = -1
        min_key = None
        for key, t in result.items():
            if key[1:] in visited:
                continue
            if min_weight == -1 or t[1] <= min_weight:
                min_weight = t[1]
                min_key = key
        if min_key:
            # Move to that end node and keep traversing.
            current = min_key[1:]
        else:
            # Traversal finished: leave the while loop.
            break
    return result
def palette_to_rgb(palette_color_summary, rgb_color_summmary):
    """Map palette indices to their RGB values via shared pixel counts.

    palette_color_summary : list of (pixel_count, palette_index) pairs
    rgb_color_summmary : list of (pixel_count, rgb) pairs
    output : list where entry i is the rgb whose pixel count matches
             palette index i
    """
    palette_rgb = []
    for index in range(len(palette_color_summary)):
        # Pixel count recorded for this palette index.
        count = [c for c, idx in palette_color_summary if idx == index][0]
        # RGB value sharing the same pixel count.
        palette_rgb.append([rgb for c, rgb in rgb_color_summmary if c == count][0])
    return palette_rgb
def scaleto100(value):
    """Scale *value* from the 0-255 range to 0-100."""
    if 0 < value < 3:
        # Keep a low but non-zero input from rounding down to zero.
        return 1
    scaled = value * 100.0 / 255.0
    return min(100, max(0, scaled))
import sys
def read_track_details():
    """Return the artist name and track names.

    Artist name must be given on the command line (all arguments after
    the script name are joined with spaces).  Track names must be piped
    to stdin, one per line.

    Side effect: permanently pops the script name off ``sys.argv``.

    :return: (artist, tracklist) tuple
    :raises ValueError: when no artist argument or no track lines are given
    """
    if len(sys.argv) < 2:  # sys.argv[0] is this script
        raise ValueError("No artist was given")
    sys.argv.pop(0)
    artist = ' '.join(sys.argv)
    tracklist = []
    for line in sys.stdin:
        tracklist.append(line.strip())  # Remove whitespace
    if not tracklist:
        raise ValueError("No track names were given")
    return artist, tracklist
def ndbi(swir, red, nir, blue):
    """
    Convert the swir, red, nir and blue bands of a Landsat scene to a
    normalized difference bareness index.

    Source: Zao and Chen, "Use of Normalized Difference Bareness Index
    in Quickly Mapping Bare from TM/ETM+", IEEE Conference Paper, 2005
    DOI: 10.1109/IGARSS.2005.1526319

    :param swir: numpy.array, shortwave infrared band
    :param red: numpy.array, red band
    :param nir: numpy.array, near infrared band
    :param blue: numpy.array, blue band
    :return: normal difference bareness index
    """
    numerator = (swir + red) - (nir + blue)
    denominator = (swir + red) + (nir + blue)
    # Guard against division by zero; the affected pixels are forced to
    # zero again right after the division.
    denominator[denominator == 0.0] = 1.0
    index = numerator / denominator
    index[denominator == 1.0] = 0.0
    # Clip to the valid normalized-difference range.
    index[index < -1.0] = -1.0
    index[index > 1.0] = 1.0
    return index
def read_public_key(key_file):
    """Read a PEM public key file and return the bare base64 key body.

    The BEGIN/END PUBLIC KEY armor lines are stripped.

    :param key_file: path of the PEM file
    :return: the key material as a string
    """
    # with-statement closes the file; the original leaked the handle.
    with open(key_file, 'r') as f:
        key_str = f.read()
    return key_str.replace('-----BEGIN PUBLIC KEY-----\n', '') \
                  .replace('\n-----END PUBLIC KEY-----', '').strip()
import torch
def reg_binary_entropy_loss(out):
    """
    Mean binary entropy loss that drives values toward 0 and 1.

    Args:
        out (torch.float): values for the loss, strictly inside (0, 1).
    Return:
        torch.float scalar loss value.
    """
    entropy = -(out * torch.log(out)) - (1 - out) * torch.log(1 - out)
    return entropy.mean()
def get_hyperion_unique_id(server_id: str, instance: int, name: str) -> str:
    """Build the unique_id for one entity of a Hyperion instance."""
    return "_".join((server_id, str(instance), name))
def perform_cuts(full_catalog):
    """
    Apply the expected quality cuts and return the surviving rows.

    :param full_catalog: table with Q0, Q2, Mstar_* and SFR_* columns
    :return: the subset of *full_catalog* passing all cuts
    """
    good_fit = (full_catalog["Q0"] < 2.0) & (full_catalog["Q2"] < 1.0)
    tight_mstar = ((full_catalog["Mstar_50"] - full_catalog["Mstar_16"]) < 0.5) & \
                  ((full_catalog["Mstar_84"] - full_catalog["Mstar_50"]) < 0.5)
    tight_sfr = ((full_catalog["SFR_50"] - full_catalog["SFR_16"]) < 0.5) & \
                ((full_catalog["SFR_84"] - full_catalog["SFR_16"]) < 0.5)
    return full_catalog[good_fit & tight_mstar & tight_sfr]
def getModel(tsinput):
    """
    This is the wrapper function for all profile models implemented in
    the runInput package.

    Parameters
    ----------
    tsinput : :class:`.tsinput`
        A TurbSim input object.

    Returns
    -------
    profModel : A subclass of :class:`.profModelBase`
        The appropriately initialized 'profile model' object
        specified in `tsinput`.
    """
    # This executes the sub-wrapper function (defined below) specified
    # in the tsinput-object (input file WINDPROFILETYPE line).
    # NOTE(review): eval() of a config-derived string executes arbitrary
    # code if the input file is untrusted; a dict lookup of the
    # '_<name>' wrapper functions would be safer.
    return eval('_' + tsinput['WindProfileType'].lower() + '(tsinput)')
import os
def filename(value):
    """
    Return the base name of the file attached to *value*, or None when
    the file cannot be accessed.
    """
    try:
        name = value.file.name
    except IOError:
        return None
    return os.path.basename(name)
def _should_take(id_, mask, event):
"""Check if event has same non-masked bits as id_.
"""
id_bin = '{0:016b}'.format(id_)
mask_bin = '{0:016b}'.format(mask)
event_bin = '{0:016b}'.format(event[2])
take_event = True
for i in range(len(mask_bin)):
if int(mask_bin[i]) == 1:
continue
if int(id_bin[i]) != int(event_bin[i]):
take_event = False
break
return take_event | a0d2be04240d27bdc3703c3bb63d2bd949c890bb | 690,871 |
from datetime import datetime
def validate_cache(beatmap: dict):
    """Check whether a cached beatmap entry is still valid.

    Entries cached before the previous star-rating update are always
    stale.  Loved maps expire after 30 days; pending, graveyard, wip and
    qualified maps after 7 days.  ``None`` is never valid.
    """
    if beatmap is None:
        return False
    cached_time = datetime.fromisoformat(beatmap["time_cached"])
    # Anything cached before the last SR recalculation is stale.
    if cached_time < datetime(2021, 8, 5):
        return False
    age = datetime.utcnow() - cached_time
    status = beatmap["status"]
    if status == "loved":
        return age.days <= 30
    if status in ("pending", "graveyard", "wip", "qualified"):
        return age.days <= 7
    return True
from typing import Union
from typing import Collection
from typing import Set
def split_csp_str(val: Union[Collection[str], str]) -> Set[str]:
    """Split a comma separated string (or pass through a collection) into
    the set of its non-empty values."""
    parts = val.strip().split(",") if isinstance(val, str) else val
    return {item for item in parts if item}
def identity(*args):
    """Return the single argument unchanged, or the tuple of arguments
    when more than one is given.

    Example:
        >>> identity(1)
        1
        >>> identity(1, 2)
        (1, 2)
    """
    return args[0] if len(args) == 1 else args
def accuracy(pred, target):
    """Fraction of samples classified correctly:
    acc = (TP+TN)/(TP+TN+FP+FN)
    """
    correct = (pred == target).sum().item()
    return correct / len(target)
def query_missions():
    """
    Build the GraphQL query string for missions.
    """
    return '''
    {
        missions {
            id
            name
            manufacturers
        }
    }
    '''
def bita_to_bool(b):
    """
    Return the boolean value represented by a (single-bit) bitarray.

    Example::
        >>> bita_to_bool(bitarray('0'))
        False
    """
    last_bit = b[-1]
    return last_bit
import pathlib
from typing import Optional
def latest_checkpoint_update(target: pathlib.Path,
                             link_name: str) -> Optional[pathlib.Path]:
    """
    Repoint the symlink *link_name* at *target*.

    :param target: A path to a file that we want the symlink to point to.
    :param link_name: The name of the symlink that we want to update.
    :return:
        - the path the symlink previously resolved to, before this call;
          None when the symlink did not exist (or had no target) before.
    """
    link = pathlib.Path(link_name)
    previous = None
    if link.is_symlink():
        previous = link.resolve()
        link.unlink()
    link.symlink_to(target)
    return previous
def isTIFF(filename: str) -> bool:
    """Check if the file name signifies a TIFF image (.tif/.tiff, any case)."""
    if filename is None:
        return False
    # endswith accepts a tuple of suffixes; casefold handles any casing.
    return filename.casefold().endswith(('.tif', '.tiff'))
from typing import Dict
def dataset_is_authoritative(dataset: Dict) -> bool:
    """Check whether *dataset* is tagged as authoritative."""
    tag = dataset.get("isAuthoritative")
    return bool(tag) and tag["value"] == "true"
def max_farmers(collection):  # pragma: no cover
    """Return the maximum 'total_farmers' recorded in *collection*
    (0 when the collection is empty)."""
    top = 0
    # Sort descending and take the single largest document.
    cursor = collection.find({}).sort([('total_farmers', -1)]).limit(1)
    for doc in cursor:
        top = doc['total_farmers']
    return top
import hashlib
import pathlib
def calculate_file_hash(file_path):
    """
    Return the SHA-1 hex digest of the file on disk at *file_path*, or
    None when the file does not exist.

    We store a hash of the file in the database, so that we can keep
    track of files if they are ever moved to new drives.
    """
    block_size = 65536
    path = pathlib.Path(file_path)
    if not path.exists():
        return None
    hasher = hashlib.sha1()
    with path.open('rb') as handle:
        # iter() with a sentinel reads the file chunk by chunk.
        for chunk in iter(lambda: handle.read(block_size), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
def word_fits_in_line(pagewidth, x_pos, wordsize_w):
    """Return True when a word of width *wordsize_w* still fits on the line."""
    remaining = pagewidth - x_pos - wordsize_w
    return remaining > 0
def longest(s1, s2):
    """Return the longest sorted string of distinct letters from two strings.

    input = 2 strings, characters are a-z
    output = 1 string, with distinct characters from both
    ex: a = "xyaabbbccccdefww" b = "xxxxyyyyabklmopq" longest(a, b) -> "abcdefklmopqwxy"
    """
    distinct = set(s1) | set(s2)
    return ''.join(sorted(distinct))
import os
def path_join(base, path):
    """Join 'base' and 'path', treating 'path' as relative even if absolute.

    This method is like os.path.join(), but 'path' is interpreted
    relatively.  E.g., os.path.join('/a/b', '/c') yields '/c', but this
    function yields '/a/b/c'.

    Args:
        base: The base path.
        path: The path to append to base; this is treated as a relative path.
    Returns:
        The path obtained by appending 'path' to 'base'.
    """
    if os.path.isabs(path):
        # Strip any Windows drive letter, then the leading separator,
        # leaving a plain relative path.
        _, without_drive = os.path.splitdrive(path)
        path = without_drive[1:]
    return os.path.join(base, path)
import json
def decode_bad_json(data):
    """Parse lingt's JSON, which escapes single quotes non-compliantly."""
    cleaned = data.replace("\\'", "'")
    return json.loads(cleaned)
import os
def scriptDir():
    """
    Return the directory containing this script.
    """
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
import re
def valid_fname(fname):
    """Remove filename-invalid characters (\\ / : * ? " < > |) from *fname*."""
    forbidden = '\\/:*?"<>|'
    return fname.translate(str.maketrans('', '', forbidden))
import os
def is_conemu_ansi():
    """
    Return True when running under ConEmu with ANSI support enabled.
    """
    flag = os.environ.get("ConEmuANSI", "OFF")
    return flag == "ON"
from typing import Dict
from typing import Any
def create_gyre_prefix(gyre_comb: Dict[str, Any]) -> str:
    """
    Create a GYRE run prefix for the given combination of GYRE parameter
    values.

    Prefixes are unique within one MESA run (keys are emitted in sorted
    order, so the dict's own ordering does not matter) but can repeat
    across multiple, separate, MESA runs.

    >>> create_gyre_prefix({"a": 2, "c": 3, "b": "hi"})
    'gyre_a_2__b_hi__c_3__'
    """
    pieces = ["{}_{}__".format(key, gyre_comb[key])
              for key in sorted(gyre_comb)]
    return "gyre_" + "".join(pieces)
def findAllInfectedRelationships(tx):
    """
    Fetch every COVID_EXPOSURE relationship between Person nodes.

    :param tx: the database transaction
    :return: list of records (source id, relationship, date, name, target id)
    """
    query = (
        "MATCH (n1:Person)-[r:COVID_EXPOSURE]->(n2:Person) "
        "RETURN ID(n1) , r , r.date , r.name , ID(n2);"
    )
    return tx.run(query).data()
import typing
def is_same_classmethod(
    cls1: typing.Type,
    cls2: typing.Type,
    name: str,
) -> bool:
    """Check whether *name* resolves to the same underlying classmethod
    function on both classes."""
    bound1 = getattr(cls1, name)
    bound2 = getattr(cls2, name)
    # Bound classmethod objects differ per access; compare the wrapped
    # functions for identity instead.
    return bound1.__func__ is bound2.__func__
import socket
import pickle
def send_packet(sock, pack):
    """
    Send a packet to a remote socket.  We first send the size of the
    packet in bytes (4-byte big-endian header) followed by the actual
    packet, serialized with pickle.

    Arguments
    ---------
    sock : Destination socket
    pack : Instance of class Packet.

    Returns True on success, None when either argument is unusable.
    """
    # isinstance() instead of the original exact type() comparison, so
    # socket subclasses (e.g. wrapped or test-double sockets deriving
    # from socket.socket) are accepted too.
    if pack is None or not isinstance(sock, socket.socket):
        return  # Nothing to send
    payload = pickle.dumps(pack)
    sock.sendall(len(payload).to_bytes(4, byteorder="big"))
    sock.sendall(payload)
    return True
def first_half(dayinput):
    """
    first half solver: run the dance moves over programs 'a'..'p'.

    Moves are comma separated: sN spins the last N programs to the
    front, xA/B swaps positions A and B, pX/Y swaps the named programs.
    """
    positions = list('abcdefghijklmnop')
    for move in dayinput.split(','):
        op, rest = move[0], move[1:]
        if op == 'x':
            i, j = (int(part) for part in rest.split('/'))
            positions[i], positions[j] = positions[j], positions[i]
        elif op == 'p':
            x, y = rest.split('/')
            i, j = positions.index(x), positions.index(y)
            positions[i], positions[j] = positions[j], positions[i]
        elif op == 's':
            # Slice-based rotation instead of popping one element at a
            # time; modulo handles spins larger than the line length.
            count = int(rest) % len(positions)
            if count:
                positions = positions[-count:] + positions[:-count]
    return ''.join(positions)
import sysconfig
def getApp():
"""
Gets the application that is running the current python script.
Returns:
(string): Maya or Houdini.
"""
path = sysconfig.get_path('scripts')
if 'Maya' in path:
return 'Maya'
elif 'HOUDIN' in path:
return 'Houdini'
elif 'UnrealEnginePython' in path:
return 'UE4'
elif '3dx' in path and 'Max' in path:
return '3dxMax'
else:
raise ValueError('Current compatible software is Maya or Houdini') | 66ef984cd88a3fbf704e2193f106b33c27f9428f | 690,901 |
import torch
def get_all_indcs(batch_size, n_possible_points):
    """
    Return every index 0..n_possible_points-1, replicated per batch item.

    The result is a (batch_size, n_possible_points) view sharing storage
    with a single arange tensor (``expand`` does not copy).
    """
    indices = torch.arange(n_possible_points)
    return indices.expand(batch_size, n_possible_points)
import pandas
import numpy
def assign_facility_region(facilities_xlsx):
    """
    Load the facilities excel spreadsheet and return a dataframe with an
    added 'Region' column identifying the region each facility is in,
    based on which latitude band its dock falls into.
    """
    # Facility information (columns B, D, J, K of the Washington sheet)
    frame = pandas.read_excel(
        facilities_xlsx,
        sheet_name = 'Washington',
        usecols="B,D,J,K"
    )
    # Latitude cut points separating the four regions, south to north.
    south, mid, north = 46.9, 48.3, 48.7
    lat = frame.DockLatNumber
    # One boolean mask per region; numpy.select picks the first match.
    masks = [
        lat < south,
        (lat >= south) & (lat < mid),
        (lat >= mid) & (lat < north),
        lat >= north,
    ]
    regions = ['Columbia River', 'Puget Sound', 'Anacortes', 'Whatcom County']
    frame['Region'] = numpy.select(masks, regions)
    return frame
def N_STS_from_N_SS(N_SS, STBC):
    """Number of space-time streams (N_{STS}) given the number of spatial
    streams (N_{SS}) and the space-time block coding (STBC) in use.

    Table 20-12 of the standard tabulates this, but it reduces to a sum.
    """
    total_streams = N_SS + STBC
    return total_streams
def num_grid_points(d, mu):
    """
    Compute the number of grid points for a given d, mu combination.

    Parameters
    ----------
    d, mu : int
        The parameters d and mu that specify the grid

    Returns
    -------
    num : int or float
        The number of points that would be in a grid with params d, mu.
        (mu = 2 and mu = 3 return floats, mirroring the original
        formulas' float division.)

    Raises
    ------
    ValueError
        If mu is not 1, 2, or 3 (previously this silently returned None).
    """
    if mu == 1:
        return 2*d + 1
    if mu == 2:
        return 1 + 4*d + 4*d*(d-1)/2.
    if mu == 3:
        return 1 + 8*d + 12*d*(d-1)/2. + 8*d*(d-1)*(d-2)/6.
    raise ValueError("num_grid_points is only defined for mu = 1, 2, or 3")
def map_common_error_message(response, default):
    """ Translate a raw uFrame error response into a message suitable for
    display in the UI; falls back to ``default`` when unrecognised.
    """
    if 'requestUUID' in response:
        # Pull the quoted UUID value that follows the requestUUID key.
        uuid = response.split('requestUUID":')[1].split('"')[1]
        return 'Error Occurred During Product Creation<br>UUID for reference: ' + uuid
    if 'Failed to respond' in response:
        return 'Internal System Error in Data Repository'
    return default
from datetime import datetime
import time
def get_end_date_of_schedule(schedule):
"""Return the end date of the provided schedule in ISO 8601 format"""
currenttime = datetime.today()
endtime = datetime(
currenttime.year, currenttime.month, currenttime.day, schedule['end-hour'], schedule['end-minute'])
# manually create ISO8601 string because of tz issues with Python2
ts = time.time()
utc_offset = ((datetime.fromtimestamp(
ts) - datetime.utcfromtimestamp(ts)).total_seconds()) / 3600
offset = str(int(abs(utc_offset * 100))).zfill(4)
sign = "+" if utc_offset >= 0 else "-"
return endtime.strftime("%Y-%m-%dT%H:%M:%S{sign}{offset}".format(sign=sign, offset=offset)) | bf17c699455a4c7c57abd7751888ba12cd539170 | 690,910 |
def get_uncert_dict(res):
    """
    Group the (row, col) coordinates of missing values by row.

    Args:
        res(np.array): iterable of (row, col) index pairs (e.g. the
            missing-mask coordinates from np.argwhere)
    Returns:
        uncertain_dict (dict): maps each row index to the list of column
        indices with missing values in that row.
    """
    uncertain_dict = {}
    for coords in res:
        row = coords[0]
        col = coords[1]
        # setdefault replaces the previous get-then-branch dance and
        # behaves correctly regardless of the stored value's truthiness.
        uncertain_dict.setdefault(row, []).append(col)
    return uncertain_dict
def get_repo_dir_name(repo):
    """Return the name of the repository's working-directory folder."""
    import os.path
    # basename + rstrip handles trailing separators (the old '/'-split
    # returned '' for paths ending in '/') and also works with the
    # platform's native separator instead of a hard-coded '/'.
    return os.path.basename(repo.working_dir.rstrip("/\\"))
def validate(nodes):
    """
    Remove trailing '/', '/ws' and '/wss' suffixes from each node URL,
    then return the deduplicated, sorted list.
    """
    def strip_suffixes(node):
        # Same suffix order as before: trailing slash first, then the
        # websocket paths; each suffix is stripped at most once.
        for suffix in ("/", "/ws", "/wss"):
            if node.endswith(suffix):
                node = node[:-len(suffix)]
        return node
    cleaned = {strip_suffixes(node) for node in nodes}
    return sorted(cleaned)
def find_gap(arglist, argprefix, argregex):
    """Find the single missing integer in a sequence of labelled items.

    Args:
        arglist - list of strings containing the sequence with one gap
        argprefix - prefix to be checked against (unused here; matching
                    is done entirely by argregex)
        argregex - compiled Regex whose group(1) captures the integer
    Returns:
        The missing int in the sequence 1..max.
    """
    numbers = sorted(int(argregex.match(item).group(1)) for item in arglist)
    highest = numbers[-1]
    # Gauss sum of 1..highest minus the actual sum leaves exactly the gap.
    expected_total = (highest + 1) * highest / 2
    return int(expected_total - sum(numbers))
import random
import string
import os
def split_file(prefix, filename, split_num):
    """Split one file to split_num parts, return splited filenames.
    It could be viewed as a method of shuffling.
    """
    # each file will contain about file_lines lines
    # NOTE(review): the generator passed to sum() opens the file without
    # an explicit close; consider a with-block.
    file_lines = int(sum(1 for line in open(filename)) / split_num)
    # Random directory name so concurrent runs don't collide.
    dst_dir_hash = ''.join(random.choice(
        string.ascii_uppercase + string.digits) for _ in range(10))
    dst_dir = os.path.join(prefix, dst_dir_hash)
    # NOTE(review): 'rm' runs before 'mkdir -p', so for a freshly random
    # directory it always fails (harmless shell error) — verify ordering.
    # NOTE(review): shelling out with interpolated paths is vulnerable to
    # shell injection if prefix/filename come from untrusted input.
    os.system('rm {}/*'.format(dst_dir))
    os.system('mkdir -p {}'.format(dst_dir))
    # GNU split names the chunks aa, ab, ... inside dst_dir.
    os.system('split -l {} {} {}/'.format(file_lines, filename, dst_dir))
    return [os.path.join(dst_dir, f) for f in os.listdir(dst_dir)]
def _and(*args):
    """Helper function to return its parameters and-ed
    together, ready for a SQL statement.
    eg,
    _and("x=1", "y=2") => "x=1 AND y=2"
    (Note: no surrounding brackets are added; wrap the result yourself
    if operator precedence requires it.)
    """
    return " AND ".join(args)
import copy
def patch_filters(filters):
    """Return a deep copy of ``filters`` with 'orgs_filter_control'
    converted from a comma-separated string into a list of ints."""
    patched = copy.deepcopy(filters)
    raw = filters['orgs_filter_control']
    # Empty fragments (from '' or stray commas) are skipped.
    patched['orgs_filter_control'] = [int(oid) for oid in raw.split(',') if oid]
    return patched
from typing import Dict
def load_lookup_from_csv(csv_source: str, count: int) -> Dict[str, int]:
    """
    From Dino, feature extraction utils

    Load a lookup dictionary, mapping string to rank, from a CSV file.
    Assumes the CSV to be pre-sorted; lines starting with '#' are
    skipped, and the key for each row is its last comma-separated field.

    :param csv_source: Source data filepath
    :param count: number of strings to load
    :return: dictionary mapping strings to rank
    :raises RuntimeError: if the file holds fewer than ``count`` data lines
    """
    lookup_dict: Dict[str, int] = dict()
    rank: int = 0
    with open(csv_source, 'r') as fd:
        for line in fd:
            if rank >= count:
                break
            if line.startswith("#"):
                continue
            lookup_dict[line.strip().split(',')[-1]] = rank
            rank += 1
    # Bug fix: the old loop read one line PAST the last needed entry, so
    # a file with exactly `count` data lines raised RuntimeError.
    if rank < count:
        raise RuntimeError(f"Not enough entries in file. Expected at least {count}, max is {rank}.")
    return lookup_dict
import traceback
import sys
from io import StringIO
def sandbox(code: str, block_globals: bool=False,
            block_locals: bool=False) -> tuple:
    """Runs the code-string and captures any errors

    WARNING: despite the name, this is NOT a security sandbox — exec()
    runs the string with full interpreter privileges. Never pass
    untrusted input.

    Args:
        code: executable string
        block_globals: if True don't use global namespace
        block_ locals: if True don't use local namespace
    Returns:
        output, stderr, and any exception code (exception is None when
        the code ran cleanly)
    """
    # Swap both std streams for in-memory buffers so prints/errors from
    # the executed code are captured rather than leaking to the console.
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    redirected_output = sys.stdout = StringIO()
    redirected_error = sys.stderr = StringIO()
    # NOTE(review): when not blocked, the executed code shares THIS
    # function's globals()/locals(), so it can read (and, via globals,
    # mutate) the enclosing module's state.
    namespace_globals = {} if block_globals else globals()
    namespace_locals = {} if block_locals else locals()
    output, error, exception = None, None, None
    try:
        exec(code, namespace_globals, namespace_locals)
    except:
        # Bare except is deliberate: capture every failure mode
        # (including SystemExit/KeyboardInterrupt) as a traceback string.
        exception = traceback.format_exc()
    output = redirected_output.getvalue()
    error = redirected_error.getvalue()
    # reset outputs to the original values
    sys.stdout = old_stdout
    sys.stderr = old_stderr
    return output, error, exception
def rel_change(exist, after):
    """Relative difference between two frame measurements.

    Args:
        exist: Value for the frame currently intercepted by the video.
        after: Value for the next captured frame.
    Returns:
        (after - exist) scaled by the larger of the two values.
    """
    larger = max(exist, after)
    return (after - exist) / larger
def get_edges(mapping):
    """
    Return the topology edge list implied by a process-to-node mapping,
    sorted, with each edge's endpoints ordered (low node, high node).

    :param mapping: Process-to-node mapping
    :return: Topology edge list as (node_a, node_b, weight) tuples
    """
    placement = mapping.mapping
    mapped = []
    for src, dst, attrs in mapping.process_graph.edges(data=True):
        a, b = placement[src], placement[dst]
        # Normalise endpoint order so (u, v) and (v, u) compare equal.
        mapped.append((min(a, b), max(a, b), attrs['weight']))
    return sorted(mapped)
import random
import string
def get_shipping_details_response(order_id, address_id):
    """
    Build a json-serialisable response for a shipping details request.

    :param order_id: ID for the transaction.
    :param address_id: Address ID received from Vipps.
    :return: dict with two shipping options (postNord / Posten), each
             carrying a fresh random 6-character method id.
    """
    def _method_id():
        # Opaque id: six random lowercase alphanumerics.
        return "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
    options = [
        {
            "isDefault": "N",
            "priority": 1,
            "shippingCost": 30.0,
            "shippingMethod": "postNord",
            "shippingMethodId": _method_id()
        },
        {
            "isDefault": "Y",
            "priority": 2,
            "shippingCost": 30.0,
            "shippingMethod": "Posten",
            "shippingMethodId": _method_id()
        }
    ]
    return {
        "addressId": address_id,
        "orderId": order_id,
        "shippingDetails": options
    }
def inversion_score(c):
    """Score indicating inversion: the median, across columns, of the
    summed positive temperature steps.

    NOTE(review): assumes ``c.data['T']`` selects a multi-column frame
    (so .sum() yields a per-column series with a .median()) — confirm.
    """
    steps = c.data['T'].diff()
    warming = steps[steps > 0]
    return warming.sum().median()
import unicodedata
def _is_control(char):
"""Checks whether `chars` is a control character."""
if ord(char) in (0, 0xfffd):
return True
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | ac4f684c5868d6c731e55f481c683a53129a7235 | 690,925 |
def fillgaps_time(ds, method='cubic', max_gap=None):
    """
    Fill gaps (nan values) across time using the specified method

    Parameters
    ----------
    ds : xarray.Dataset
        The adcp dataset to clean
    method : string
        Interpolation method to use
    max_gap : numeric
        Max number of consective NaN's to interpolate across

    Returns
    -------
    ds : xarray.Dataset
        The adcp dataset with gaps in velocity interpolated across time

    See Also
    --------
    xarray.DataArray.interpolate_na()
    """
    ds['vel'] = ds.vel.interpolate_na(dim='time', method=method,
                                      use_coordinate=True,
                                      max_gap=max_gap)
    if hasattr(ds, 'vel_b5'):
        # Bug fix: previously this interpolated ds.vel again and stored
        # the result in vel_b5, clobbering the 5th-beam velocity.
        ds['vel_b5'] = ds.vel_b5.interpolate_na(dim='time', method=method,
                                                use_coordinate=True,
                                                max_gap=max_gap)
    return ds
def predict1(payload):
    """
    Hosted function that echoes its input. When creating a predict
    image, this is the function hosted on the invocations endpoint.

    Arguments:
        payload (dict[str, object]): The payload that will eventually be
            sent to the server using a POST request.

    Returns:
        payload (dict[str, object]): Expected to be a dictionary (like
            the input) or a JSON string; here the input is returned
            unchanged.
    """
    print('Predict 1!')  # visible marker in the invocation logs
    return payload
def parseQRdata(qrData: str) -> dict:
    """
    Parse the QR payload read by qrtest.readQR() and return it as
    {'barkod': barcode, 'tckn': national id}.

    Raises when the reader reported no QR code (the literal "null").
    """
    if qrData == "null":
        raise Exception("QR okunamadı.")
    start = qrData.index("barkod:") + len("barkod:")
    end = qrData.index(";")
    barcode = qrData[start:end]
    start = qrData.index("tckn:") + len("tckn:")
    end = qrData.find(";", start)
    national_id = qrData[start:end]
    return {
        "barkod": barcode,
        "tckn": national_id
    }
def signtest_data(RNG, n, trend=0):
    """
    Create `n` pairs of normally distributed samples, with the first
    member of each pair shifted down by `trend` (so its values tend to
    be smaller). With `trend` == 0 the data conforms with the null
    hypothesis of the sign test.
    """
    # Draw all pair sizes first, then the samples, preserving the exact
    # RNG consumption order of the original comprehension.
    sizes = RNG.randint(15, 21, size=n)
    pairs = []
    for size in sizes:
        shifted = RNG.normal(size=size) - trend
        reference = RNG.normal(size=size)
        pairs.append((shifted, reference))
    return pairs
def delete_record(session, model, **kwargs):
    """Delete the first record of ``model`` matching the field filters in
    ``kwargs``; commit and return it, or return None when nothing matched.

    :param session: SQLAlchemy session
    :param model: mapped model class to query
    :param kwargs: model-specific field filters, passed to filter_by
    """
    # kwargs can be passed straight through; the previous rebuild via an
    # intermediate keys list produced an identical dict.
    instance = session.query(model).filter_by(**kwargs).first()
    if instance:
        session.delete(instance)
        session.commit()
    return instance
import re
def finditer_with_line_numbers(pattern, input_string, flags=0):
    """
    A version of 're.finditer' that yields '(match, line_number)' pairs
    (line numbers are 0-based).
    """
    found = list(re.finditer(pattern, input_string, flags))
    if not found:
        # Because this function is a generator, the empty list is never
        # seen by callers; it simply ends the iteration immediately.
        return []
    last_needed = found[-1].start()
    # Map newline offset -> line number. The -1 key covers matches on
    # line 0, since rfind returns -1 when no newline precedes the match.
    newline_to_line = {-1: 0}
    for line_no, newline in enumerate(re.finditer(r'\n', input_string), 1):
        offset = newline.start()
        if offset > last_needed:
            break  # no match lies beyond this point
        newline_to_line[offset] = line_no
    for match in found:
        preceding = input_string.rfind('\n', 0, match.start())
        yield match, newline_to_line[preceding]
import csv
def read_employees(csv_file_location):
    """
    Convert csv file to a list of dictionaries.

    Receives a CSV file path as a parameter and returns a list of
    dictionaries (one per row, keyed by the header fields).
    """
    csv.register_dialect("empDialect", skipinitialspace=True, strict=True)
    # Use a context manager so the file handle is closed (the previous
    # version passed open() directly to DictReader and leaked it).
    with open(csv_file_location) as csv_file:
        employee_file = csv.DictReader(csv_file, dialect="empDialect")
        return [row for row in employee_file]
def quinternary_spherical(rho, phi):
    """Zernike quinternary spherical: a radial polynomial in rho.

    ``phi`` is part of the standard Zernike term signature but is unused
    by this purely radial term.
    """
    # Terms listed highest order first; summing left-to-right reproduces
    # the original chained +/- evaluation exactly.
    terms = [
        924 * rho**12,
        -2772 * rho**10,
        3150 * rho**8,
        -1680 * rho**6,
        420 * rho**4,
        -42 * rho**2,
        1,
    ]
    return sum(terms)
def zeros_like_grad(orig, grad):
    """Gradient registration for zeros_like: returns ``[orig]``.

    NOTE(review): presumably ``orig`` is the zeros_like call itself, so
    reusing it yields an all-zeros gradient (the old docstring's
    "Returns [0]" shorthand) — confirm against the op registry.
    ``grad`` is accepted for the uniform gradient signature but unused.
    """
    return [orig]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.