content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import os
def is_file_a_test_file(file_path: str) -> bool:
    """Return True if *file_path* is an existing JavaScript test file.

    :param file_path: path to the file.
    :return: True when the path is an existing ``.js`` file under jstests.
    """
    # The file may have been moved or deleted in the patch, so check existence.
    if os.path.splitext(file_path)[1] != ".js":
        return False
    if not os.path.isfile(file_path):
        return False
    return "jstests" in file_path
import subprocess
def standalone_run(command_list, env=None):
    """Execute *command_list* as an external process.

    Encapsulated and passed as a parameter to higher-level execution
    functions because a caller (e.g. internal CI) may prefer its own
    execution function. Raises CalledProcessError on non-zero exit.

    :return: (stdout, stderr) bytes of the executed process.
    """
    completed = subprocess.run(command_list, check=True, capture_output=True, env=env)
    return completed.stdout, completed.stderr
import errno
def _is_error(self, exception, *errors):
""" Determine if an exception belongs to one of the given named errno module errors. """
errors = [ getattr(errno, error, None) for error in errors ]
return exception.errno in errors | c1aab4862b721f83986b70d96a9906089c44bf5b | 694,068 |
from uuid import uuid4
def create_tmp_name(base=None):
    """Create a short unique temporary name, optionally prefixed with *base*."""
    prefix = base + '_' if base else ''
    return prefix + uuid4().hex[:10]
from datetime import datetime
def format_timestamp() -> str:
    """Return the current local time as 'YYYY-MM-DD HH:MM:SS'.

    The format is what database.py expects to parse.
    """
    return f"{datetime.now():%Y-%m-%d %H:%M:%S}"
def take_msb_bytes(read):
    """Read bytes while the most significant bit is set.

    :param read: callable returning the next single byte.
    :return: list of integer byte values, including the terminating byte.
    """
    ret = [ord(read(1))]
    while ret[-1] & 0x80:
        ret.append(ord(read(1)))
    return ret
import re
def check_user_format(email, password, name=None, password2=None):
    """
    Validate registration/login input.

    :param email: email of the user trying to register
    :param password: password of the user trying to register
    :param name: name of the user trying to register (register flow only)
    :param password2: repeated password (register flow only)
    :return: name of the first failing field ("Email", "Password",
        "Name" or "Confirm Password"), or None if everything passes
    """
    # Email must conform to RFC 5322 (regex kept verbatim).
    regexp = re.compile(r'([!#-\'*+/-9=?A-Z^-~-]+(\.[!#-\'*+/-9=?A-Z^-~-]+)*|"(['
                        r']!#-[^-~ \t]|(\\[\t -~]))+")@([!#-\'*+/\-9=?A-Z^-~-]+'
                        r'(\.[!#-\'*+/-9=?A-Z^-~-]+)*|\[[\t -Z^-~]*])')
    if regexp.match(email) is None:
        return "Email"
    # Password: at least 6 chars, with one lower, one upper and one
    # special (non-alphanumeric) character.
    has_lower = any(ch.islower() for ch in password)
    has_upper = any(ch.isupper() for ch in password)
    has_special = any(not ch.isalnum() for ch in password)
    if len(password) < 6 or not (has_lower and has_upper and has_special):
        return "Password"
    if name is None or password2 is None:
        # Login flow: nothing further to check.
        return None
    # Register flow: name must be 3-19 chars, no leading/trailing space,
    # alphanumeric apart from internal spaces.
    if (len(name) >= 20 or len(name) <= 2
            or name[0] == " " or name[-1] == " "
            or not name.replace(" ", "").isalnum()):
        return "Name"
    # Both password entries must match.
    if password2 != password:
        return "Confirm Password"
    return None
def _uuid_representer(dumper, data):
"""Add UUID serializer for YAML."""
return dumper.represent_str(str(data)) | ba665f66dba08a338f97e6753e94d2bd8cfe73f0 | 694,074 |
def qs(ll):
    """Return len(ll) '?' placeholders separated by ',' for use in SQL queries."""
    return ','.join('?' for _ in ll)
def _split_dataset_id(dataset_id):
"""splits a dataset id into list of values."""
return dataset_id.split("|")[0].split(".") + [(dataset_id.split("|")[1])] | 9a1c3d23af502fd21db3de9485cfcdb75d84ba6d | 694,076 |
from typing import Union
import torch
import numpy
def inverse_linear_exp_activation(y: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]:
    """Compute x = f^-1(y), where f is linear below 1 and exponential above.

    f is continuous with a continuous first derivative, so the inverse is
    the identity for y < 1 and log(y) + 1 otherwise.
    """
    if isinstance(y, float):
        return y if y < 1.0 else numpy.log(y) + 1.0
    if isinstance(y, torch.Tensor):
        return torch.where(y < 1.0, y, y.log() + 1.0)
    raise Exception("input type should be either float or torch.Tensor ->", type(y))
def Main():
    """Return the constant 1.

    :return: int
    """
    result = 1
    return result
def test_set_add():
    """Exercise set.add with duplicates and mixed element types.

    Returns the set {1, 'a', (1, 2)}; duplicate adds are no-ops.
    """
    result = {1, (1, 2)}
    for item in (1, 'a', 1, (1, 2)):
        result.add(item)
    return result
def get_meta_str(vw):
    """Return a one-line summary of workspace metadata and function count."""
    fields = [
        "%s: %s" % (key.lower(), vw.metadata[key])
        for key in ("Format", "Platform", "Architecture")
        if key in vw.metadata
    ]
    return "%s, number of functions: %d" % (", ".join(fields), len(vw.getFunctions()))
from typing import Any
from enum import Enum
def namedtuple_to_dict(obj: Any):
    """Convert a NamedTuple or dataclass instance to a dict usable as config.

    Enum values are replaced by their ``.value``. NamedTuples have no
    ``__dict__`` (the old code raised AttributeError for them despite the
    docstring), so ``_asdict`` is used when available.
    """
    items = obj._asdict() if hasattr(obj, "_asdict") else obj.__dict__
    res = {}
    for k, v in items.items():
        if isinstance(v, Enum):
            # in case of enum, serialize the enum value
            res[k] = v.value
        else:
            res[k] = v
    return res
def validate_string(str_argument, str_argument_name):
    """Return *str_argument* if it is a str; otherwise raise ValueError."""
    if isinstance(str_argument, str):
        return str_argument
    raise ValueError(f"Illegal str argument: {str_argument_name}")
def _recurse_subclasses(class_to_recurse):
""" List subclasses """
def generator(x):
for y in x.__subclasses__():
for z in generator(y):
yield z
if x != class_to_recurse:
yield x
return list(generator(class_to_recurse)) | d43bf15780a3f803fadc2f16b068fda652e901ae | 694,085 |
def hit(intersections):
    """Return the intersection with the smallest non-negative t, or None."""
    candidates = [i for i in (intersections or []) if i.t >= 0.0]
    if candidates:
        return min(candidates, key=lambda i: i.t)
    return None
def jwt_create_response_payload(
    token, user=None, request=None, issued_at=None
):
    """
    Return data ready to be passed to a serializer.

    Override this function to include any additional data. The `pk` field
    is kept for forward compatibility with drf add-ons that might require
    a `pk` field (e.g. jsonapi).
    """
    payload = {'pk': issued_at}
    payload['token'] = token
    return payload
import numpy
def normalize(samples):
    """Rescale each row (channel) of *samples* to lie within [-1, 1]."""
    peaks = numpy.amax(numpy.abs(samples), 1)
    return samples / peaks[:, numpy.newaxis]
import json
def is_json(string):
    """Return True if *string* parses as valid JSON, else False."""
    try:
        json.loads(string)
    except (ValueError, TypeError):
        # json.JSONDecodeError subclasses ValueError; TypeError covers
        # non-string input, which previously escaped to the caller.
        return False
    return True
def uint32_tag(name, value):
    """Create a DMAP tag: name bytes, 4-byte length marker, big-endian uint32."""
    payload = value.to_bytes(4, byteorder='big')
    return name.encode('utf-8') + b'\x00\x00\x00\x04' + payload
def decorator(accessing_obj, accessed_obj, *args, **kwargs):
    """
    Check whether accessing_obj is an owner or decorator of the room that
    accessed_obj is in (or of accessed_obj itself when it has no location).
    """
    target = accessed_obj.location or accessed_obj
    try:
        return accessing_obj in target.homeowners or accessing_obj in target.decorators
    except (AttributeError, ValueError, TypeError):
        return False
import re
def _importer(string, scope):
    """
    Scan *string* for dotted identifier expressions and import the
    referenced top-level modules into *scope*.

    :param string: source text to scan for names like ``json.loads``.
    :param scope: dict-like namespace; each importable top-level module
        found is bound under its name. Failed imports are silently skipped.
    :return: the (mutated) scope.
    """
    matches = set(re.findall("([a-zA-Z_.][a-zA-Z0-9_.]*)", string))
    for m in matches:
        split = m.split('.', 1)
        # Like: json.loads(line)
        if len(split) == 1:
            module = split[0]
            other = []
        # Like: tests.module.upper(line)
        elif len(split) == 2:
            module, other = split
            other = [other]
        # Shouldn't hit this
        else: # pragma no cover
            raise RuntimeError("Error importing: {}".format(m))
        # Are you trying to figure out why relative imports don't work? If so,
        # the issue is probably `m.split()` producing ['', 'name'] instead of
        # ['.name']. `__import__('.name')__` doesn't appear to work though,
        # so good luck!
        if not module:
            continue
        try:
            scope[module] = __import__(
                module,
                fromlist=list(map(str, other)), # Python 2 can't handle unicode
                level=0)
        except ImportError:
            pass
    return scope | 1f7f4321bc501779b8149b88f41cf13edd2df8c6 | 694,092 |
import os
def get_file_name_no_ext(path):
    """Return the base file name of *path* without its extension."""
    base = os.path.basename(path)
    return os.path.splitext(base)[0]
def sort(data, delimiter=","):
    """
    Sort the value portion of each CSV row, keeping the leading identifier.

    Args:
        data (list): rows as delimited strings or as pre-split value lists.
        delimiter (str): delimiter of the CSV format to use for splitting.

    Returns:
        list: one list per row: [identifier, sorted values...].
    """
    data_sorted = []
    for row in data:
        # Accept either a raw delimited string or an already-split list.
        values = row.split(delimiter) if isinstance(row, str) else row
        # Keep the identifier aside (renamed: `id` shadowed the builtin).
        row_id = values[0]
        # Sort the remaining values and put the identifier back in front.
        row_sorted = [row_id] + sorted(values[1:])
        data_sorted.append(row_sorted)
    return data_sorted
def nms_1d(v):
    """Non-maximum suppression over a 1D array.

    Any element strictly smaller than a neighbour is zeroed in the copy;
    arrays shorter than 2 are returned unchanged.

    :param v: a 1D numpy array
    :return: array of the same shape with non-peaks set to 0
    """
    v_out = v.copy()
    n = v.shape[0]  # renamed: `len` shadowed the builtin
    if n < 2:
        return v
    for i in range(n):
        # `!=` instead of `is not`: identity comparison on ints only works
        # for CPython's small-int cache and silently breaks for n > 257.
        if i != 0 and v[i - 1] > v[i]:
            v_out[i] = 0.
        elif i != n - 1 and v[i + 1] > v[i]:
            v_out[i] = 0.
    return v_out
def get_average_mos(solution, ignore_non_served=False):
    """Return the average MOS of a solution.

    With ignore_non_served=True, only users that actually get video
    (i.e. have a "mos" entry) are counted; otherwise the average is
    taken across all users.
    """
    served = [u["mos"] for u in solution["users"] if "mos" in u]
    total = sum(served)
    if ignore_non_served:
        # only take into account users that get some video
        return total / len(served)
    # calculate avg QoE across all users
    return total / len(solution["users"])
import traceback
def _format_exception(exc_type, exc_value, exc_traceback):
"""Own customization of traceback.format_exception().
When the result comes from another process the traceback is already
serialized thus we just have to return it.
"""
# HTestResultServer passes serialized traceback.
if isinstance(exc_traceback, list):
return exc_traceback
return traceback.format_exception(exc_type, exc_value, exc_traceback) | cf5d60f7cee4dd161b4a7965dd6565d7b2e891e9 | 694,097 |
def clamp(value, minimum, maximum):
    """
    Clamp *value* into the closed interval [minimum, maximum].

    :param float value:
    :param float minimum:
    :param float maximum:
    :raises ValueError: if maximum < minimum.
    """
    if maximum < minimum:
        raise ValueError(f"{maximum} is smaller than {minimum}")
    if value < minimum:
        return minimum
    if value > maximum:
        return maximum
    return value
def rc_expanded(seq):
    """
    Return the reverse complement of *seq* for the expanded alphabet.

    Handles the two-fold degenerate codes R=[AG], Y=[CT], K=[GT], M=[AC],
    S=[GC], W=[AT], the three-fold codes h=[ACT], v=[ACG], d=[AGT],
    b=[CGT], and N=[ATCG]. Input must be lowercase.
    """
    # Build the complement table from mutually-complementary pairs
    # (s, w and n are palindromic / self-complementary).
    pairs = "at cg ry km ss ww nn hd vb"
    compdict = {}
    for first, second in pairs.split():
        compdict[first] = second
        compdict[second] = first
    return "".join(compdict[base] for base in reversed(seq))
def replace_template_path(path):
    """Replace an original template path with its new module/file descriptor.

    Known "ipypublish.<fmt>.<variant>" modules are rewritten to the
    ipypublish.templates.segments module with a per-variant file name;
    unknown modules fall back to "<name>.json" with a warning.
    """
    segments = path.split(".")
    module = ".".join(segments[0:-1])
    name = segments[-1]
    # (prefix, extension) for each known legacy template module.
    known = {
        "ipypublish.html.ipypublish": ("ipy", "html-tplx"),
        "ipypublish.html.standard": ("std", "html-tplx"),
        "ipypublish.latex.standard": ("std", "latex-tpl"),
        "ipypublish.latex.ipypublish": ("ipy", "latex-tpl"),
    }
    if module in known:
        prefix, ext = known[module]
        return {
            "module": "ipypublish.templates.segments",
            "file": "{0}-{1}.{2}.json".format(prefix, name, ext),
        }
    print("Warning: unknown template path: {}".format(path))
    return {"module": module, "file": "{0}.json".format(name)}
import os
def split_filename_suffix(filepath):
    """
    Split *filepath*'s base name into (name, suffix).

    Multi-dot suffixes are only supported for '.tar.gz'.

    :param filepath: path to the file
    :return: (filename, suffix)
    """
    filename = os.path.basename(filepath)
    if filename.endswith('.tar.gz'):
        return filename[:-len('.tar.gz')], '.tar.gz'
    return os.path.splitext(filename)
import getpass
def password(what):
    """Prompt the user for a password and verify it.

    Loops until the password and its verification entry match.

    Args:
        what (string): label describing which password to enter.

    Returns:
        (string): the confirmed password.
    """
    while True:
        pass_ = getpass.getpass("{} Password: ".format(what))
        pass__ = getpass.getpass("Verify {} Password: ".format(what))
        if pass_ == pass__:
            return pass_
        else:
            print("Passwords didn't match, try again.") | bf2328d709490333cbc68c2620ee3d0dc087e6e2 | 694,103 |
def checkListAgainstDictKeys(theList, theDict):
    """
    Partition *theList* into items that are / are not keys of *theDict*.

    :param theList: list, items that may not be in theDict
    :param theDict: dict, dictionary whose keys are checked against
    :return:
        inList, list, items from the list found among the dict keys
        outList, list, items from the list NOT found among the dict keys
        (both sorted)
    """
    keySet = set(theDict)
    inList = sorted(item for item in theList if item in keySet)
    outList = sorted(item for item in theList if item not in keySet)
    return inList, outList
def title(txt):
    """Provide a nice title for parameterized testing."""
    last = str(txt).split('.')[-1]
    return last.replace("'", '').replace('>', '')
import collections
def convert_defaultdict_to_regular_dict(inputdict: dict):
    """
    Recursively convert a defaultdict (and any nested ones) into a plain dict.
    """
    if not isinstance(inputdict, collections.defaultdict):
        return inputdict
    return {
        key: convert_defaultdict_to_regular_dict(value)
        for key, value in inputdict.items()
    }
def get_mung_locs(anc, code, output_bucket):
    """Return the expected locations of munged sumstats output.

    Parameters
    ----------
    anc : str
        Ancestry prefix.
    code : str
        Phenotype code.
    output_bucket : str
        Bucket in which the munging output is stored.

    Returns
    -------
    (str, str)
        File location and log location.
    """
    base = output_bucket + anc + '/munged_sumstats/' + code
    return base + '.sumstats.gz', base + '.log'
def extend_empty_sets(empty: dict, grammar: dict) -> dict:
    """
    One fixpoint step of the nullable-nonterminal computation for an LL(1)
    grammar; apply repeatedly until converging.

    :param empty: nonterminal -> bool
    :param grammar: nonterminal -> [productions...]
    :returns: Extended copy of ``empty``
    """
    result = {}
    for symbol, productions in grammar.items():
        nullable = empty.get(symbol)
        if not nullable:
            # Nullable if any production consists solely of nullable symbols.
            nullable = any(
                all(empty.get(p) for p in production)
                for production in productions
            )
        result[symbol] = nullable
    return result
from typing import Any
def synchronize_dropdowns(tab: str, store: Any) -> Any:
    """When the data store changes, mirror it to both synchronized outputs."""
    return (store, store)
import sys
def findPythonExecutable():
    """Returns the path to the python executable. Used by the
    :meth:`NotebookServer.run` method to start the notebook server in
    a separate process.
    """
    # These were referenced as `op`/`it` but never imported in this
    # module; bind them locally so the function is self-contained.
    import os.path as op
    import itertools as it
    # No problem if we're not on macOS, or
    # we're not running in a conda environment
    if sys.platform != 'darwin':
        return sys.executable
    # Don't rely on environment variables to
    # detect conda, because we may have been
    # called without our environment being
    # activated
    if not op.exists(op.join(sys.prefix, 'conda-meta')):
        return sys.executable
    # On macOS+conda, we need to make sure
    # that a framework interpreter is used,
    # The conda-forge python.app package
    # sets the PYTHONEXECUTABLE variable,
    # which causes sys.executable to be
    # clobbered. So we need to try and find
    # an appropriate executable to use.
    #
    # (Note: we could solve this problem by
    # changing how the notebook server is
    # executed, e.g. via an independent
    # script instead of via fsleyes.main).
    executable = sys.executable
    exes = ['pythonw', 'python.app', 'python']
    dirs = [op.join(sys.exec_prefix, 'bin'),
            op.dirname(sys.executable)]
    for exe, dirname in it.product(exes, dirs):
        exe = op.join(dirname, exe)
        if op.exists(exe):
            executable = exe
            break
    return executable
import json
def json_read(path):
    """
    Read and return the JSON content of the UTF-8 file at *path*.
    """
    with open(path, mode="r", encoding="utf-8") as handle:
        return json.load(handle)
def cycle(counter, rule):
    """Return True when a forloop.counter conforms to the given "{step}/{scale}" rule.

    The counter is 1-based: "1/2" matches counters 1, 3, 5, ...,
    "2/2" matches 2, 4, 6, ... More complex steppings also work:
    "2/3" matches every third counter starting at 2, "3/3" every third
    starting at 3, and so on.
    """
    step_text, scale_text = rule.split("/")
    step = int(step_text)
    scale = int(scale_text)
    return (counter - 1) % scale == step - 1
def CreateModelInfo(model_info_d):
    """Build an HTML snippet describing the model in use.

    :param model_info_d: dict with key 'model_in_use' (str path)
    :return: HTML string
    """
    model_path = model_info_d['model_in_use']
    parts = [
        "<h2> Information on Model: </h2>",
        "<h3> Using the following given Model:</h3>",
        "<h4>" + model_path.split('/')[-1] + "</h4>",
        "<h1>--------------------------------------</h1>",
    ]
    return "\n".join(parts)
import hashlib
def get_hash(content):
    """Return the hex SHA-1 digest of *content* (bytes)."""
    digest = hashlib.sha1(content)
    return digest.hexdigest()
from io import StringIO
import re
def decode_license(data):
    """Decode a US driver's license from a PDF417 (AAMVA) barcode payload.

    :param data: raw barcode text; must begin with the '@' compliance marker.
    :return: dict mapping 3-character element ids to values, or None on
        any parse failure.
    """
    try:
        sio = StringIO(data)
        # Only the first character is the compliance indicator. The old
        # code used `sio.read()` (no size), which consumed the entire
        # stream, so parsing could never proceed past the header.
        if sio.read(1) != '@':
            return None
        header_groups = re.match("^\n\x1e\r([ a-zA-Z]{5})([0-9]{6})([0-9]{2})([0-9]{2})$", sio.read(18))
        if not header_groups:
            return None
        subfile_count = int(header_groups.group(4))
        # Subfile directory: 2-char type, 4-digit offset, 4-digit length.
        subfile = {}
        for i in range(subfile_count):
            subfile_type = sio.read(2)
            subfile_offset = int(sio.read(4))
            subfile_length = int(sio.read(4))
            subfile[subfile_type] = (subfile_offset, subfile_length)
        elements = {}
        for i in range(subfile_count):
            # Each subfile's data runs until a carriage return (or EOF).
            subfile_data = ''
            c = sio.read(1)
            while c != '\r' and c != '':
                subfile_data += c
                c = sio.read(1)
            # Skip the 2-char subfile type prefix; one element per line,
            # the first 3 characters being the element id.
            for element in subfile_data[2:].split('\n'):
                element = element.strip()
                if len(element) > 3:
                    elements[element[0:3]] = element[3:]
        return elements
    except Exception:
        # Malformed input (truncated directory, non-numeric offsets, ...).
        return None
def main(stdin):
    """
    Aggregate (word, count) pairs from *stdin* lines.

    Prints one tab-separated "word<TAB>total" line per word. Input must
    be grouped by word (e.g. the sorted output of a map step).
    """
    (word, count) = (None, 0)
    for line in stdin:
        (new_word, new_count) = line.split()
        new_count = int(new_count)
        # If we've seen this word before, continue tally,...
        if new_word == word:
            count += new_count
        # ...otherwise...
        else:
            # ...print the current tally if not at start...
            # (`is not None`, not `!= None`: identity test for the sentinel)
            if word is not None:
                print(("{word}\t{count}").format(word=word, count=count))
            # ...and reset the tally.
            count = new_count
        # Track the last word seen.
        word = new_word
    # At end, print the last tally.
    print(("{word}\t{count}").format(word=word, count=count))
    return None
def model_auth_fixture() -> dict:
    """Generate a mock auth dictionary with the same keys as a get-model
    request's response.

    Returns:
        dict: Mock auth dictionary
    """
    return {
        "asset_id": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
        "reason": "reason for access",
        "view": True,
        "read": True,
        "update": False,
        "destroy": False,
    }
def is_yaml(path):
    """
    Check whether the file at *path* is a yaml file (by extension).

    Parameters
    ----------
    path : str
        The relative path to the file that should be checked.

    Returns
    -------
    bool
        Whether or not the specified file is a yaml-file.
    """
    # str.endswith accepts a tuple of suffixes — one call instead of two.
    return path.endswith(('.yaml', '.yml'))
def render_flags(flags, bit_list):
    """Render *flags* as a comma-separated list of bit names.

    Bits not covered by *bit_list* are shown as UNK_xxxx; returns '-'
    when no bit is set.
    """
    names = [label for mask, label in bit_list if flags & mask]
    known = 0
    for mask, _ in bit_list:
        known |= mask
    remaining = flags & ~known
    position = 0
    while remaining:
        if remaining & 1:
            names.append("UNK_%04x" % (1 << position))
        remaining >>= 1
        position += 1
    return ",".join(names) if names else '-'
def _get_secondary_status(row):
"""Get package secondary status."""
try:
return row.find('div', {'id': 'coltextR3'}).contents[1]
except (AttributeError, IndexError):
return None | faa2f280e978c574280e610453ceba6116931c41 | 694,126 |
def ensure_plan(planner, name):
    """Return the planner plan titled *name*, creating it if absent.

    :type planner: office365.planner.planner_user.PlannerUser
    :type name: str
    :rtype: PlannerPlan
    """
    query = "title eq '{0}'".format(name)
    plans = planner.plans.get().filter(query).execute_query()
    if len(plans) == 0:
        return planner.plans.add(title=name).execute_query()
    return plans[0]
def _depgrep_parens_action(_s, _l, tokens):
"""
Builds a lambda function representing a predicate on a tree node
from a parenthetical notation.
"""
# print 'parenthetical tokens: ', tokens
assert len(tokens) == 3
assert tokens[0] == "("
assert tokens[2] == ")"
return tokens[1] | 0e94cdf89c85f6e4b9c311673707b5a67120da6c | 694,128 |
def get_treetagger_triple(string):
    """
    Split one TreeTagger output line into a (word, pos, lemma) triple.

    Lines without exactly two tabs are ignored (None is returned). These
    are usually single <repdns>/<repurl> elements that TreeTagger emits
    when it replaces a token (e.g. dns-remplacé); the replaced text lives
    in that element's "text" attribute.
    """
    elems = string.split('\t')
    return elems if len(elems) == 3 else None
def _basename_of_variable(varname):
"""Get the base name of a variable, without the time index.
Given a variable "FooBar_10", the last component is the time index (if present),
so we want to strip the "_10". This does no error checking. You should not be
passing in a variable whose name starts with "Seq_"; this is for user-generated
names, not the internal names for DataTank.
"""
comps = varname.split("_")
# handle variables that aren't time-varying
if len(comps) == 1:
return varname
# this is kind of a heuristic; assume anything that is all digits is a time index
return "_".join(comps[:-1]) if comps[-1].isdigit() else varname | 29e36a160e6885ce4a1e6bbe9d0ebdd038574443 | 694,130 |
def processing_tasks():
    """Whether task processing should keep running.

    Always True in production (triggered by a GET to /_ah/start); tests
    override this method so it can return False and stop the loop.
    """
    return True
import random
def Gen_Secret():
    """Return 32 cryptographically secure random bytes as a 64-char hex string.

    The old implementation used the `random` module (not suitable for
    secrets — its own comment asked for a secure generator) and
    `randrange(0, 255)`, which could never produce byte value 0xff.
    """
    import secrets
    return secrets.token_hex(32)
def _matrices(l): # {{{1
    """Return the laminate's matrices as LaTeX arrays.

    :param l: laminate object exposing ``ABD`` (6x6 in-plane stiffness),
        ``H`` (2x2 transverse stiffness) and ``C`` (6x6 3D stiffness
        tensor) matrices.
    :return: list of LaTeX source lines.
    """
    def pm(mat, r=6):
        """Return the contents of an r-by-r matrix as LaTeX array rows."""
        lines = []
        for t in range(r):
            numl = []
            for m in range(r):
                num = mat[t][m]
                if num == 0.0:
                    nums = "0"
                else:
                    # Split mantissa/exponent so the exponent can be typeset
                    # as \times 10^{e} instead of "e+NN" notation.
                    nums, exp = "{:> 10.4e}".format(mat[t][m]).split("e")
                    exp = int(exp)
                    if exp != 0:
                        nums += "\\times 10^{{{}}}".format(exp)
                numl.append(nums)
            lines.append(" " + " & ".join(numl) + r"\\")
        return lines
    lines = [
        " \\vbox{",
        " \\vbox{\\small\\textbf{In-plane stiffness (ABD) matrix}\\\\[-3mm]",
        " \\tiny\\[\\left\\{\\begin{array}{c}",
        " N_x\\\\ N_y\\\\ N_{xy}\\\\ M_x\\\\ M_y\\\\ M_{xy}",
        " \\end{array}\\right\\} = ",
        " \\left|\\begin{array}{cccccc}",
    ]
    lines += pm(l.ABD)
    lines += [
        " \\end{array}\\right| \\times",
        " \\left\\{\\begin{array}{c}",
        " \\epsilon_x\\\\[2pt] \\epsilon_y\\\\[2pt] \\gamma_{xy}\\\\[2pt]",
        " \\kappa_x\\\\[2pt] \\kappa_y\\\\[2pt] \\kappa_{xy}",
        " \\end{array}\\right\\}\\]",
        " }",
        " \\vbox{\\small\\textbf{Transverse stiffness (H) matrix}\\\\[-2mm]",
        " \\tiny\\[\\left\\{\\begin{array}{c}",
        " V_y\\\\ V_x",
        " \\end{array}\\right\\} = ",
        " \\left|\\begin{array}{cc}",
    ]
    lines += pm(l.H, r=2)
    lines += [
        " \\end{array}\\right| \\times",
        " \\left\\{\\begin{array}{c}",
        " \\gamma_{yz}\\\\[2pt] \\gamma_{xz}",
        " \\end{array}\\right\\}\\]",
        " }",
    ]
    lines += [
        " \\vbox{\\small\\textbf{3D stiffness tensor (C), contracted notation}\\\\[-3mm]",
        " \\tiny\\[\\left\\{\\begin{array}{c}",
        " \\sigma_{11}\\\\ \\sigma_{22}\\\\ \\sigma_{33}\\\\ \\sigma_{23}\\\\ \\sigma_{13}\\\\ \\sigma_{12}",
        " \\end{array}\\right\\} = ",
        " \\left|\\begin{array}{cccccc}",
    ]
    lines += pm(l.C)
    lines += [
        " \\end{array}\\right| \\times",
        " \\left\\{\\begin{array}{c}",
        " \\epsilon_{11}\\\\[2pt] \\epsilon_{22}\\\\[2pt] \\epsilon_{33}\\\\[2pt]",
        " 2\\cdot\\epsilon_{23}\\\\[2pt] 2\\cdot\\epsilon_{13}\\\\[2pt] 2\\cdot\\epsilon_{12}",
        " \\end{array}\\right\\}\\]",
        " }",
        " }",
    ]
    return lines | 5d4a6bd13de7243809e1ebf82b96631bf3bd0727 | 694,134 |
import time
def time_str(time_s: float) -> str:
"""Concert a timestamp to a String."""
return time.strftime("%c", time.localtime(time_s)) | f8f82cf32234b1468b4402d1f8c823448229c91a | 694,135 |
def merge_args_to_kwargs(argspec, args, kwargs):
    """
    Based on the argspec, convert *args* to kwargs and merge them into *kwargs*.

    Note:
        This returns a new dict instead of modifying the kwargs in place.

    Args:
        argspec (FullArgSpec): output from `inspect.getfullargspec`
        args (tuple): positional arguments
        kwargs (dict): dict of named args

    Returns:
        dict: named args merged with positional args converted to named
        args, with declared defaults filled in for anything still missing;
        surplus positionals are stored under '*<varargs name>'.
    """
    fn_args_to_kwargs = {arg_name: arg_val for arg_name, arg_val in zip(argspec.args, args)}
    # We create a copy of the dict to keep the original **kwarg dict pristine
    fn_kwargs = kwargs.copy()
    fn_args_to_kwargs.update(fn_kwargs)
    # Zip from the right: defaults align with the trailing parameters.
    default_arg_value = zip(reversed(argspec.args), reversed(argspec.defaults or ()))
    # Apply defaults if values are missing
    for arg_name, arg_val in default_arg_value:
        if arg_name not in fn_args_to_kwargs:
            fn_args_to_kwargs[arg_name] = arg_val
    if argspec.varargs and len(args) > len(argspec.args):
        # We were given more args than possible so they much be part of *args
        # (the negative slice start keeps only the surplus positionals)
        fn_args_to_kwargs['*{}'.format(argspec.varargs)] = args[len(argspec.args)-len(args):]
    return fn_args_to_kwargs | 09f82f7af0adbf640f173b65cc0c656638a1ac2b | 694,136 |
def _check_flip(origin_imgs, result_imgs):
"""Check if the origin_imgs are flipped correctly."""
h, w, c = origin_imgs.shape
for i in range(h):
for j in range(w):
for k in range(c):
if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
return False
return True | 57c62444c01fcc6c70397503f8206fd5350b1e81 | 694,138 |
def valid_url_string() -> str:
    """Return a string containing a syntactically valid URL."""
    return "https://example.com"
import numpy
def trim_adaptors_check(records, adaptor, min_match=6):
    """Collect read-length statistics for perfect adaptor matches.

    Despite its name this neither trims nor yields records: it scans each
    SeqRecord for the first *min_match* bases of *adaptor* and gathers
    summary statistics.
    (Adapted from
    http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc289)

    :param records: iterable of SeqRecord objects.
    :param adaptor: adaptor sequence searched for.
    :param min_match: number of leading adaptor bases that must match.
    :return: (total reads, reads with adaptor, mean read length,
        std read length, mean trimmed length, std trimmed length)
    """
    len_adaptor = len(adaptor) #cache this for later
    withadapter = 0
    trimmedbases, totalbases = [], []
    for record in records:
        len_record = len(record)
        totalbases.append(len_record)
        # find() returns -1 when the adaptor prefix is absent.
        index = record.seq.find(adaptor[0:min_match])
        if not index == -1:
            withadapter=withadapter+1
            trimmedbases.append(index)
    # NOTE(review): numpy.mean/std of an empty list yields nan with a
    # RuntimeWarning when no record matches — confirm callers tolerate it.
    mean_readlen = numpy.mean(totalbases)
    std_readlen = numpy.std(totalbases)
    readlen_trimmed = numpy.mean(trimmedbases)
    std_readlen_trimmed =numpy.std(trimmedbases)
    totalreads = len(totalbases)
    return totalreads, withadapter, mean_readlen, std_readlen, readlen_trimmed, std_readlen_trimmed | a4399737528fc8e568649349eb19fb9ac6630e74 | 694,140 |
def _update_distance(v, v_neighbor,
w_graph_d, distance_d, previous_d):
"""Update distance with previous node."""
if (distance_d[v_neighbor] >
distance_d[v] + w_graph_d[v][v_neighbor]):
distance_d[v_neighbor] = (
distance_d[v] + w_graph_d[v][v_neighbor])
previous_d[v_neighbor] = v
return distance_d, previous_d | f485a7586f89c4560d9638eb097aa673ab9069c1 | 694,141 |
import asyncio
def _to_coroutine(coro):
    """Convert a plain callable into a coroutine function that runs it in
    the event loop's default executor."""
    loop = asyncio.get_event_loop()
    async def _coro(t):
        return await loop.run_in_executor(None, coro, t)
    return _coro | 40d25bed589772fb036920f5f60bc1fc01f9cce2 | 694,142 |
def get_str(obj, field, length):
    """
    Fetch obj[field] as a string truncated to *length* characters.

    :param obj: mapping to read from
    :param field: key to look up
    :param length: maximum number of characters kept
    :return: truncated string, or None when the key is absent or None
    """
    value = obj.get(field)
    if value is None:
        return None
    return str(value)[:length]
import logging
import functools
import time
def status(status_logger: logging.Logger):
    """
    Decorator factory: log start/completion and wall time of the wrapped call.

    :param status_logger: logger that receives the status messages
    """
    def status_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            label = func.__name__
            status_logger.info(f'Initiated: {label}')
            began = time.time()
            outcome = func(*args, **kwargs)
            ended = time.time()
            status_logger.info(f'Completed: {label} -> {ended - began:0.3g}s')
            return outcome
        return wrapper
    return status_decorator
import zipfile
def get_parts(fname):
    """Return the sorted list of part names in an OPC package (a zip file)."""
    with zipfile.ZipFile(fname) as zip_archive:
        # namelist() already returns a list; the old comprehension copy
        # was redundant.
        return sorted(zip_archive.namelist())
import pathlib
def _relative_if_subdir(fn):
"""Internal: get a relative or absolute path as appropriate.
Parameters
----------
fn : path-like
A path to convert.
Returns
-------
fn : str
A relative path if the file is underneath the top-level project directory, or an
absolute path otherwise.
"""
fn = pathlib.Path(fn).resolve()
try:
return fn.relative_to(pathlib.Path.cwd())
except ValueError:
return fn | 9efbd0e3b4585315a6c87c0fb3a6cf1082948f11 | 694,146 |
import re
def normalize(string):
    """
    Normalize a transcription string.

    Rules applied in order: keep the first (orthographic) half of
    "(a)/(b)" pairs, drop non-speech markers (n/, b/, o/, l/, u/),
    remove punctuation, and collapse whitespace.

    Arguments
    ---------
    string : str
        The string to be normalized

    Returns
    -------
    str
        The string normalized according to the rules
    """
    # keep the orthographic transcription from "(ortho)/(phonetic)"
    normalized = re.sub(r"\(([^)]*)\)\/\(([^)]*)\)", r"\1", string)
    # drop non-speech symbols
    normalized = re.sub(r"n/|b/|o/|l/|u/", "", normalized)
    # drop punctuation marks
    normalized = re.sub(r"[+*/.?!,]", "", normalized)
    # collapse runs of whitespace and trim the ends
    return re.sub(r"\s+", " ", normalized).strip()
def user_directory_path(instance, filename):
    """Build the upload path MEDIA_ROOT/user_<id>/<filename> for a
    file owned by ``instance.user``."""
    return f"user_{instance.user.id}/{filename}"
def key_for_value(d, looking):
    """Get the first top-level key associated with a value.

    Args:
        d: The dictionary.
        looking: The value to look for.

    Returns:
        The first key whose value equals ``looking``, or None if no
        key matches.
    """
    return next((k for k, v in d.items() if looking == v), None)
def getJsonNodeContent(node, consrc):
    """Stub: always returns the empty string.

    NOTE(review): both parameters are ignored — presumably this was
    meant to extract a node's content from a JSON source; confirm the
    intended behavior before relying on it.
    """
    return ''
import math
def cosine(r_tokens: list, s_tokens: list) -> float:
"""Computes cosine similarity.
COS(r, s) = |r ∩ s| / sqrt(|r| * |s|)
Parameters
----------
r_tokens : list
First token list.
s_tokens : list
Second token list.
Returns
-------
Cosine similarity of r and s.
"""
return len(set(r_tokens).intersection(s_tokens)) / math.sqrt(len(r_tokens) * len(s_tokens)) | 234b7298e8c0c29cbb3d79d420e63518decfe4e9 | 694,152 |
def _dir(m, skip=()):
"""
Get a list of attributes of an object, excluding
the ones starting with underscore
:param m: object, an object to get attributes from
:return: list, a list of attributes as strings
"""
return [a for a in dir(m) if not a.startswith('_') and a not in skip] | 2903d4f91031fa8b7a0fbb81679d879a16af37be | 694,153 |
from pathlib import Path
def path(tmp_path: Path) -> Path:
    """Fixture: location of the configuration file inside *tmp_path*."""
    return tmp_path.joinpath(".config", "config.toml")
import torch
def logistic_sampler(mu, s):
"""https://en.wikipedia.org/wiki/Logistic_distribution"""
u = torch.rand_like(mu)
x = torch.log(u) - torch.log(1-u)
return mu + s * x | b64fb8956eba431e94a97a3386460fd0e85952a7 | 694,155 |
def networkType(host):
    """Classify a host string as 'onion', 'IPv4' or 'IPv6'.

    A '.onion' suffix wins; otherwise the presence of ':' marks IPv6.
    """
    if '.onion' in host:
        return 'onion'
    if ':' in host:
        return 'IPv6'
    return 'IPv4'
import os
def make_dir(dir_path):
"""
:param dir_path: new directory path
:return: str path
"""
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
return dir_path | 08eb4c74b38d3c67fd6d9c24fecb6d1eb8277d43 | 694,157 |
def generate_identifier(order):
    """Return a unique identifier by concatenating a lowercased stripped
    version of firstname and lastname of the ninja.

    :param order: mapping with 'Voornaam' (first name) and 'Achternaam'
        (last name) keys
    """
    # get first and last names and convert to lowercase
    first_name = order.get('Voornaam').lower()
    last_name = order.get('Achternaam').lower()
    # str.translate(None, ' ') was Python 2 API and raises TypeError on
    # Python 3; replace() strips the spaces portably.
    return (first_name + last_name).replace(' ', '')
def hasProperty(propertyName, typeObj):
    """Check whether the given type declares a property with the given
    name.

    Keyword arguments:
    propertyName -- name of the property to look for
    typeObj -- type object whose ``properties`` list is inspected
    """
    if not hasattr(typeObj, 'properties'):
        return False
    return any(prop.name == propertyName for prop in typeObj.properties)
import logging
def validate_input(key, value):
    """Validate a user input.

    Ensures the user entered a non-empty value for the named field and
    returns the appropriate status code and message (logging a warning
    for empty values).

    Args:
        key (str): name of input to validate (e.g. username, filename)
        value (str): value of input to validate (e.g. user1, file.png)

    Returns:
        dict: {"code": ..., "msg": ...} — 400 when empty, 200 otherwise
    """
    if len(value) == 0:
        message = "Field " + key + " cannot be empty."
        logging.warning(message)
        return {"code": 400, "msg": message}
    return {"code": 200, "msg": "Request was successful"}
import subprocess
def GetFileArchTypes(file_path):
"""Gets the architecture types of the file."""
output = subprocess.check_output(['/usr/bin/lipo', file_path,
'-archs']).decode('utf-8').strip()
return output.split(' ') | 437947cec9b6096e5b517fdf97b562ad7496f90d | 694,161 |
def db_name(request, pytestconfig):
    """Resolve the influxdb database name.

    The ``--influxdb_name`` command-line option wins; when empty, fall
    back to the ``influxdb_name`` ini setting.

    :return: 'db_name' value
    """
    from_option = request.config.getoption("--influxdb_name")
    if from_option:
        return from_option
    return pytestconfig.getini("influxdb_name")
def index_schema(schema, path):
    """Index a JSON schema with a path-like string.

    Walks '/'-separated property names down nested object schemas and
    returns the sub-schema at the end of the path.

    Raises ValueError when a non-object schema is traversed or a path
    segment is not a declared property.
    """
    node = schema
    for section in path.split("/"):
        if node["type"] != "object":
            raise ValueError(
                "Only object types are supported in the schema structure, "
                "but saw type %s" % node["type"]
            )
        props = node["properties"]
        if section not in props:
            raise ValueError("Invalid path %s in user options" % path)
        node = props[section]
    return node
import os
def list_files(path):
    """
    Return the full paths of all entries directly inside a directory.

    :param path: directory to list
    :return: list of joined paths (files and subdirectories alike)
    """
    entries = os.listdir(path)
    return [os.path.join(path, entry) for entry in entries]
def getIndexHash():
    """(Integration only)
    Retrieves the hashed value of the tenant in which ran in
    :return: Hashed value of tenant name
    :rtype: ``str``
    """
    # Stub: always returns the empty string — presumably replaced by a
    # real implementation in the integration environment; confirm.
    return ''
import base64
def getFilelistBase(path):
    """
    Build a base64-encoded PHP payload that lists every entry under
    *path* (name, type, mtime, size) as JSON wrapped in <ek>...</ek>
    markers.

    :param path: remote file-system path to list; interpolated into the
        PHP source verbatim (not escaped)
    :return: base64-encoded PHP source (str)

    .. warning:: NOTE(review): this generates code intended for remote
       execution on a PHP target (webshell-style tooling), and *path*
       is inserted unescaped, so quotes in it break/inject into the
       payload — confirm this is trusted input.
    """
    # The PHP source below must stay byte-identical: it is the runtime
    # payload, with %s as the only substitution point.
    code = """
header("Content-Type:application/json");
@ini_set("display_errors","0");
@set_time_limit(0);
@set_magic_quotes_runtime(0);
function getfile($path){
$i=0;
$res = array();
if($handler = opendir($path)){
while (($file = readdir($handler)) !==false){
$f = array();
$f["name"] = $file;
$f['type'] = filetype($path ."/". $file);
$f['time'] = date("Y-m-d H:i:s", filemtime($path ."/". $file));
$f['size'] = filesize($path ."/". $file);
$res[$i] = $f;
$i++;
}
closedir($handler);
}
echo ("<ek>");
echo json_encode($res);
echo ("</ek>");
}
getfile("%s");die();
"""% path
    return base64.b64encode(code.encode("UTF-8")).decode("UTF-8")
def dict_append_to_value_lists(dict_appendee, dict_new):
    """Append values from dict_new to the list of values stored under
    the same key in dict_appendee.

    Args:
        dict_appendee: dict with value lists (as created by the
            dict_values_to_lists function)
        dict_new: dict with new values to append, or None

    Returns:
        dict_appendee with the values appended, or None when dict_new
        is None

    Raises:
        TypeError: if a target value in dict_appendee is not a plain list
    """
    if dict_new is None:
        return None
    for key, new_value in dict_new.items():
        target = dict_appendee[key]
        # deliberately an exact type check, as in the original
        if type(target) != list:
            raise TypeError("Dict value is not a list")
        target.append(new_value)
    return dict_appendee
def super_digit(n: int) -> int:
    """Return the result of summing a number's digits until 1 digit
    remains (the digital root).

    Uses the mod-9 shortcut; fixed to return 0 for n == 0, where the
    original incorrectly returned 9.
    """
    if n == 0:
        return 0
    remainder = n % 9
    return remainder if remainder else 9
def highlight_max(data):
    """
    Style helper (pandas ``Styler.apply``): highlight the maximum in a
    Series or DataFrame.

    The three largest values in a Series get progressively lighter
    green backgrounds; everything else gets no styling. Non-Series
    input (or one containing "BERTopic") is returned unchanged.

    NOTE(review): fewer than three *distinct* values makes ``max()``
    run on an empty sequence and raise ValueError — confirm callers
    always pass at least three distinct values.
    """
    colors = ["#64db00", "#76FF03", "#e1ffc7"]
    attr = f"background-color: {colors[0]}"
    lighter_attr = f"background-color: {colors[1]}"
    lightest_attr = f"background-color: {colors[2]}"
    if (
        data.ndim == 1 and "BERTopic" not in data
    ):  # Series from .apply(axis=0) or axis=1
        maximum = data.max()
        # second/third-largest found by filtering out the larger values
        second_to_max = max([val for val in data if val != maximum])
        third_to_max = max([val for val in data if val not in [maximum, second_to_max]])
        to_return = []
        for value in data:
            # string cells never get a highlight, only the empty style
            if value == maximum and type(value) != str:
                to_return.append(attr)
            elif value == second_to_max and type(value) != str:
                to_return.append(lighter_attr)
            elif value == third_to_max and type(value) != str:
                to_return.append(lightest_attr)
            else:
                to_return.append("")
        return to_return
    else:
        return data
def load_from_lsf(script_file_name):
    """
    Load the provided script as a string with all '#' comments removed.

    Note: the stripped lines are concatenated without separators, so
    each statement is expected to be self-terminating.

    Parameters
    ----------
    :param script_file_name: string specifying a file name.

    Raises
    ------
    UserWarning: if the resulting script is empty.
    """
    pieces = []
    with open(script_file_name, 'r') as text_file:
        for line in text_file:
            code_part = line.strip().split(sep='#', maxsplit=1)[0]
            pieces.append(code_part)
    script = ''.join(pieces)
    if not script:
        raise UserWarning('empty script.')
    return script
def has_activity(destination, activity_name):
    """Test if a given activity is available at the passed
    destination/event/tour; False when destination is falsy."""
    if not destination:
        return False
    return destination.has_activity(activity_name)
# Unit steps for each supported direction letter (lower-cased).
_DIRECTION_STEPS = {"u": (0, 1), "d": (0, -1), "l": (-1, 0), "r": (1, 0)}


def calculate_coordinates(pth):
    """
    Create a set of tuples representing the coordinates that the path
    traverses, with a starting point at (0,0).

    Each instruction is a direction letter (U/D/L/R, any case) followed
    by an integer distance, e.g. "R12". The origin itself is not
    included unless the path revisits it.

    Raises:
        ValueError: on an unrecognized direction letter (a compatible
        refinement of the original bare Exception).
    """
    x = y = 0
    coords = set()
    for instruction in pth:
        direction = instruction[:1]
        distance = int(instruction[1:].strip())
        step = _DIRECTION_STEPS.get(direction.lower())
        if step is None:
            raise ValueError(f"Unknown direction {direction}")
        dx, dy = step
        # walk one cell at a time so every visited coordinate is kept
        for _ in range(distance):
            x += dx
            y += dy
            coords.add((x, y))
    return coords
def calculate_interval(pi, chrm, pos):
    """
    Determine how a position is mapped to an interval number using pi.

    Arguments:
        pi (dict of lists of (start, end, ind) tuples): the pi map
        chrm (int): chromosome number
        pos (int): SNP location

    Returns:
        The interval associated with pos on chrm if the mapping is
        successful, otherwise None (unknown chromosome or uncovered
        position).
    """
    try:
        intervals = pi[chrm]
    except KeyError:
        # chromosome was not found in pi
        return None
    for start, end, ind in intervals:
        if start <= pos <= end:
            return ind
    # position was not found in any interval
    return None
def ethiopian_calc(Z1, Z2):
    """Multiply two numbers using Ethiopian (peasant) multiplication.

    The smaller factor is repeatedly halved (discarding remainders)
    while the larger is doubled; the doubled values aligned with odd
    halves are summed to form the product.
    """
    small, big = (Z1, Z2) if Z1 < Z2 else (Z2, Z1)
    result = 0
    while small >= 1:
        if small % 2:
            result += big
        small //= 2
        big *= 2
    return result
import os
def get_path():
"""Returns the path of this template directory"""
return os.path.dirname(os.path.abspath(__file__)) | 83bd58cc76db257f316b023af413a1308b73e137 | 694,178 |
def get_fpn_config(base_reduction=8):
    """BiFPN config with sum.

    Builds the node wiring of a BiFPN layer: each entry gives the
    feature-map ``reduction`` (stride) of the fused node and
    ``inputs_offsets`` indexing previously produced nodes.

    NOTE(review): despite the "with sum" summary, ``weight_method`` is
    'fastattn' — confirm which fusion the callers expect.
    """
    p = {
        'nodes': [
            # top-down path: strides 64 -> 8 (for base_reduction=8)
            {'reduction': base_reduction << 3, 'inputs_offsets': [3, 4]},
            {'reduction': base_reduction << 2, 'inputs_offsets': [2, 5]},
            {'reduction': base_reduction << 1, 'inputs_offsets': [1, 6]},
            {'reduction': base_reduction, 'inputs_offsets': [0, 7]},
            # bottom-up path: strides 16 -> 128
            {'reduction': base_reduction << 1, 'inputs_offsets': [1, 7, 8]},
            {'reduction': base_reduction << 2, 'inputs_offsets': [2, 6, 9]},
            {'reduction': base_reduction << 3, 'inputs_offsets': [3, 5, 10]},
            {'reduction': base_reduction << 4, 'inputs_offsets': [4, 11]},
        ],
        'weight_method': 'fastattn',
    }
    return p
def save_selected_w(Wee, selection):
    """Save the incoming weights of selected neurons.

    Returns a dict mapping each index in ``selection`` to the
    corresponding column of ``Wee``.
    """
    return {neuron: Wee[:, neuron] for neuron in selection}
def map_to_fasttext_language(lang):
    """
    Map 'zh-x-oversimplified' to 'zh' for language identification.
    Any other code is returned unchanged.
    """
    if lang == 'zh-x-oversimplified':
        return 'zh'
    return lang
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.