content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import os
def get_default_number_of_electrons(calc, filename):
    """
    Return the default electrons for each species.
    from https://github.com/jkitchin/vasp#vaspget_default_number_of_electrons

    :param calc: calculator object -- assumed to be a VASP calculator
        exposing ``directory``, ``write_input`` and ``get_atoms``;
        TODO confirm against callers.
    :param filename: path to a POTCAR file, or None to use the POTCAR
        in ``calc.directory`` (input files are written first).
    :return: list of (symbol, valence) tuples, one per TITEL entry.
    """
    if filename is None:
        filename = os.path.join(calc.directory, 'POTCAR')
        # Write the input files so the POTCAR exists before parsing.
        calc.write_input(calc.get_atoms())
    nelect = []
    # NOTE(review): file handle is never closed -- consider a ``with`` block.
    lines = open(filename).readlines()
    for n, line in enumerate(lines):
        if line.find('TITEL') != -1:
            # e.g. "TITEL = PAW_PBE Fe_pv ..." -> species symbol "Fe"
            symbol = line.split('=')[1].split()[1].split('_')[0].strip()
            # Assumes the valence value sits 4 lines below TITEL in the
            # "...; ZVAL = <n> ..." form -- TODO confirm POTCAR layout.
            valence = float(lines[n + 4].split(';')[1]
                            .split('=')[1].split()[0].strip())
            nelect.append((symbol, valence))
    return nelect
def gcd(x, y):
    """Return the greatest common divisor of x and y (Euclid's algorithm)."""
    while y != 0:
        x, y = y, x % y
    return x
def _encode_for_display(text):
"""
Encodes strings so they can display as ASCII in a Windows terminal window.
This function also encodes strings for processing by xml.etree.ElementTree functions.
Returns an ASCII-encoded version of the text.
Unicode characters are converted to ASCII placeholders (for example, "?").
"""
return text.encode('ascii', errors="backslashreplace").decode('utf-8') | 59fa895204b764a105f0cc73f87f252c64e62871 | 694,624 |
def d_theta_parabolic(C, k, clk):
    """
    A downwards concave parabola of form:
        f(x) = -c(x)(x - a)
    Mapping of arguments onto the formula:
        C -> x, k -> c, clk -> a
    """
    # Algebraically identical to -1 * k * C * (C - clk).
    return k * C * (clk - C)
def get_blueprints(bot, base):
    """Get the blueprints associated with the given base.

    Also reports whether the command is a shortcut. If *base* is not
    found in the commands dictionary, returns (None, None).
    """
    try:
        command = bot.commands[base]
        return (command.blueprints, base in command.shortcut.bases)
    except KeyError:
        return (None, None)
def iterative_gcd(x: int, y: int):
    """Compute gcd(x, y) iteratively; the loop stops once y reaches 0."""
    while y != 0:
        x, y = y, x % y
    return x
def is_good_response(res, content_type='html'):
    """
    Return True if *res* is a 200 response whose Content-Type header
    contains *content_type* (default 'html'), False otherwise.
    """
    # Bug fix: the original rebound the ``content_type`` parameter to the
    # response header and then searched the header for itself, which is
    # always true -- the caller's expected type was never checked.
    header = res.headers.get('Content-Type')
    if header is None:
        return False
    return res.status_code == 200 and header.lower().find(content_type.lower()) > -1
def replace(template, ctx):
    """Replace placeholders with their values and return the result.

    Example:
        >>> replace("$NAME is down", {"$NAME": "foo"})
        foo is down

    Only placeholders present in the original template are substituted;
    placeholders that "emerge" from a substitution are left untouched
    ("variable variables" are deliberately ignored). This avoids
    surprises when check names or tags contain dollar signs.
    """
    chunks = template.split("$")
    # The text before the first "$" can never contain a placeholder.
    out = [chunks[0]]
    for chunk in chunks[1:]:
        candidate = "$" + chunk
        # At most one substitution per original "$" position.
        for placeholder, value in ctx.items():
            if candidate.startswith(placeholder):
                candidate = candidate.replace(placeholder, value, 1)
                break
        out.append(candidate)
    return "".join(out)
def can_infer(num_sensed_blocked: int, num_confirmed_blocked: int, num_sensed_unblocked: int,
              num_confirmed_unblocked: int):
    """
    Check whether we can infer anything from the current set of variables.

    :param num_sensed_blocked: number of sensed blocks
    :param num_confirmed_blocked: number of confirmed blocks
    :param num_sensed_unblocked: number of sensed unblocks
    :param num_confirmed_unblocked: number confirmed unblocks
    :return: True if we can infer anything otherwise False
    """
    # Precondition: confirmed counts can never exceed sensed counts.
    assert (num_sensed_blocked >= num_confirmed_blocked) and (num_sensed_unblocked >= num_confirmed_unblocked)
    blocked_resolved = num_sensed_blocked == num_confirmed_blocked
    unblocked_resolved = num_sensed_unblocked == num_confirmed_unblocked
    # Inference is possible exactly when one side is fully resolved and
    # the other still has pending cells (XOR, given the precondition).
    return blocked_resolved != unblocked_resolved
def get_contacts(filename):
    """
    Return two lists (names, emails) containing names and email addresses
    read from a file specified by *filename*.

    Each line is expected to be "<name> <email>"; blank or malformed
    lines (fewer than two whitespace-separated tokens) are skipped.
    """
    names = []
    emails = []
    with open(filename, mode='r', encoding='utf-8') as contacts_file:
        for a_contact in contacts_file:
            lineofdata = a_contact.split()
            # Robustness fix: the original raised IndexError on blank or
            # single-token lines (e.g. a trailing newline).
            if len(lineofdata) < 2:
                continue
            names.append(lineofdata[0])
            emails.append(lineofdata[1])
    return names, emails
def _int32_to_bytes(i):
# NOTE: This course is done on a Mac which is little-endian
"""Convert an integer to four bytes in little-endian format."""
# &: Bitwise 'and'
# >>: Right shift
return bytes((i & 0xff,
i >> 8 & 0xff,
i >> 16 & 0xff,
i >> 24 & 0xff)) | 61606a87d7f074117637b3a322dcded43a514cd0 | 694,632 |
from typing import Optional
from typing import Dict
from typing import Any
def _serialize_input_metadata_field(
input_metadata_dictionary: Optional[Dict[str, Any]]
) -> Optional[Dict[str, str]]:
"""Serialize input metadata, converting GraphQLTypes to strings."""
# It is possible to have an empty input metadata dictionary (i.e. no inputs for the query).
# Note that this is different than "None", which means no metadata was provided.
if input_metadata_dictionary == {}:
return {}
if input_metadata_dictionary is None:
return None
dictionary_value = {}
for input_name, input_type in input_metadata_dictionary.items():
dictionary_value[input_name] = str(input_type)
return dictionary_value | 0fc7120b975622fb986664047e0a5877f5194b4a | 694,633 |
from typing import OrderedDict
import os
def return_input_paths(job, work_dir, ids, *args):
    """
    Returns the paths of files from the FileStore
    Input1: Toil job instance
    Input2: Working directory
    Input3: jobstore id dictionary
    Input4: names of files to be returned from the jobstore
    Returns: path(s) to the file(s) requested -- unpack these!
    """
    paths = OrderedDict()
    for name in args:
        # Only fetch from the FileStore when the file is not already local.
        if not os.path.exists(os.path.join(work_dir, name)):
            file_path = job.fileStore.readGlobalFile(ids[name], os.path.join(work_dir, name))
        else:
            file_path = os.path.join(work_dir, name)
        paths[name] = file_path
    # Single requested file: return its path directly rather than a collection.
    # NOTE(review): with zero *args this raises NameError (file_path unbound),
    # and the multi-file branch returns a dict_values view, not a list --
    # confirm callers expect that.
    if len(args) == 1:
        return file_path
    return paths.values()
import math
def calculateCents(referenceScale, newScale):
    """Take two arrays of frequencies and calculate the cents difference
    per entry, rounded to 2 decimals. Returns one value per reference
    frequency."""
    cents_list = []
    for index, reference_freq in enumerate(referenceScale):
        ratio = newScale[index] / reference_freq
        cents_list.append(round((math.log(ratio) / math.log(2)) * 1200, 2))
    return cents_list
def usage(err=''):
    """Build the Usage() message for the program, prefixed by *err*."""
    # Message text reproduced verbatim (including original typos).
    parts = [
        '%s\n' % err,
        ' Default usage is to seach the Scan the currect directory for PE checkpoints.\n',
        ' It will look for linked CRs in a peer directory',
        ' create_pe_load \n',
    ]
    return ''.join(parts)
import os
def build_openweathermap_base_url() -> str:
    """
    Build the base url for an openweathermap current_request.
    The app id is read from the OPENWEATHERMAP_APP_ID environment variable.
    :return: base url string that still needs lat and lng params inserted
    """
    app_id = os.environ.get('OPENWEATHERMAP_APP_ID')
    endpoint = 'https://api.openweathermap.org/data/2.5/weather'
    # {lat}/{lng} are left as literal braces for the caller to format.
    return endpoint + f'?lat={{lat}}&lon={{lng}}&appid={app_id}&units=metric'
def is_float(n: str) -> bool:
    """
    Check whether a string is a valid float.

    Parameters
    ----------
    n: str
        The string to check.

    Returns
    -------
    bool
    """
    try:
        float(n)
    except ValueError:
        return False
    return True
import zipfile
import pathlib
async def make_zip_archive(
    input_directory_path: str, output_file_path: str,
) -> dict:
    """
    Create a zip file of a directory.

    Note: entries are stored flat under their base name (``file.name``),
    so files with equal names in different subdirectories collide.

    Parameters
    ----------
    input_directory_path : str
        Path to directory to be archived
    output_file_path : str
        Path where the output archive should be saved (should include file name)

    Returns
    -------
    dict
        Path to the output zip file, under the key ``"output_path"``.
    """
    # Renamed the handle from ``zip`` to avoid shadowing the builtin.
    with zipfile.ZipFile(output_file_path, "w") as archive:
        for entry in pathlib.Path(input_directory_path).rglob("*"):
            archive.write(entry, entry.name)
    return {"output_path": output_file_path}
def flowDiff(graph1, graph2):
    """
    Return the difference between the flows of two graphs.

    Parameters
    ----------
    graph1: networkx obj
    graph2: networkx obj

    Returns
    -------
    dict mapping each edge of graph1 to flow(graph1) - flow(graph2).
    """
    differences = {}
    for u, v in graph1.edges():
        differences[(u, v)] = graph1[u][v]['flow'] - graph2[u][v]['flow']
    return differences
import argparse
def parse_args():
    """Parse command-line arguments.

    Returns the parsed arguments as a plain dict (``vars(...)``), keyed by
    destination name (e.g. ``source_corpus_name``, ``n_samples``).
    """
    parser = argparse.ArgumentParser()
    # Positional arguments: corpora names and directories.
    parser.add_argument("source_corpus_name", type=str, help="source corpus name.")
    parser.add_argument("source_dir", type=str, help="source dir.")
    parser.add_argument("target_corpus_name", type=str, help="target corpus name.")
    parser.add_argument("target_dir", type=str, help="target dir.")
    parser.add_argument("-o", "--output_dir", type=str, help="path of pickle file.")
    parser.add_argument(
        "-sp", "--source_parser_name", type=str, default=None, help="source parser name"
    )
    parser.add_argument(
        "-tp", "--target_parser_name", type=str, default=None, help="target parser name"
    )
    parser.add_argument(
        "-n", "--n_samples", type=int, default=2000, help="number of samples."
    )
    parser.add_argument(
        "-nt",
        "--n_target_samples",
        type=int,
        default=5,
        help="number of target samples.",
    )
    # Seeds default to None, i.e. non-deterministic sampling.
    parser.add_argument(
        "--s_seed",
        type=int,
        default=None,
        help="The random seed for sampling source utterances.",
    )
    parser.add_argument(
        "--t_seed",
        type=int,
        default=None,
        help="The random seed for sampling target utterances.",
    )
    return vars(parser.parse_args())
import shutil
import subprocess as sp
from pathlib import Path
import sys
def get_git_lines(fname="line-contributors.txt"):
    """Run git-line-summary.

    Returns the contributor names extracted from cached or freshly
    generated git-line-summary output. Results are cached in *fname*;
    delete the file to force a re-run.
    """
    contrib_file = Path(fname)
    lines = []
    # Reuse a cached summary if one exists.
    if contrib_file.exists():
        print("WARNING: Reusing existing line-contributors.txt file.", file=sys.stderr)
        lines = contrib_file.read_text().splitlines()
    git_line_summary_path = shutil.which("git-line-summary")
    if not lines and git_line_summary_path:
        print("Running git-line-summary on repo")
        lines = sp.check_output([git_line_summary_path]).decode().splitlines()
        # Drop uncommitted-changes pseudo-authors, then cache the result.
        lines = [l for l in lines if "Not Committed Yet" not in l]
        contrib_file.write_text("\n".join(lines))
    if not lines:
        # The second message is appended only when the tool is missing:
        # multiplying a str by a bool yields '' (False) or the str (True).
        raise RuntimeError(
            """\
Could not find line-contributors from git repository.%s"""
            % """ \
git-line-summary not found, please install git-extras. """
            * (git_line_summary_path is None)
        )
    # Keep only percentage lines; strip the leading label and trailing count.
    return [" ".join(line.strip().split()[1:-1]) for line in lines if "%" in line]
def range_factorial(num):
    """Compute num! by iterating over a range (returns 1 for num <= 0)."""
    # Idiom fix: the original manually drove an iterator with next() and
    # caught StopIteration -- a plain for-loop is the equivalent construct.
    result = 1
    for factor in range(1, num + 1):
        result *= factor
    return result
def depth_of_tree(root):
    """
    :param root: tree root
    :return: height of tree (0 for an empty tree)
    """
    if not root:
        return 0
    return 1 + max(depth_of_tree(root.left), depth_of_tree(root.right))
def tryint(s):
    """Try casting *s* to an int; if that fails, return *s* unchanged."""
    try:
        value = int(s)
    except ValueError:
        value = s
    return value
import mimetypes
def guess_mimetype(resource_path):
    """
    Guess the mimetype of a given resource.

    Args:
        resource_path: the path to a given resource.
    Returns:
        The mimetype string, or "application/octet-stream" when unknown.
    """
    guessed, _encoding = mimetypes.guess_type(resource_path)
    return "application/octet-stream" if guessed is None else guessed
def hex_8bit(value):
    """Converts 8bit value into bytearray.

    args:
        8bit value (0..255)
    returns:
        bytearray of size 1
    raises:
        ValueError: if the value does not fit in 8 bits.
    """
    if value > 0xff or value < 0:
        # ValueError is more precise than a bare Exception and is still
        # caught by any existing ``except Exception`` callers.
        raise ValueError('Sar file 8bit value %s out of range' % value)
    return value.to_bytes(1, 'little')
import math
def F17(x):
    """Griewank function:
    f(x) = 1 + sum_i x_i**2 / 4000 - prod_i cos(x_i / sqrt(i+1))
    """
    total = 0
    prod = 1
    for i in range(len(x)):
        # Bug fix: Griewank's sum term is x_i**2 / 4000; the original
        # omitted the square, contradicting the documented function.
        total += x[i] ** 2 / 4000
        prod *= math.cos(x[i] / math.sqrt(i + 1))
    return total - prod + 1
def lower(s):
    """Return the string *s* converted to lowercase."""
    return str.lower(s)
def strftime(value, arg):
    """
    Call an object's strftime method with *arg*; falsy values yield None.
    """
    return value.strftime(arg) if value else None
from unittest.mock import call
def create_checkout_path(org, path):
    """Create a checkout path for a metacheckout.

    Creates the directory ``<path>/<org>`` (including parents) and
    returns its path.
    """
    outdir = "%s/%s" % (path, org)
    # Bug fix: the original called ``unittest.mock.call(...)`` (imported at
    # the top of the file), which only builds a mock call record -- the
    # directory was never created. Create it directly instead.
    import os
    os.makedirs(outdir, exist_ok=True)
    return outdir
def episode_title(self):
    """Return episode title: the ``subtitle`` field of
    ``self.media_metadata``, or None when absent."""
    return self.media_metadata.get("subtitle")
def model_docstring(cls, header='', indent='', footer=''): #pragma: no cover
    """Return a docstring from a list of defaults.

    Builds one entry per property returned by ``cls.find_properties()``
    (assumed to map name -> object with ``help`` and ``default``
    attributes -- TODO confirm), wrapped between *header* and *footer*.
    """
    #width = 60
    #hbar = indent + width * '=' + '\n' # horizontal bar
    hbar = '\n'
    props = cls.find_properties()
    s = hbar + (header) + hbar
    for key, val in props.items():
        # "name :" label line, then the help text re-indented so
        # continuation lines align, then "[default]".
        s += indent +'%-12s\n' % ("%s :" % key)
        s += indent + indent + (indent + 23 * ' ').join(val.help.split('\n'))
        s += ' [%s]\n\n' % str(val.default)
    s += hbar
    s += footer
    return s
def update_list(V_in, V_not):
    """Return the vertices of V_in that do not appear in V_not."""
    kept = []
    for vertex in V_in:
        if vertex not in V_not:
            kept.append(vertex)
    return kept
def weed_out_short_notes(pairs, **kwargs):
    """Drop (note, duration) pairs whose duration does not exceed the
    threshold (kwarg ``duration_threshold``, default 0.25)."""
    duration_threshold = kwargs.get('duration_threshold', 0.25)
    kept = []
    for note, duration in pairs:
        if duration > duration_threshold:
            kept.append((note, duration))
    return kept
def datetime_format(datetime_obj, fmtstring = '%Y-%m-%d'):
    """
    Format a datetime.datetime object as a string via strftime.
    """
    formatted = datetime_obj.strftime(fmtstring)
    return formatted
import requests
def _component_id_from_target(target_chembl_id):
    """
    Use ChEMBL API: Go to `target` endpoint and extract `component_id`.

    :param target_chembl_id: ChEMBL target identifier (e.g. "CHEMBL203").
    :return: list of component ids, one per target component.
    :raises requests.HTTPError: on a non-2xx API response.
    """
    target_url = f"https://www.ebi.ac.uk/chembl/api/data/target/{target_chembl_id}.json"
    response = requests.get(target_url)
    # Fail loudly on HTTP errors instead of parsing an error payload.
    response.raise_for_status()
    result = response.json()
    component_ids = [i["component_id"] for i in result["target_components"]]
    return component_ids
def start_ea(obj):
    """
    Return start ea for supplied object.

    Supports both the legacy ``startEA`` attribute and the newer
    ``start_ea`` naming.

    :param obj: Object to retrieve start ea.
    :return: start ea, or None for a falsy object.
    """
    if not obj:
        return None
    if hasattr(obj, 'startEA'):
        return obj.startEA
    return obj.start_ea
def has_tx_succeeded(tx_receipt):
    """
    We check tx has succeeded by reading the `status` on the `tx_receipt`
    More info: https://docs.pantheon.pegasys.tech/en/latest/Reference/JSON-RPC-API-Objects/#transaction-receipt-object
    """
    # Idiom fix: the comparison already yields the desired bool; no need
    # for an explicit if/return True/return False.
    return tx_receipt.get("status") == 1
def derive_nums_from_text(comments, regex):
    """Given a string, derive the set of case numbers matched by the
    given compiled regex (empty matches are skipped)."""
    found = set()
    for num in regex.findall(comments):
        if num:
            found.add(int(num))
    return found
def dg_pulse(Y, pop_from, pop_to, pulse_weight):
    """
    A pulse migration event.

    Different from merger, where the two parental populations are replaced by the
    admixed population. Here, pulse events keep the current populations in place.

    :param Y: demography object exposing ``pop_ids`` and ``pulse_migrate``
        -- presumably a moments/demes-style model object; TODO confirm.
    :param pop_from: source population id (must be in ``Y.pop_ids``).
    :param pop_to: destination population id.
    :param pulse_weight: fraction of ancestry transferred in the pulse.
    :return: the (possibly updated) demography object.
    """
    if pop_to in Y.pop_ids:
        ind_from = Y.pop_ids.index(pop_from)
        ind_to = Y.pop_ids.index(pop_to)
        # NOTE(review): pulse_migrate appears to use 1-based population
        # indices (hence the +1) -- confirm against its API.
        Y = Y.pulse_migrate(ind_from+1, ind_to+1, pulse_weight)
    else:
        # Unknown destination: warn and return Y unchanged.
        print("warning: pop_to in pulse_migrate isn't in present pops")
    return Y
def display_banner():
    """ Returns Banner """
    # Raw ASCII-art banner returned verbatim; backslashes inside the
    # literal are escaped, so the rendered art differs from the source.
    return ('''
     _ _ _ __
    | | (_) |/ _|
 _ __ __ _ ___ _____ _____ _ __ __| | _ __ _| | |_ ___ _ __
| '_ \ / _` / __/ __\ \ /\ / / _ \| '__/ _` | | '_ \| | | _/ _ \ '__|
| |_) | (_| \__ \__ \\\\ V V / (_) | | | (_| | | |_) | | | || __/ |
| .__/ \__,_|___/___/ \_/\_/ \___/|_| \__,_| | .__/|_|_|_| \___|_|
| |                ______| |
|_|               |______|_|
    ''')
def core(fn=None, **flags):
    """Wrap a graph that defines a core Myia function.

    The following flags can be set:
        core: (default: True) Indicates that this is a core function
            (only informative at the moment).
        ignore_values: (default: False) Make the inferrer ignore argument
            values for the parameters (leads to less specialization).
    """
    # 'core' marks a function defined in Myia's core; caller flags may
    # override it.
    merged_flags = {'core': True}
    merged_flags.update(flags)

    def deco(func):
        func._myia_flags = merged_flags
        return func

    # Support both @core and @core(flag=...) usage.
    return deco if fn is None else deco(fn)
def _evaluate_cubic_spline_derivative_one(x, y, r, t):
    """Evaluate one point on the first derivative of the cubic spline.

    Parameters:
        x : rank-1 np.array of np.float64, length 2
            data x coordinates
        y : rank-1 np.array of np.float64, length 2
            data y coordinates
        r : rank-1 np.array of np.float64, length 2
            corresponding elements of output of solve_coeffs() for your data
        t : float
            point where to evaluate. Must be between the given x values.
    Returns:
        s : float
            Value of the derivative at the point t.
    """
    # Interval width and cubic coefficients of the segment polynomial
    # s(lt) = a*lt**3 + b*lt**2 + c*lt + y[0] in normalized coordinate lt.
    h = x[1] - x[0]
    a = -2 * ( y[1] - y[0] ) + ( r[0] + r[1] ) * h
    b = 3 * ( y[1] - y[0] ) - ( 2*r[0] + r[1] ) * h
    c = r[0] * h
    lt = (t - x[0]) / h  # normalized position in [0, 1]
    # Horner evaluation of the derivative, rescaled back by 1/h.
    return ((3*a*lt + 2*b) * lt + c)/h
import pickle
def get_challenges_for_user_id(database, user_id, ctf_channel_id):
    """
    Fetch a list of all challenges a user is working on for a given CTF.
    Return a list of matching Challenge objects.

    :param database: path to the pickled CTF registry file.
    :param user_id: player identifier looked up in each challenge's players.
    :param ctf_channel_id: key of the CTF inside the pickled dict.
    """
    # SECURITY NOTE(review): pickle.load executes arbitrary code from the
    # file -- only use with a trusted, locally generated database.
    # NOTE(review): the file handle passed to pickle.load is never closed.
    ctfs = pickle.load(open(database, "rb"))
    ctf = ctfs[ctf_channel_id]
    challenges = []
    for challenge in ctf.challenges:
        if user_id in challenge.players:
            challenges.append(challenge)
    return challenges
def make_gmm_unaries(X, fg_gmm, bg_gmm):
    """
    Make unaries by log probability under GMM.

    Take
        X: data in N x K ndarray where N is no. samples and K is no. features
        fg_gmm, bg_gmm: fitted sklearn.mixture.gmm
    Give
        fg_unary, bg_unary: log probabilities under the fg and bg gmms resp.
    """
    # NOTE(review): unpacking a tuple from score_samples matches the old
    # sklearn.mixture.GMM API (scores, responsibilities); the modern
    # GaussianMixture returns a single array -- confirm sklearn version.
    fg_un, _ = fg_gmm.score_samples(X)
    bg_un, _ = bg_gmm.score_samples(X)
    return fg_un, bg_un
import os
def resolve_resource_file(res_name):
    """Convert a resource into an absolute filename.

    Resource names are in the form: 'filename.ext'
    or 'path/filename.ext'

    The system wil look for ~/.mycroft/res_name first, and
    if not found will look at /opt/mycroft/res_name,
    then finally it will look for res_name in the 'mycroft/res'
    folder of the source code package.

    Example:
        With mycroft running as the user 'bob', if you called
        resolve_resource_file('snd/beep.wav')
        it would return either '/home/bob/.mycroft/snd/beep.wav' or
        '/opt/mycroft/snd/beep.wav' or '.../mycroft/res/snd/beep.wav',
        where the '...' is replaced by the path where the package has
        been installed.

    Args:
        res_name (str): a resource path/name

    Returns:
        str or None: absolute path of the first match, or None.
    """
    # First look for fully qualified file (e.g. a user setting)
    if os.path.isfile(res_name):
        return res_name
    # Now look for ~/.mycroft/res_name (in user folder)
    filename = os.path.expanduser("~/.mycroft/" + res_name)
    if os.path.isfile(filename):
        return filename
    # Next look for /opt/mycroft/res_name (comment fixed: the code does
    # NOT insert a 'res' component here)
    filename = os.path.expanduser("/opt/mycroft/" + res_name)
    if os.path.isfile(filename):
        return filename
    # Finally look for it in the source package's res/ directory
    filename = os.path.join(os.path.dirname(__file__), '..', 'res', res_name)
    filename = os.path.abspath(os.path.normpath(filename))
    if os.path.isfile(filename):
        return filename
    return None
def d(value: bytes) -> str:
    """
    Decode a bytestring for interpolating into an error message.
    Undecodable bytes are rendered as backslash escapes.
    """
    decoded = value.decode(errors="backslashreplace")
    return decoded
def impact_seq(impact_list):
    """String together all selected impacts in *impact_list*: each
    stripped ``iname`` prefixed with '-' (empty string for no impacts)."""
    return ''.join('-' + impact.iname.strip() for impact in impact_list)
def new_kernel ( lines = 3, columns = 3 ):
    """Cria kernel a partir de lines e columns (linhas e colunas).

    Create a kernel of ones from lines and columns.

    Args:
        lines: number of kernel rows, default = 3
        columns: number of kernel columns, default = 3
    Returns:
        kernel (lines x columns)
    """
    # Bug fix: the original comprehension iterated columns on the outside
    # and lines on the inside, producing a columns x lines (transposed)
    # matrix -- contradicting its own docstring. Square defaults hid it.
    return [ [ 1 for _ in range( columns ) ] for _ in range( lines ) ]
import re
def isIntStr(numstr):
    """Return True if str(numstr) contains only digits 0-9."""
    return re.match("^[0-9]+$", str(numstr)) is not None
def url_to_path(url: str):
    """
    Convert URL to path
    """
    result = url
    # Order matters: "://" must be handled before "." and "/".
    for old, new in (("://", "___"), (".", "__"), ("/", "_")):
        result = result.replace(old, new)
    return result
import subprocess
import re
def ping_to_site(host_to_ping, host_name, latency):
    """
    Check connectivity to a host via ping and verify its latency.

    :param host_to_ping: host to ping to test connectivity
    :param host_name: display name of the host (used in error messages)
    :param latency: latency threshold in ms required to consider the
        connection healthy
    :return: tuple (True, ping_time) when the ping succeeds under the
        threshold; otherwise raises Exception describing the problem
    """
    if host_to_ping:
        # One ping (-c 1) with a 2-second timeout (-W 2); Linux ping flags.
        ping_result = subprocess.run(["ping", host_to_ping, "-c", "1", "-W", "2"], stdout=subprocess.PIPE)
        if ping_result.returncode == 0:
            result = ping_result.stdout.decode()
            # Extract the round-trip time from the "time=<ms>" field.
            ping_time = float(re.findall(r'time=([0-9.]+)', result)[0])
            if ping_time < latency:
                return True, ping_time
            raise Exception("La conexión con {host_name} es demasiado lenta".format(host_name=host_name))
        raise Exception("No se puede establecer conexión con el servidor {host_name}".format(host_name=host_name))
    raise Exception("Debe ingresar un dominio para poder hacer PING")
import requests
def verify_physionet_credentials(username, password):
    """Return True if the username and password are valid for the
    physionet.org website (checked via HTTP basic auth)."""
    url = "https://physionet.org/works/MIMICIIIClinicalDatabase/files/"
    r = requests.get(url, auth=(username, password))
    # Idiom fix: the comparison already yields the desired bool; no need
    # for ``True if ... else False``.
    return r.status_code == 200
def _find_start_entry(line, n):
    """Find the starting character index for entry ``n`` in a space
    delimited ``line`` (PRIVATE).

    n is counted starting with 1.
    The n=1 field by definition begins at the first character.

    Returns
    -------
    starting index : int
        The index of the starting character for entry ``n``.
        (Docstring fixed: the function returns an int, not a str.)
    """
    # This function is used by replace_entry
    if n == 1:
        return 0  # Special case
    # Count the number of fields by counting spaces
    c = 1
    leng = len(line)
    # Initialize variables according to whether the first character
    # is a space or a character
    if line[0] == " ":
        infield = False
        field = 0
    else:
        infield = True
        field = 1
    # Walk the line, incrementing ``field`` on each space->char transition.
    while c < leng and field < n:
        if infield:
            if line[c] == " " and line[c - 1] != " ":
                infield = False
        else:
            if line[c] != " ":
                infield = True
                field += 1
        c += 1
    return c - 1
import numpy
def derivation(array, order=1):
    """
    Return the order-th discrete difference of *array* along axis 0.

    :param array: input array
    :type array: numpy.ndarray
    :param order: order of derivation, defaults to 1
    :type order: int, optional
    :return: derivation
    :rtype: numpy.ndarray
    """
    return numpy.diff(array, n=order, axis=0)
def startswith_whitespace(text):
    """Check if *text* starts with a whitespace character.

    Non-str input (and the empty string) yields False.
    """
    return isinstance(text, str) and text[:1].isspace()
import argparse
def parse(arg=None):
    """Define configuration of postprocess (bin/target result paths)."""
    parser = argparse.ArgumentParser()
    for option in ('--bin_path', '--target_path'):
        parser.add_argument(option, type=str, default='./result_Files/')
    return parser.parse_args(arg)
def get_df_from_excel(spark, file_path, sheet_name):
    """
    Create a dataframe from an excel file via the crealytics spark-excel
    reader.
    :param spark: spark Session
    :param file_path: hdfs path of the file
    :param sheet_name: name of the worksheet to read
    :return: dataframe
    """
    return (spark.read.format("com.crealytics.spark.excel")
            .option("useHeader", "true")
            .option("treatEmptyValuesAsNulls", "true")
            .option("inferSchema", "true")
            .option("addColorColumns", "False")
            # Bug fix: option key was misspelled "maxRowsInMey".
            .option("maxRowsInMemory", 2000)
            # Bug fix: key/value were swapped -- the sheet name was being
            # used as the option key with the literal value "Import".
            .option("sheetName", sheet_name)
            .load(file_path))
def finalize(mt):
    """Drops entry fields no longer needed for combining.

    :param mt: matrix table (presumably a Hail MatrixTable -- TODO confirm).
    :return: the matrix table without the ``gvcf_info`` entry field.

    Note
    ----
    You will not be able to run :func:`.combine_gvcfs` with the output of this
    function.
    """
    return mt.drop('gvcf_info')
import argparse
def find_engine_override(argv):
    """Since the bootstrap process attempts to defer all logic to the recipes-py
    repo, we need to be aware if the user is overriding the recipe_engine
    dependency. This looks for and returns the overridden recipe_engine path, if
    any, or None if the user didn't override it."""
    PREFIX = 'recipe_engine='
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-O', '--project-override', action='append')
    parsed, _unknown = parser.parse_known_args(argv)
    overrides = parsed.project_override or ()
    for item in overrides:
        if item.startswith(PREFIX):
            return item[len(PREFIX):]
    return None
def triplet(n):
    """Find the Pythagorean triplet whose sum equals *n*.

    Returns [c, b, a] in descending order, or False when none exists.
    """
    for hyp in range(n - 3, 1, -1):
        for leg in range(hyp - 1, 1, -1):
            other = (hyp * hyp - leg * leg) ** 0.5
            if other + leg + hyp == n:
                return [hyp, leg, int(other)]
    return False
import subprocess
def get_nc_dump_string_by_ncdump(nc_file_name):
    """
    (string) -> string
    Return: string create by running "ncdump -h" command for netcdf file.

    NOTE(review): with no text mode set, ``communicate()[0]`` is bytes,
    so non-error calls actually return bytes, not str -- confirm callers.
    Any failure (e.g. ncdump missing) yields the empty string.
    """
    try:
        process = subprocess.Popen(['ncdump', '-h', nc_file_name], stdout=subprocess.PIPE)
        nc_dump_string = process.communicate()[0]
    except Exception:
        nc_dump_string = ''
    return nc_dump_string
import os
import time
def connected_to_internet():
    """
    This function will return true or false whether there is currently
    an internet connection or not.

    Pings google.com up to 15 times (useful right after boot, when WiFi
    may not be up yet) and checks the ping summary line in the output.
    """
    # This represents the terminal command that will ping google.com to seek
    # a response
    ping = "ping -c2 google.com"
    # Define the output string we are aiming to read
    output = ""
    # Define a String that should only be present if connected to the internet
    ping_success = "--- google.com ping statistics ---"
    # If the device is starting up, it may not immediately be connected to the
    # WiFi, even if the credentials have been set. Loop through a few times to
    # check adequately.
    for x in range(0, 15):
        # Store output of the ping
        output = os.popen(ping).read()
        if ping_success in output:
            break
        # Sleep momentarily before attempting again
        time.sleep(0.75)
    # Check if the success String is present in the output
    if ping_success in output:
        print("Connected to internet.")
        return True
    # Else, there is no internet connection
    else:
        print("Not connected to internet.")
        return False
def move_wonpts(self):
    """Return the move with the best average points won per playout.

    Iterates ``self.t`` (mapping move -> node exposing ``n_playout`` and
    ``wonpts``); unplayed moves are skipped.
    """
    best = -1000
    bm = None
    for m in self.t:
        # Skip moves that were never explored (avoids division by zero).
        if self.t[m].n_playout == 0:
            continue
        u = self.t[m].wonpts/self.t[m].n_playout
        # NOTE(review): returning immediately on u == -1 looks like a
        # shortcut for a special terminal value -- confirm intent.
        if u == -1:
            return m
        if u > best:
            bm = m
            best = u
    return bm
import six
def isHTML(str):
    """Try to determine whether str is HTML or not.

    Accepts str or bytes (undecodable bytes yield False). Recognizes a
    leading doctype, an ``<html`` tag, or a comment followed by ``<html``.
    NOTE(review): the parameter shadows the builtin ``str``.
    """
    # Bytes input: attempt a default (utf-8) decode first.
    if isinstance(str, six.binary_type):
        try:
            str = str.decode()
        except UnicodeDecodeError:
            return False
    s = str.lstrip().lower()
    if s.startswith('<!doctype html'):
        return True
    # "<html" must be followed by a space or ">" to count as a tag.
    if s.startswith('<html') and (s[5:6] in ' >'):
        return True
    if s.startswith('<!--'):
        # Allow an HTML comment before the <html> tag.
        idx = s.find('<html')
        return idx > 0 and (s[idx+5:idx+6] in ' >')
    else:
        return False
import os
def is_module(directory):
    """A directory is a module if it contains an ``__init__.py`` file.

    Returns False for paths that are not directories.
    """
    # Perf/idiom fix: check the one path directly instead of listing the
    # whole directory.
    return os.path.isdir(directory) and os.path.exists(os.path.join(directory, '__init__.py'))
def to_human(seconds):
    """Converts seconds to human readable (weeks, days, hours) form.

    :param int seconds: number of seconds.
    :return: (weeks, days, hours) equivalent to the seconds.
    :rtype: str
    """
    # Bug fix: use floor division -- on Python 3, ``/`` yields floats and
    # produced strings like "1.0w, 2.0d, 3.0h".
    weeks = seconds // (7 * 24 * 60 * 60)
    days = seconds // (24 * 60 * 60) - 7 * weeks
    hours = seconds // (60 * 60) - 7 * 24 * weeks - 24 * days
    return '%sw, %sd, %sh' % (weeks, days, hours)
import os
def load_chrom_sizes(reference_genome):
    """
    Load chromosome sizes for a reference genome.

    Reads ``utils/<reference_genome>.chrom.sizes`` next to this module
    and returns a dict mapping chromosome name -> length (int).
    """
    my_path = os.path.abspath(os.path.dirname(__file__))
    rg_path = f'{my_path}/utils/{reference_genome}.chrom.sizes'
    lengths = {}
    # Fix: use a context manager so the file handle is closed (the
    # original leaked it).
    with open(rg_path) as f:
        for line in f:
            [ch, l] = line.strip().split()
            lengths[ch] = int(l)
    return lengths
def scalePixelData(pixelData, rInt, rSlope):
    """Scale pixel image values linearly: scaled = rSlope * value + rInt."""
    return [[(rSlope * value) + rInt for value in row] for row in pixelData]
def tlv_file_xml_mapped(tlv_data_xml_mapped, tmp_path):
    """Return the path to a mapped XML file containing the test tree.

    Presumably a pytest fixture: ``tlv_data_xml_mapped`` (XML bytes) and
    ``tmp_path`` are injected by the fixture machinery -- TODO confirm a
    ``@pytest.fixture`` decorator exists at the definition site.
    """
    path = tmp_path / "expected_mapped.xml"
    path.write_bytes(tlv_data_xml_mapped)
    return path
from typing import List
def men_from_boys(arr: List[int]) -> list:
    """
    Sort out the men from the boys.

    Men are the even numbers and boys are the odd. Returns a list with
    the unique evens in ascending order followed by the unique odds in
    descending order.

    :param arr: input integers
    :return: partitioned, de-duplicated, sorted list
    """
    men: List[int] = []
    boys: List[int] = []
    for value in arr:
        bucket = men if value % 2 == 0 else boys
        # Preserve only the first occurrence of each value.
        if value not in bucket:
            bucket.append(value)
    return sorted(men) + sorted(boys, reverse=True)
def has_cycle(head):
    """Return True if the linked list starting at ``head`` contains a cycle.

    Walks the list recording each visited node in a set; encountering a node
    twice means the ``next`` pointers loop back.  O(n) time, O(n) space.
    Returns False for an empty list (``head is None``).
    """
    print('\nIn has_cycle')
    # A plain visited-set replaces the original dict whose values were never
    # used; probing `head` (instead of `head.next`) yields identical results.
    seen = set()
    while head:
        if head in seen:
            return True
        seen.add(head)
        head = head.next
    return False
# space complexity is O(n) | 6c03759b93afbd09878b6cfaf3ae4e58c9a4f555 | 694,700 |
def bissexto(ano):
    """Return True when ``ano`` is a leap year, False otherwise.

    Leap-year rule: divisible by 400, or divisible by 4 while not
    divisible by 100.
    """
    if ano % 400 == 0:
        return True
    return ano % 4 == 0 and ano % 100 != 0
# | 388880697cdebe9c29b8815edb99b9fda80a2210 | 694,701 |
from typing import ByteString
def make_buffer_mutable(data: ByteString):
    """
    Return a mutable version of ``data``.

    A ``bytearray`` or a writable ``memoryview`` is returned unchanged,
    i.e. without copying; any other input is copied into a new ``bytearray``.
    """
    already_mutable = isinstance(data, bytearray) or (
        isinstance(data, memoryview) and not data.readonly
    )
    return data if already_mutable else bytearray(data)
# | 62a80b83a8d1ce141673e3c6b91d34c0e9cfa89c | 694,702 |
import re
def find_all(item, items, regex=False, regex_flags=None):
    r"""Return (index, value) pairs for every element of ``items`` matching ``item``.

    :param item: the value to compare against, or a regex pattern when ``regex``.
    :param items: an iterable of candidate values.
    :param regex: when True, match with ``re.search(item, value)`` instead of equality.
    :param regex_flags: optional flags forwarded to ``re.search``.
    :return: list of ``(index, value)`` tuples in input order.

    >>> find_all('own', ['Is', 'that', 'your', 'own', 'brown', 'cow'])
    [(3, 'own')]
    >>> find_all('own', ['Is', 'that', 'your', 'own', 'brown', 'cow'], regex=True)
    [(3, 'own'), (4, 'brown')]
    >>> find_all('ow$', ['How', 'now', 'brown', 'cow'], regex=True)
    [(0, 'How'), (1, 'now'), (3, 'cow')]
    >>> find_all('(?<!\w)[a-z]ow', ['How', 'now', 'brown', 'cow'], regex=True, regex_flags=re.IGNORECASE)
    [(0, 'How'), (1, 'now'), (3, 'cow')]
    """
    if regex:
        flags = regex_flags or 0
        matches = lambda value: re.search(item, value, flags=flags)
    else:
        matches = lambda value: value == item
    return [(index, value) for index, value in enumerate(items) if matches(value)]
# | bf9e78ef94261c0ee88162e6a1be85a8cdb1dd54 | 694,703 |
def mark_doc(doc, wids, mark=None, pos=None):
    """
    Attach a marker to the words of ``doc`` sitting at positions ``wids``.

    :param list doc: a list of words (strings).
    :param set wids: positions of the words to be marked.
    :param string mark: marker text applied to each selected word
        (defaults to "NEG").
    :param string pos: one of {"prefix", "suffix"} — where the marker is
        attached (defaults to "suffix").
    :return: a new list of words with the selected positions marked.
    """
    mark = "NEG" if mark is None else mark
    pos = "suffix" if pos is None else pos

    def decorate(index, token):
        # Positions outside wids pass through untouched.
        if index not in wids:
            return token
        return mark + "_" + token if pos == "prefix" else token + "_" + mark

    return [decorate(index, token) for index, token in enumerate(doc)]
# | 481fb2d4ca8828181288d776c6f4b73e82f0443a | 694,706 |
def unknown_char(char_name, id_ep):
    """Return the '#unknown#'-tagged version of a character name.

    Names already containing '#unknown#' (files processed before) are
    returned unchanged, making the transformation idempotent.
    """
    return char_name if "#unknown#" in char_name else f"{char_name}#unknown#{id_ep}"
# | 061683efd275335e58129225fe4bc9dabc044c9b | 694,707 |
def _copy_list(seq):
"""Recursively copy a list of lists"""
def copy_items(seq):
for item in seq:
if isinstance(item, list):
yield list(copy_items(item))
else:
yield item
return list(copy_items(seq)) | 2e179ed338bf9b5417772509ca27a84608c240d9 | 694,708 |
def make_vij(key: str, sent: str) -> str:
    """Encrypt plaintext with the Vigenère cipher.

    Both arguments are lowercased first.  Letters a-z are shifted by the
    current key letter and emitted in UPPERCASE; any other character is
    copied through unchanged and does not advance the key position.

    :param key: the cipher key (expected to be alphabetic).
    :param sent: the plaintext to encrypt.
    :return: the ciphertext.
    """
    key = key.lower()
    cipher = []
    k = 0
    for ch in sent.lower():
        if 'a' <= ch <= 'z':
            shift = (ord(ch) + ord(key[k]) - 2 * ord('a')) % 26
            cipher.append(chr(ord('A') + shift))
            k = (k + 1) % len(key)
        else:
            cipher.append(ch)
    return ''.join(cipher)
# | 03853924d15aff5187359761bd1f5f15e33eebf2 | 694,709 |
def gumbel_parameter_converter(miu, sigma):
    """Convert a sample mean and standard deviation to Gumbel loc/scale.

    Companion to ``scipy.stats.gumbel_r``: translates the mean ``miu`` and
    standard deviation ``sigma`` of samples x into the (location, scale)
    parametrisation scipy expects.

    :param miu: mean value of samples x.
    :param sigma: standard deviation of samples x.
    :return: tuple ``(loc, scale)``.
    """
    # "Gumbel W&S" parameters as in the original code; 0.5772 is the
    # Euler-Mascheroni constant, 1.282 presumably pi/sqrt(6) rounded.
    alpha = 1.282 / sigma
    mode = miu - 0.5772 / alpha
    # scipy parametrisation: loc is the mode, scale is 1 / alpha.
    return mode, 1 / alpha
# | 910ae757862a9107cbf59ceda37a9825554f1859 | 694,711 |
def calculate_occlusion(ray, obj, light, objects):
    """
    Decide whether another object blocks the light from the hit point.

    Shoots ``ray`` (starting at the hit point, pointing at the light) against
    every scene object except ``obj`` itself; an intersection strictly in
    front of the point and closer than the light means the point is shadowed.

    Args:
        ray: ray from the hit point towards the light (exposes ``pr`` and ``intersect``).
        obj: the object the hit point belongs to (skipped during the test).
        light: light source exposing ``get_distance``.
        objects: iterable of scene objects to test against.

    Returns:
        bool: True when some other object occludes the light.
    """
    light_distance = light.get_distance(ray.pr)
    candidates = (other for other in objects if other != obj)
    return any(0 < ray.intersect(other) <= light_distance for other in candidates)
# | c15acf785f8baf72da64307380cd36d7de6b6ef8 | 694,713 |
def create_label_lists(label_path):
    """
    Build label<->index mappings from a retraining output label file.

    The file (as generated by retraining.py) holds one label per line; a
    label's encoding is its zero-based line index.

    :param label_path: path to the output label txt file.
    :return: tuple ``(label2idx, idx2label)`` — label name to index dict and
        its reverse.
    """
    # Context manager closes the file; the original docstring also
    # documented a non-existent `json_path` parameter (removed).
    with open(label_path) as f:
        labels = [line.strip() for line in f]
    label2idx = {name: idx for idx, name in enumerate(labels)}
    idx2label = {idx: name for idx, name in enumerate(labels)}
    return label2idx, idx2label
# | db0557488bb32fa7de468b4a9011e961e977622b | 694,714 |
def get_integer(dictionary, key):
    """Gets value of a key in the dictionary as an integer.

    Args:
        dictionary (dict): A dictionary.
        key (str): A key in the dictionary.

    Returns: An integer, if the value can be converted to integer. Otherwise None.
    """
    val = dictionary.get(key)
    try:
        return int(val)
    except (TypeError, ValueError):
        # TypeError covers a missing key (val is None) and non-numeric
        # types; the original caught only ValueError and crashed on those,
        # contradicting its own "otherwise None" contract.
        return None
# | 503f168ff6ecac637fde28dae3c6fc33554b5e26 | 694,715 |
from typing import Tuple
def left(x: int, y: int) -> Tuple[int, int]:
    """Return the coordinates one step to the left of ``(x, y)``."""
    return (x - 1, y)
# | ad16a27149980c532a72970fade1b09843168a82 | 694,716 |
import re
def extract_bonds_and_angle_info(force_field):
    """Extract equilibrium bond lengths and angles from a force-field file.

    Scans for lines shaped like ``XX-XX  <k>  <value>`` (bonds) and
    ``XX-XX-XX  <k>  <value>`` (angles), recording the trailing value for
    each atom pair / triple.

    :param force_field: path to the force-field file.
    :return: dict ``{"bonds": {atoms: length}, "angles": {atoms: angle}}``.
    """
    info = {"bonds": {}, "angles": {}}
    patterns = {
        "bonds": r"^(.{2}-.{2})\s+\S+\w+\s+(\S+)",
        "angles": r"^(.{2}-.{2}-.{2})\s+\S+\w+\s+(\S+)",
    }
    # Context manager closes the file (the original leaked the handle).
    with open(force_field, "r") as f:
        text = f.read()
    for section, regex in patterns.items():
        for m in re.finditer(regex, text, re.MULTILINE):
            info[section][m.group(1)] = float(m.group(2))
    return info
# | ee314b68f9e2dfecb3b94c4493594adc30668d3e | 694,717 |
def ymd2jd(year, month, day):
    """
    Converts a year, month, and day to a Julian Date.

    This function uses an algorithm from the book "Practical Astronomy with your
    Calculator" by Peter Duffet-Smith (Page 7).

    Parameters
    ----------
    year : int
        A Gregorian year
    month : int
        A Gregorian month
    day : int
        A Gregorian day

    Returns
    -------
    jd : float
        A Julian Date computed from the input year, month, and day.
    """
    # January/February count as months 13/14 of the previous year.
    if month == 1 or month == 2:
        yprime = year - 1
        mprime = month + 12
    else:
        yprime = year
        mprime = month
    # Gregorian-reform correction applies from 1582-10-15 onward.  The
    # original tested `month >= 10 and day >= 15`, which misclassified
    # e.g. 1582-11-01 (day < 15); lexicographic tuple comparison is correct.
    if year > 1582 or (year == 1582 and (month, day) >= (10, 15)):
        A = int(yprime / 100)
        B = 2 - A + int(A / 4.0)
    else:
        B = 0
    # int() truncates toward zero, hence the -0.75 shift for negative years.
    if yprime < 0:
        C = int((365.25 * yprime) - 0.75)
    else:
        C = int(365.25 * yprime)
    D = int(30.6001 * (mprime + 1))
    return B + C + D + day + 1720994.5
def ymd2weekday(year, month, day):
    """Returns the day of the week for the specified year, month, and day.

    NOTE(review): reconstructed from a commented-out ``def`` line — the body
    below was orphaned at module level and referenced an external
    ``dayNames`` list that is not visible here.  ``dayNames`` is defined
    locally, Sunday-first, which matches the ``(jd + 1.5) / 7`` weekday
    formula used below (JD 2451544.5 = 2000-01-01 maps to index 6,
    a Saturday).  Confirm against the original module's ``dayNames``.
    """
    dayNames = ["Sunday", "Monday", "Tuesday", "Wednesday",
                "Thursday", "Friday", "Saturday"]
    jd = ymd2jd(year, month, day)
    A = (jd + 1.5) / 7.0
    return dayNames[round((A - int(A)) * 7.0)]
# | 7f93c1eef14d3e75764329748d4646e41ba6fea9 | 694,718 |
import os
def _find_only_folder_with_metadata(path):
"""Looks through a bundle for a single folder that contains a metadata file and
returns that folder's name if found"""
files_in_path = os.listdir(path)
if len(files_in_path) > 2 and 'metadata' in files_in_path:
# We see more than a couple files OR metadata in this folder, leave
return None
for f in files_in_path:
# Find first folder
folder = os.path.join(path, f)
if os.path.isdir(folder):
# Check if it contains a metadata file
if 'metadata' in os.listdir(folder):
return folder | 83ef9d62e9e46cd35ddd18c5ab6b71de187ec51b | 694,719 |
import json
def json_to_key_val_dict(fn, key_name, val_name):
    """Load a json file of (key, value) tuples into a ``{str: str}`` dict.

    Input json must contain a list of tuples ``[(a0, b0), (a1, b1), ...]``.

    :param fn: json file containing a list of (key, value) tuples.
    :param key_name: name of the key field, used in error messages.
    :param val_name: name of the value field, used in error messages.
    :return: dict mapping ``str(key)`` to ``str(value)``.
    :raises ValueError: if an entry is not a pair or keys are not unique.
    """
    # Context manager closes the file handle (the original leaked it).
    with open(fn, 'r') as f:
        tuples = json.load(f)  # load [(a0, b0), ..., ]
    try:
        keys, vals = [str(t[0]) for t in tuples], [str(t[1]) for t in tuples]
    except IndexError as e:
        raise ValueError('Json file must only contain a list of (%s, %s) tuples: %s, %s' % (key_name, val_name, fn, str(e)))
    if len(set(keys)) != len(keys):
        raise ValueError('{k} objects in json file {f} are not unique: {keys}.'.format(k=key_name, f=fn, keys=keys))
    return dict(zip(keys, vals))
# | ffffa436886c50d7903178953066cd6a5d069285 | 694,720 |
def _get_default_var_dicts_planetos(dataset):
"""
Returns dictionary mapping PlanetOS variable names to OpenOA variable names for a particular dataset
Args:
dataset (:obj:`string`): Dataset name ("merra2" or "era5")
Returns:
:obj:`dict`: Dictionary mapping reanalysis variable names from PlanetOS to standard OpenOA variable names
"""
dataset = dataset.lower().strip()
if dataset == "merra2":
var_dict = {"U50M": "u_ms", "V50M": "v_ms", "T2M": "temperature_K", "PS": "surf_pres_Pa"}
elif dataset == "era5":
var_dict = {
"eastward_wind_at_100_metres": "u_ms",
"northward_wind_at_100_metres": "v_ms",
"air_temperature_at_2_metres": "temperature_K",
"surface_air_pressure": "surf_pres_Pa",
}
else:
raise ValueError('Invalid dataset name. Currently, "merra2" and "era5" are supported.')
return var_dict | b93e2b5655003ea81900e013d3c24545baae690c | 694,722 |
from typing import Union
from typing import List
from typing import Any
from typing import Dict
def convert_to_schema(schema: Union[List[Any], Dict[str, Any], Any]) -> Dict[str, Any]:
    """Recursively convert a json-like object to an OpenAPI example schema.

    Lists become array schemas (typed from their first element, or the
    placeholder "???" when empty), dicts become object schemas, and any
    other value becomes a string schema with the stringified value as default.
    """
    if isinstance(schema, list):
        element = schema[0] if schema else "???"
        return {"type": "array", "items": convert_to_schema(element)}
    if isinstance(schema, dict):
        props = {str(key): convert_to_schema(value) for key, value in schema.items()}
        return {"type": "object", "properties": props}
    return {"type": "string", "default": str(schema)}
# | dbb5010b50f81bb86d668850b70631cad34c9407 | 694,723 |
def transformedCoordinatesWithMatrice(mol, matrice):
    """Return the molecule's atom coordinates transformed by a 4x4 matrix.

    Applies the affine transform ``matrice`` to every coordinate in
    ``mol.allAtoms.coords`` (homogeneous multiply by the top three rows,
    then division by a scale factor currently fixed at 1.0).

    @type mol: MolKit node
    @param mol: the molecule to be transformed
    @type matrice: 4x4array
    @param matrice: the matrix to apply to the molecule node
    @rtype: array
    @return: the transformed list of 3d points from the molecule atom coordinates
    """
    M = matrice
    scaleFactor = 1.0
    transformed = []
    for pt in mol.allAtoms.coords:
        tx = (M[0][0] * pt[0] + M[0][1] * pt[1] + M[0][2] * pt[2] + M[0][3]) / scaleFactor
        ty = (M[1][0] * pt[0] + M[1][1] * pt[1] + M[1][2] * pt[2] + M[1][3]) / scaleFactor
        tz = (M[2][0] * pt[0] + M[2][1] * pt[1] + M[2][2] * pt[2] + M[2][3]) / scaleFactor
        transformed.append((tx, ty, tz))
    return transformed
# | 50ea9e73cfb48e0002e7a16eeb8d141eadda3690 | 694,724 |
import socket
def get_ip():
    """
    Resolve this machine's hostname to an IP address.

    NOTE(review): despite the original docstring, no outbound connection is
    made — this simply resolves ``socket.gethostname()``, which may yield a
    loopback address depending on the hosts configuration.

    :return: IP address string for the local hostname.
    """
    hostname = socket.gethostname()
    return socket.gethostbyname(hostname)
# | f772364375eeea2fa155a85fdf61186df26ad20c | 694,725 |
def checkmdscale_none(md, tocheck=('ZScale',), replace=(1.0,)):
    """Check scaling entries for None to avoid issues later on.

    :param md: original metadata
    :type md: dict
    :param tocheck: metadata keys to check for None, defaults to ('ZScale',)
    :type tocheck: Sequence, optional
    :param replace: values substituted for each None entry, defaults to (1.0,)
    :type replace: Sequence, optional
    :return: the same metadata dict, modified in place
    :rtype: dict
    """
    # Tuple defaults replace the original mutable list defaults
    # (shared-mutable-default pitfall); lists still work as arguments.
    for key, fallback in zip(tocheck, replace):
        if md[key] is None:
            md[key] = fallback
    return md
# | 88dc27b1e7bf9f9a3b34ef863c6b0943d6a9b82e | 694,726 |
def match_any_re(regex_list, value):
    """Try each pre-compiled regex against ``value`` using ``fullmatch``.

    Returns the ``groupdict()`` of the first fully-matching pattern (note:
    the group dict, not the match object itself), or None when no pattern
    matches.
    """
    for pattern in regex_list:
        hit = pattern.fullmatch(value)
        if hit:
            return hit.groupdict()
    return None
# | fcb1ce9530ac990dd1048eb62883adcf9a06ab6a | 694,730 |
def part2(captcha):
    """Sum the digits that equal the digit halfway around the circular string.

    >>> part2("1212")
    6
    >>> part2("12131415")
    4
    """
    half = len(captcha) // 2
    rotated = captcha[half:] + captcha[:half]
    return sum(int(a) for a, b in zip(captcha, rotated) if a == b)
# | c56c2262216de81377fc5d2abfff20b15d6b198e | 694,732 |
def compare_containers(cont_a, cont_b):
    """
    Compare two containers by content regardless of their type,
    e.g. ``[1, 2, 3]`` and ``(1, 2., 3.)`` compare as True.

    Anything that cannot be length-checked or iterated in lockstep is
    treated as "not equal" (unless the two objects already compare equal).

    Keyword Arguments:
    cont_a -- one container
    cont_b -- other container
    """
    if cont_a == cont_b:
        return True
    # Element-wise fallback; any failure (no len(), not iterable, ...)
    # counts as inequality, matching the original's broad except.
    try:
        if len(cont_a) != len(cont_b):
            return False
        return all(a_item == b_item for a_item, b_item in zip(cont_a, cont_b))
    except Exception:
        return False
# | 6fa3aefaf40f0cf5ea0f453f7f527714ef98848c | 694,733 |
def get_album_from_html(self, html):
    """Scrapes the html parameter to get the album name of the song on a Genius page.

    Looks for a <span> whose text is exactly 'Album' and returns the
    stripped text of the following span, or '' when not found.

    :param html: parsed tree exposing ``findAll`` (BeautifulSoup-style).
    :return: album name string, possibly empty.
    """
    spans = html.findAll("span")
    for i, span in enumerate(spans):
        # Guard i + 1: the original indexed past the end (IndexError)
        # when 'Album' happened to be the last span.
        if span.text == 'Album' and i + 1 < len(spans):
            return spans[i + 1].text.strip()
    return ''
# | a7cf940227dfd2dd9f2c7fe44073ef02489e6db1 | 694,734 |
def identity(mask):
    """
    Return ``mask`` unchanged.

    Usable as a no-op argument of :func:`set_encoder` and :func:`set_decoder`.

    :param mask: input mask
    :type mask: numpy.ndarray
    :return: the same mask
    :rtype: numpy.ndarray

    Example:

    .. code-block:: python

        ACDC.set_encoder(ACDC.identity)
        ACDC.set_decoder(ACDC.identity)
    """
    return mask
# | b2082ffb09501a393776e3a1b960b4b532f3e465 | 694,735 |
import json
async def assert_api_post_response(cli, path: str, payload: object = None, status: int = 200, expected_body: object = None):
    """
    POST ``payload`` as json to ``path`` and validate the response.

    The API is expected to answer with json (possibly served as text/plain),
    so the response text is json-decoded before comparison.

    Parameters
    ----------
    cli : aiohttp cli
        aiohttp test client
    path : str
        url path to perform POST request to
    payload : object (default None)
        the payload to be sent with the POST request, as json.
    status : int (default 200)
        http status code to expect from response
    expected_body : object
        An object to assert the api response against.

    Returns
    -------
    Object or None
        the decoded response body when no ``expected_body`` was provided
        to assert against, otherwise None
    """
    response = await cli.post(path, json=payload)
    assert response.status == status

    decoded = json.loads(await response.text())
    # Truthiness check preserved from the original: a falsy expected_body
    # (None, {}, []) means "return the body" rather than "assert equality".
    if expected_body:
        assert decoded == expected_body
    else:
        return decoded
# | a09185ff957446ce99cb1bcd93d5699890b72b52 | 694,736 |
def diffangles(a1, a2):
    """Difference between two angles in 0-360 degrees.

    :param a1: angle 1
    :param a2: angle 2
    :return: the difference, folded into [0, 180]
    """
    delta = abs(a1 - a2)
    # Folding via 180 - |delta - 180| (exact formula kept: a mod-360
    # variant would behave differently for |delta| > 360).
    return 180 - abs(delta - 180)
# | 8f17b4f1bdd822082b552aa14907bcdb4b185a4f | 694,737 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.