content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_long_description():
    """
    Return the contents of README.rst, or '' if the file is absent
    (for example in the docker image).

    Fix: the original `open(...).read()` never closed the file handle;
    a `with` block guarantees it is released.
    """
    try:
        with open('README.rst') as readme:
            return readme.read()
    except FileNotFoundError:
        return ''
def find_by_text(browser, tag, text):
    """Return the first element of the given tag whose text equals `text`.

    Arguments:
    - browser: browser instance (firefox, chrome, ...)
    - tag: tag name in which to look for the text
    - text: exact text content the element must carry

    Returns None when no element matches.
    """
    for candidate in browser.find_elements_by_tag_name(tag):
        if candidate.text == text:
            return candidate
def vhdl_bit_name(reg_name, bit_name, number):
    """Return a VHDL constant declaration binding a register's bit name
    to its associated bit number.
    """
    return f"constant {reg_name}_{bit_name} : integer := {number};\n"
import os
def mixed_api_def() -> str:
    """Return path to API definition that causes status code conformance failure."""
    # use `here` instead of shadowing the builtin `dir`
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, "mock_definitions", "mixed.yaml")
from pydantic import BaseModel # noqa: E0611
def dictify(item):
    """
    Recursively convert *item* into plain built-in containers.

    Pydantic models are expanded via .dict(exclude_unset=False), dict
    subclasses are cast to plain dicts, lists are converted element-wise,
    and any other value is returned unchanged.
    """
    if isinstance(item, BaseModel):
        return item.dict(exclude_unset=False)
    if isinstance(item, dict):
        return {key: dictify(value) for key, value in item.items()}
    if isinstance(item, list):
        return [dictify(element) for element in item]
    return item
def getClikedPos(pos, rows, width):
    """Translate a clicked pixel position into its (row, col) grid cell."""
    cell = width // rows
    y, x = pos
    return y // cell, x // cell
def sum2(n):
    """
    Return the sum of the integers from 0 to n (inclusive).

    Uses Gauss's closed form. Fix: integer division keeps the result an
    int for integer input; the original `/` always produced a float.
    """
    return n * (n + 1) // 2
import io
def loadRefSeq(fname):
    """
    Load the polio reference sequence.

    Reads ``fname`` line by line, strips trailing newline/carriage-return
    characters, and returns the lines as a list of strings.

    Fix: the original opened the file without ever closing it; a `with`
    block guarantees the handle is released.
    """
    with io.open(fname, 'r') as handle:
        return [line.rstrip('\n\r') for line in handle]
import random
def create_welcome_message(username):
    """Build a lightly randomized welcome message for *username*."""
    greetings = ["hello", "hi", "welcome"]
    closers = [
        "hope you're having a great day!",
        "miao miao miao (that's cat for have a good day)!",
        "enjoy!",
        "good luck!",
        "happy writing!",
    ]
    opener = random.choice(greetings)
    capitalized = username.capitalize()
    closer = random.choice(closers)
    return "{} {}! {}".format(opener, capitalized, closer)
def find_unique(lst: list) -> list:
    """Return, in original order, the elements of *lst* that appear exactly once.

    Fix: the original called list.count inside the comprehension, which
    is O(n^2); counting once with Counter makes it O(n).
    """
    from collections import Counter
    occurrences = Counter(lst)
    return [item for item in lst if occurrences[item] < 2]
def find_linear_function_parameters(p1, p2):
    """
    Find parameters (alpha, beta) of the linear function y = alpha * x + beta
    connecting first_point and second_point.
    >>> find_linear_function_parameters((1, 1), (0, 0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point coordinates need to be smaller or equal to second_point coordinates
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 100.0))
    Traceback (most recent call last):
    ...
    AssertionError: first_point and second_point cannot lie on a horizontal or vertical line
    >>> find_linear_function_parameters((50.0, 20.0), (110.0, 110.0))
    (1.5, -55.0)
    >>> find_linear_function_parameters((50.0, 30.0), (50.0, 30.0))
    (1, 0)
    """
    assert len(p1) == 2, 'first_point needs to have exactly 2 coordinates'
    assert len(p2) == 2, 'second_point needs to have exactly 2 coordinates'
    assert p1[0] <= p2[0] and p1[1] <= p2[1], \
        'first_point coordinates need to be smaller or equal to second_point coordinates'
    if p2[0] - p1[0] == 0 or p2[1] - p1[1] == 0:
        assert p1 == p2, 'first_point and second_point cannot lie on a horizontal or vertical line'
        alpha = 1  # both points are the same
        beta = 0
    else:
        # slope from the two points, intercept from the first point.
        # Fix: the original computed beta = p2[1]*(p1[1]-p1[0])/(p2[0]-p1[0]),
        # which only equals the true intercept when p2[0] == p2[1] or
        # p1[1] == 0 (as in the doctests); e.g. it failed for (1, 2)-(3, 4).
        alpha = (p2[1] - p1[1]) / (p2[0] - p1[0])
        beta = p1[1] - alpha * p1[0]
    return alpha, beta
def generate_placeholder(length, width):
    """
    Build a "(%s,%s,...),(%s,...)" SQL parameter placeholder string:
    *length* groups of *width* markers each.
    """
    group = '(' + ','.join(['%s'] * width) + ')'
    return ','.join([group] * length)
def clean(text, split=True):
    """Normalize user input.

    Non-empty strings are stripped and lower-cased and, when *split* is
    true, split on whitespace into a list. Falsy input yields [""].
    Truthy non-string input is returned unchanged.
    """
    if not text:
        return [""]
    if isinstance(text, str):
        text = text.strip().lower()
        if split:
            return text.split()
    return text
def get_caption(attributes, feature, label, group=None):
    """Construct caption from plotting attributes for (feature, label) pair.

    Parameters
    ----------
    attributes : dict
        Plot attributes.
    feature : str
        Feature.
    label : str
        Label.
    group : str, optional
        Group.

    Returns
    -------
    str
        Caption of the form "<title>: <ylabel> vs. <xlabel>[ (group)]."

    Raises
    ------
    KeyError
        ``attributes`` does not include necessary keys.
    """
    # validate the two top-level keys first, then the per-plot sub-keys
    for kind, key in (('feature', feature), ('label', label)):
        if key not in attributes:
            raise KeyError(
                f"Attributes do not include necessary key for {kind} '{key}'")
    for subkey in ('plot_title', 'plot_xlabel'):
        if subkey not in attributes[feature]:
            raise KeyError(
                f"Attributes for feature '{feature}' does not include "
                f"necessary key '{subkey}'")
    if 'plot_ylabel' not in attributes[label]:
        raise KeyError(
            f"Attributes for label '{label}' does not include necessary "
            f"key 'plot_ylabel'")
    group_str = '' if group is None else f' ({group})'
    return (f"{attributes[feature]['plot_title']}: "
            f"{attributes[label]['plot_ylabel']} vs. "
            f"{attributes[feature]['plot_xlabel']}{group_str}.")
def get_user_email(user_profile):
    """Get user e-mail address or the default address if user profile does not exist."""
    default_email = 'bayesian@redhat.com'
    if user_profile is None:
        return default_email
    return user_profile.get('email', default_email)
def uninterleave(data):
    """Given a stereo array, return separate left and right streams.

    Converts one array of interleaved left/right samples into a
    2 x (len/2) array whose rows are the left and right channels.
    See also: interleave()

    Fixes: `len(data) / 2` is a float under Python 3 (reshape requires
    ints) and modern numpy only accepts order='F', not 'FORTRAN'.
    """
    return data.reshape(2, len(data) // 2, order='F')
def make_map(name, symbol):
    """
    Map a Font Awesome icon name to its unicode symbol as a LaTeX macro
    definition; the symbol's hex digits are upper-cased.
    """
    return '\\expandafter\\def\\csname faicon@%s\\endcsname{\\symbol{"%s}}' % (
        name,
        symbol.upper(),
    )
import torch
def classic_inference(model, msa, msa_extra, seq, t1d, t2d, idx_pdb, N_cycle):
    """
    Classic RF2 inference.

    Runs N_cycle - 1 recycling passes that only propagate the latent
    msa/pair/xyz state, then one final pass that returns the full set of
    predictions; the output sequence is the argmax of the final
    amino-acid logits.
    """
    msa_prev, pair_prev, xyz_prev = None, None, None
    # recycling passes: feed the previous latent state back into the model
    for cycle in range(N_cycle - 1):
        print('on cycle', cycle)
        msa_prev, pair_prev, xyz_prev = model(
            msa[:, cycle], msa_extra[:, cycle], seq[:, cycle], idx_pdb,
            t1d=t1d, t2d=t2d,
            msa_prev=msa_prev, pair_prev=pair_prev, xyz_prev=xyz_prev,
            return_raw=True, use_checkpoint=False)
    # final pass produces logits, coordinates and per-residue lddt
    last = N_cycle - 1
    logit_s, logit_aa_s, pred_crds, pred_lddts = model(
        msa[:, last], msa_extra[:, last], seq[:, last], idx_pdb,
        t1d=t1d, t2d=t2d,
        msa_prev=msa_prev, pair_prev=pair_prev, xyz_prev=xyz_prev,
        use_checkpoint=False)
    # get sequence by argmaxing final logits
    seq_out = torch.argmax(logit_aa_s, dim=1)
    return logit_s, logit_aa_s, pred_crds, pred_lddts, seq_out
def count_substring(string, sub_string):
    """
    Return the number of non-overlapping occurrences of sub_string in string.

    Fix: the original returned str.find()'s *index* of the first match,
    which is not a count — and is 0 for a match at position 0, which
    looked like "no occurrences".
    """
    return string.count(sub_string)
import math
def rotateX(angle, accel):
    """
    Rotate the vector *accel* about the x axis.

    angle | (float) angle in radians to rotate by
    accel | ((float), (float), (float)) vector to rotate
    Returns the rotated (x, y, z) tuple.
    """
    x, y, z = accel
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    return (x, cos_a * y - sin_a * z, sin_a * y + cos_a * z)
from typing import Optional
import inspect
import functools
import warnings
def deprecated(reason: Optional[str]):  # pragma: no cover
    """
    Decorator to mark functions as deprecated and log a warning in case it is called.
    Supports both ``@deprecated("reason")`` and bare ``@deprecated`` usage.
    Source: https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
    """
    def _wrap(target, fmt, reason_text):
        # shared wrapper: emit a DeprecationWarning, then delegate
        @functools.wraps(target)
        def wrapper(*args, **kwargs):
            warnings.simplefilter("always", DeprecationWarning)
            warnings.warn(
                fmt.format(name=target.__name__, reason=reason_text),
                category=DeprecationWarning, stacklevel=2,
            )
            warnings.simplefilter("default", DeprecationWarning)
            return target(*args, **kwargs)
        return wrapper

    if isinstance(reason, str):
        # used as @deprecated("please, use another function")
        def decorator(func):
            if inspect.isclass(func):
                fmt = "Call to deprecated class {name} ({reason})."
            else:
                fmt = "Call to deprecated function {name} ({reason})."
            return _wrap(func, fmt, reason)
        return decorator
    if inspect.isclass(reason) or inspect.isfunction(reason):
        # used as bare @deprecated
        if inspect.isclass(reason):
            fmt = "Call to deprecated class {name}."
        else:
            fmt = "Call to deprecated function {name}."
        return _wrap(reason, fmt, None)
    raise TypeError(repr(type(reason)))
def resolve_feedback(feedback):
    """
    Render a feedback object as "<title>\\n<message>".

    Args:
        feedback: object exposing ``title`` and ``message`` attributes.
    Returns:
        The formatted string.
    """
    return f"{feedback.title}\n{feedback.message}"
import math
def wien_deriv(x):
    """First derivative of the nonlinear equation solved in wien()."""
    exponential = math.exp(-x)
    return 1 - 5 * exponential
import io
import csv
def to_csv(output):
"""Output is a list of records"""
if not output:
return
string_io = io.StringIO(newline=None)
fieldnames = list({k for d in output for k in d.keys()})
writer = csv.DictWriter(string_io, fieldnames=fieldnames)
writer.writeheader()
writer.writerows(output)
return string_io.getvalue() | 42795edc235824bece766f36b0da107b347265f0 | 691,543 |
import os
import logging
def _create_and_return(path):
""" Creates the folder if it does not exist and return the path.
:param path: String; path of the folder to create
:return: String; path of the created folder"""
if not os.path.exists(path):
logging.warning("creating %s", path)
os.makedirs(path)
return path | b397946ed0d8965974caaefc1839a258d1ccd119 | 691,544 |
def get_status():
    """
    Health-check endpoint body: empty payload with HTTP 204 (No Content),
    indicating the server is healthy and ready to go.
    """
    return '', 204
def find_production_volume_by_id(dataset, uuid):
    """Find production volume in ``dataset`` with id ``uuid``.

    Raises ``ValueError`` if ``uuid`` is not found in the dataset, or if
    the found exchange does not carry a production volume."""
    for exchange in dataset['exchanges']:
        if exchange['id'] != uuid:
            continue
        if 'production volume' in exchange:
            return exchange['production volume']
        raise ValueError("Referenced exchange does not have a prod. volume")
    raise ValueError("Exchange id {} not found in dataset".format(uuid))
import torch
def make_shifted_mask(input_mask, max_v_len, max_t_len, memory_len=0, decoder=False):
    """
    Build a shifted attention mask: every query position may attend to all
    memory+video key positions, and text query positions additionally get
    a causal (lower-triangular) mask over the text key positions.

    Args:
        input_mask: (N, L) with `1` indicates valid bits, `0` indicates pad
        max_v_len: int, the first `max_v_len` is for video and its padding, the length
            of the rest of the bits is `max_t_len`. We have L = `max_v_len` + `max_t_len`.
            Note max_v_len may also include the memory len (M), thus max_v_len += M
        max_t_len: int
        memory_len: int, M
        decoder: if True the computed mask is discarded and an all-ones mask
            of the same size is returned (no masking).
    Returns:
        (N, max_v_len + max_t_len, L) mask tensor.
    """
    bsz, seq_len = input_mask.shape
    # key axis layout is [memory | video | text]; lengths must add up to L
    assert max_v_len + max_t_len + memory_len == seq_len
    shifted_mask = input_mask.new_zeros(
        bsz, max_v_len + max_t_len, seq_len
    )  # (N, L, M+L)
    # all query rows can see every memory + video key position
    shifted_mask[:, :, : memory_len + max_v_len] = 1
    # text query rows (after max_v_len) see text keys causally
    shifted_mask[:, max_v_len:, memory_len + max_v_len :] = torch.tril(
        input_mask.new_ones(max_t_len, max_t_len), diagonal=0
    )
    if decoder:
        # NOTE(review): this allocates a fresh all-ones tensor with default
        # dtype on CPU, dropping input_mask's device/dtype — confirm intended.
        shifted_mask = torch.ones(shifted_mask.size())
    return shifted_mask
def factorial(x):
    """Return x! computed recursively.

    Fixes: the original recursed forever for x == 0 (0! is 1) and for
    negative input; negatives now raise ValueError instead of blowing
    the stack.
    """
    if x < 0:
        raise ValueError("factorial is undefined for negative values")
    if x <= 1:
        return 1
    return x * factorial(x - 1)
def construct_lvol_store(client, bdev_name, lvs_name, cluster_sz=None):
    """Construct a logical volume store.

    Args:
        client: RPC client used to issue the call
        bdev_name: bdev on which to construct logical volume store
        lvs_name: name of the logical volume store to create
        cluster_sz: cluster size of the logical volume store in bytes (optional)
    Returns:
        UUID of created logical volume store.
    """
    params = dict(bdev_name=bdev_name, lvs_name=lvs_name)
    if cluster_sz:
        params['cluster_sz'] = cluster_sz
    return client.call('construct_lvol_store', params)
def check_states_are_close(num_nodes, num_states, p_state, p_next, tol):
    """
    Return True iff every |p_next - p_state| entry is strictly below tol.

    All (state, node) pairs are compared; the first entry at or above the
    tolerance short-circuits the result to False.
    """
    return all(
        abs(p_next[s, n] - p_state[s, n]) < tol
        for s in range(num_states)
        for n in range(num_nodes)
    )
def dxdt(y, z):
    """Compute x' = -y - z (the x-equation's right-hand side)."""
    return -(y + z)
def side_pos(curr_index, level):
    """Return (side, offset) of curr_index within sides of length 2*(level-1)."""
    side_length = (level - 1) * 2
    return divmod(curr_index, side_length)
import tempfile
import os
def _get_temp_file_name():
"""
Create a tempfile, get the name and then deletes the tempfile.
The behaviour of tempfiles is inconsistent between operating systems,
this helps to ensure consistent behaviour.
"""
file = tempfile.NamedTemporaryFile(
prefix='gva-',
delete=True)
file_name = file.name
file.close()
try:
os.remove(file_name)
except OSError:
pass
return file_name | d2bbc90afa7da641304733a0aeb0a0f79b13eaa1 | 691,553 |
def split_1_grams_from_n_grams(topics_weightings):
    """
    Split each topic's (word, weighting) pairs into 1-grams and n-grams.

    :param topics_weightings: 3D list of shape [topics, top_words, 2]
        where the inner pairs are (top_word, top_words_weighting).
    :return: two parallel lists of topics: the first holds the 1-gram
        pairs, the second the n-gram pairs (words containing a space).
    """
    single_grams = []
    multi_grams = []
    for topic in topics_weightings:
        topic_single, topic_multi = [], []
        for word, weighting in topic:
            bucket = topic_multi if ' ' in word else topic_single
            bucket.append((word, weighting))
        single_grams.append(topic_single)
        multi_grams.append(topic_multi)
    return single_grams, multi_grams
def temporal_iou(span_A, span_B):
    """
    Intersection over union of two temporal "bounding boxes".

    span_A: (start, end)
    span_B: (start, end)
    Returns 0 for disjoint spans.
    """
    inter_start = max(span_A[0], span_B[0])
    inter_end = min(span_A[1], span_B[1])
    if inter_start >= inter_end:
        return 0
    union_start = min(span_A[0], span_B[0])
    union_end = max(span_A[1], span_B[1])
    return float(inter_end - inter_start) / float(union_end - union_start)
import glob
def get_filelist():
    """
    :return: a list with the stem (text before the first '.') of every
        data/files/*.dat file
    """
    matches = glob.glob('data/files/*.dat')
    return [match.split('.')[0] for match in matches]
def _max(arr):
"""Maximum of an array, return 1 on empty arrays."""
return arr.max() if arr is not None and len(arr) > 0 else 1 | 72e2a6b5c3a6d09cd38e858211280704be58c838 | 691,558 |
def calc_fee(total):
    """Trade commission fee for an order of *total* yen
    (Rakuten Securities fee schedule).
    """
    # (upper bound of bracket, fee); anything above the table costs 1050
    brackets = (
        (50000, 54),
        (100000, 97),
        (200000, 113),
        (500000, 270),
        (1000000, 525),
        (1500000, 628),
        (30000000, 994),
    )
    for ceiling, fee in brackets:
        if total <= ceiling:
            return fee
    return 1050
def extract_average_token_length(**args):
    """
    Example query feature that computes the average length of the query's
    normalized tokens.

    Returns:
        (function) A feature extraction function that takes (query,
        resources) and returns {'average_token_length': float}
    """
    # pylint: disable=locally-disabled,unused-argument
    def _extractor(query, resources):
        tokens = query.normalized_tokens
        total_length = sum(len(token) for token in tokens)
        return {'average_token_length': total_length / len(tokens)}
    return _extractor
import json
def load_iwp_labels(iwp_labels_path):
    """
    Load IWP labels from a JSON file.

    Takes 1 argument:
      iwp_labels_path - Path to serialized IWP labels.
    Returns 1 value:
      iwp_labels - List of IWP labels read from iwp_labels_path, with the
          "time_step_index" and "z_index" slice indices coerced to int
          regardless of how they were serialized.
    """
    with open(iwp_labels_path, "r") as labels_fp:
        iwp_labels = json.load(labels_fp)
    for label in iwp_labels:
        label["time_step_index"] = int(label["time_step_index"])
        label["z_index"] = int(label["z_index"])
    return iwp_labels
def parse_bm_alleles_col(_str):
    """
    Parse the 'alleles' column of a biomart response.

    The empty string maps to []; anything else is split on '/'.
    """
    return [] if _str == '' else _str.split('/')
def geometry_column_name(df):
    """Get the geometry column name from df.geometry.name, falling back
    to 'geometry' when the attribute is missing.

    Args:
        df (pandas.DataFrame): frame that may expose a geometry accessor
    Returns:
        geom_col (string): geometry column name
    """
    # getattr swallows the AttributeError raised for frames without a
    # geometry accessor, exactly like the original try/except did
    geometry = getattr(df, 'geometry', None)
    return getattr(geometry, 'name', 'geometry')
def save_file(req):
    """Stream a response body to a local file named after the last path
    segment of the request URL; returns that filename."""
    local_filename = req.url.rsplit('/', 1)[-1]
    with open(local_filename, 'wb') as out:
        for chunk in req.iter_content(chunk_size=1024):
            # skip keep-alive (empty) chunks
            if chunk:
                out.write(chunk)
    return local_filename
def initials(name):
    """
    Convert a name to initials form: all words but the last become single
    uppercase initials, the last word is title-cased in full.

    :param name: a string of words.
    :return: the string in initials form, joined with dots.
    """
    words = name.split()
    leading = [word[0].upper() for word in words[:-1]]
    return ".".join(leading + [words[-1].title()])
def should_update(iteration, epoch, settings):
    """
    Tells whether it is time to update the plateaus or not.

    :param iteration: iteration number
    :param epoch: epoch number
    :param settings: settings dictionary
    :return: True if it is time for an update, and False otherwise
    """
    # NOTE(review): the presence check is on `settings` but the value is
    # read from settings['update'] — preserved as-is; confirm intended.
    if 'no_update' in settings and settings['update']['no_update']:
        return False
    if epoch == settings['update']['start_epoch']:
        return True
    if epoch > settings['update']['start_epoch'] and \
            epoch % settings['update']['frequency'] == 0:
        return True
    return settings['update_first_iteration'] and iteration == 0
def is_ref_name(name):
    """Check if a given name is a reference directory: true when the
    second dot-separated component of the path's basename is 'ref'.
    """
    basename = name.split('/')[-1]
    return basename.split('.')[1] == 'ref'
import re
def trimUnicode(s):
    """
    Replace every run of non-ASCII characters in *s* with a single space.
    """
    non_ascii = re.compile(r'[^\x00-\x7F]+')
    return non_ascii.sub(' ', s)
def centroid(img):
    """
    Find the centroid (center pixel) of the given image.

    input::
        img (np.ndarray): input image
    return::
        centroid (tuple): (height // 2, width // 2)
    """
    height, width = img.shape[0], img.shape[1]
    return height // 2, width // 2
def read_fasta(filename):
    """Read a two-line-per-record FASTA file and pad sequences with '-'.

    Parameters
    ----------
    filename : str

    Returns
    -------
    alignments : dict
        Maps header text (without the leading '>' and trailing newline)
        to the sequence padded with '-' to the longest line length.
        NOTE(review): the original docstring said "list", but a dict is
        returned.
    """
    read = []
    alignments = {}
    max_len = 0
    f = open(filename, "r")
    for line in f:
        read.append(line)
        length = len(line)
        # max_len includes the line's newline character, so the padding
        # below is relative to the longest *raw* line — confirm intended
        if length > max_len and '>' not in line:
            max_len = length
    f.close()
    # assumes records are exactly two lines each (header, sequence)
    for i in range(0,len(read),2):
        num = max_len - len(read[i+1])
        # [:-1] strips the newline; if the final line lacks a trailing
        # newline this drops the last sequence character — TODO confirm
        seq = read[i+1][:-1] + num*'-'
        alignments[read[i][1:-1]] = seq
    return alignments
import math
def haversineDegreesToMeters(lat_1, lon_1, lat_2, lon_2):
    """
    Haversine great-circle distance between two lat/lon points, in meters.

    :param lat_1: first latitude point
    :param lon_1: first longitude point
    :param lat_2: second latitude point
    :param lon_2: second longitude point
    :returns: distance in meters
    :reference: http://www.movable-type.co.uk/scripts/latlong.html
    """
    earth_radius_m = 6371000
    delta_lat = math.radians(lat_2 - lat_1)
    delta_lon = math.radians(lon_2 - lon_1)
    a = ((math.sin(delta_lat / 2) ** 2) +
         math.cos(math.radians(lat_1)) * math.cos(math.radians(lat_2)) *
         (math.sin(delta_lon / 2) ** 2))
    central_angle = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    return earth_radius_m * central_angle
def from_migration_import(module_name, fromlist):
    """
    Import a migration file and return the requested attributes.

    :param module_name: name of migration module to import from
        (ex: 001_add_images_table)
    :param fromlist: list of items to import (ex: define_images_table)
    :retval: list of the requested attributes

    Migrations begin with a number (001...), so they aren't valid Python
    identifiers and can't be imported with a normal `import` statement;
    they are loaded dynamically instead so later migrations can reuse
    table factories defined by earlier ones, e.g.:

        (define_images_table,) = from_migration_import(
            '001_add_images_table', ['define_images_table'])
        images = define_images_table(meta)

    Fix: the original called ``__import__(..., level=-1)``; negative
    levels were Python 2's implicit-relative-import flag and raise
    ValueError on Python 3. importlib.import_module performs the
    equivalent absolute import and returns the leaf module directly.
    """
    import importlib
    module_path = 'glance.db.sqlalchemy.migrate_repo.versions.%s' % module_name
    module = importlib.import_module(module_path)
    return [getattr(module, item) for item in fromlist]
import subprocess
def run_cmd(cmd):
    """
    Run *cmd* through the shell via subprocess.

    Args:
        cmd: command string to run
    Returns:
        output: captured stdout of the command (bytes)
    """
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    output, _err = proc.communicate()
    proc.wait()
    return output
def clean_list(match):
    """Convert a matched multi-line yaml list into a one-line form."""
    lines = match.group(0).split("\n")
    items = [line.replace("-", "").strip() for line in lines if line]
    return " {}\n".format(str(items).replace("'", ""))
def swap_1d(perm, i, j):
    """
    Swap two elements of a 1-D numpy array in-place and return the array.

    Examples
    --------
    >>> perm = np.array([2, 1, 2, 3, 4, 5, 6])
    >>> swap_1d(perm, 2, 6)
    array([2, 1, 6, 3, 4, 5, 2])
    """
    tmp = perm[i]
    perm[i] = perm[j]
    perm[j] = tmp
    return perm
def missing_branch(children):
    """True iff any child's predicate routes missing values to it."""
    return any(child.predicate.missing for child in children)
import torch
def soft_frequency(logits, power=2, probs=False):
    """
    Target-distribution sharpening from "Unsupervised Deep Embedding for
    Clustering Analysis" (https://arxiv.org/abs/1511.06335).

    When probs is False, *logits* are first softmaxed over the last axis;
    the result is raised to *power*, divided by per-class frequency, and
    renormalized along dim 2.
    """
    if probs:
        y = logits
    else:
        flat = logits.view(-1, logits.shape[-1])
        y = torch.softmax(flat, dim=1).view(logits.shape)
    frequency = torch.sum(y, dim=(0, 1))
    sharpened = y ** power / frequency
    return sharpened / torch.sum(sharpened, dim=2, keepdim=True)
def _replace_signature(raw_header: bytes, signature: bytes) -> bytes:
"""Replace the 'signature' field in a raw header."""
return signature + raw_header[8:] | 4e1d37f445488f091a62133f733d67f7707a6229 | 691,579 |
import subprocess
def installed_version(package_name):
    """
    Return the installed version string reported by `pip3 show`, or None
    when the package is absent (or the pip output is unparsable).
    """
    command = 'pip3 show {} 2>/dev/null'.format(package_name)
    try:
        output = subprocess.getoutput(command)
        version_line = next(
            line for line in output.split('\n') if line.startswith('Version')
        )
        return version_line.split(':')[1].strip()
    except Exception:
        return None
def Lower(v):
    """Transform a value to a lower-case string.
    >>> s = Schema(Lower)
    >>> s('HI')
    'hi'
    """
    text = str(v)
    return text.lower()
def read_codepoints(in_file='codepoints'):
    """Read a list of codepoint lines from ``in_file``.

    ``in_file`` cannot contain any blank lines or trailing newline!
    """
    with open(in_file, 'r') as handle:
        return handle.readlines()
def dummy_selector_a(node):
    """
    Dummy node selector: matches nodes named 'A', 'B' or 'G'.
    """
    return node['name'] in {'A', 'B', 'G'}
def misp_id(stix2_indicator_uuid: str) -> str:
    """
    Strip the "indicator--" prefix from a STIX-2 indicator ID.

    @param stix2_indicator_uuid The STIX-2 Indicator ID to convert
    @return the bare uuid portion
    """
    prefix_length = len("indicator--")
    return stix2_indicator_uuid[prefix_length:]
import pandas as pd
def get_top_genes(adata, i):
    """Return factor *i*'s gene loadings as a DataFrame sorted by z-score
    (descending), indexed by gene/signature name with columns
    z_score, Fs_diff and pval."""
    table = pd.DataFrame(
        {
            "z_score": adata.uns["zscores"][:, i].tolist(),
            "Fs_diff": adata.uns["Fs_diff"][:, i].tolist(),
            "pval": adata.uns["pval_mat"][:, i].tolist(),
        },
        index=adata.var.index.to_list(),
    )
    return table.sort_values("z_score", ascending=False)
def remove_duplicates(values):
    """
    Return *values* with duplicates removed, keeping first-seen order.
    Source: https://www.dotnetperls.com/duplicates-python
    """
    seen = set()
    unique = []
    for value in values:
        if value in seen:
            continue
        seen.add(value)
        unique.append(value)
    return unique
def findDisappearedNumbers(self, nums):
    """Return every value in 1..len(nums) missing from nums.

    O(n) time | O(1) extra space: marks "seen" in-place by moving each
    positive value toward its home slot nums[value-1]; slots left at 0
    at the end mark the missing numbers. `self` is unused (LeetCode-style
    method signature). NOTE: nums is mutated.
    """
    for idx in range(len(nums)):
        if nums[idx] > 0:
            # keep routing the value at idx to its home slot until idx
            # holds a non-positive (already processed) value
            while nums[idx] > 0:
                num = nums[idx]
                if nums[num-1] < 0:
                    # home slot already marked seen: retire this duplicate
                    nums[idx] = 0
                else:
                    # place num at its home (negated = seen), pull the
                    # displaced value back to idx
                    nums[idx], nums[num-1] = nums[num-1], -nums[idx]
    return [num for num in range(1, len(nums)+1) if nums[num-1] == 0]
def create_popup(js):
    """
    (sequence) -> str
    Render user info from *js* as HTML popup markup.

    Expected positions: js[0]=location, js[1]=image URL, js[2]=name,
    js[3]=friends count, js[4]=followers count, js[5]=created-at,
    js[6]=language.  NOTE(review): the original docstring said "json
    dictionary", but the function indexes js by integer position —
    confirm the caller passes a sequence.  Single quotes are replaced
    with backticks in the returned markup.
    """
    res = """<img src="{}"/><br>
    Name: {}<br>
    Location: {}<br>
    Lang: {}<br>
    Friends: {}<br>
    Followers: {}<br>
    Created at: {}<br>
    """.format(js[1], js[2], js[0], js[6], js[3], js[4], js[5])
    return res.replace("'", "`")
import logging
def check_batch(batch, max_length=None):
    """Print data shapes in a batch and enforce max_length.

    Returns False (and prints a skip notice) when max_length is set and
    the sequence length (aatype dim 2) exceeds it; True otherwise.
    """
    def _show(key, mapping):
        # print the shape only for keys actually present
        if key in mapping:
            print(f'\t{key}: {mapping[key].shape}')
    logging.info(f'Get protein_name: {batch["name"]}')
    for key in ('aatype', 'msa_feat', 'extra_msa', 'masked_msa_only'):
        _show(key, batch["feat"])
    for key in ('all_atom_positions',):
        _show(key, batch["label"])
    seq_len = batch["feat"]['aatype'].shape[2]
    if not max_length is None and seq_len > max_length:
        print(f'\tskip {batch["name"]} due to two long length')
        return False
    return True
def vote_smart_candidate_bio_object_filter(one_candidate_bio):
    """
    Filter down the complete dict from Vote Smart to just the fields we use locally.

    :param one_candidate_bio: Vote Smart candidate-bio object
    :return: dict holding only the locally-used fields
    """
    # fields intentionally excluded: specialMsg, parties, title, shortTitle,
    # name, type, status, firstElect, lastElect, nextElect, termStart,
    # termEnd, district, districtId, stateId
    fields = (
        'candidateId',
        'crpId',  # Open Secrets ID
        'firstName',
        'nickName',
        'middleName',
        'lastName',
        'suffix',
        'birthDate',
        'birthPlace',
        'pronunciation',
        'gender',
        'family',
        'photo',
        'homeCity',
        'homeState',
        'religion',
    )
    return {field: getattr(one_candidate_bio, field) for field in fields}
def mover_torre(tablero, x_inicial, y_inicial, x_final, y_final):
    """
    (list of list, int, int, int, int) -> list of list

    Move a rook ('t'/'T') on the board and return the resulting board.

    :param tablero: list of list representing the board (rows indexed by y)
    :param x_inicial: starting X (column) position
    :param y_inicial: starting Y (row) position
    :param x_final: final X (column) position
    :param y_final: final Y (row) position
    :return: new list of list representing the board after the move
    :raises Exception: if the piece is not a rook, the move is not a straight
        line, the path is blocked, or the destination holds a friendly piece.

    Fixes over the original version:
    - copies each row, so the caller's board is never mutated;
    - horizontal path check now indexes tab[row][col] (was swapped);
    - the path is checked in both directions (the old range() was empty
      when moving toward a lower coordinate);
    - the piece lands on (x_final, y_final) instead of (x_inicial, y_final).
    """
    # Copy every row so the input board is left untouched.
    tab = [fila.copy() for fila in tablero]
    pieza = tab[y_inicial][x_inicial]
    if pieza.lower() != 't':
        raise Exception('La pieza en x = {0} y={1} no es una torre'.format(x_inicial, y_inicial))
    if x_inicial == x_final and y_inicial != y_final:
        # Vertical move: every square strictly between start and end must be empty.
        paso = 1 if y_final > y_inicial else -1
        for y in range(y_inicial + paso, y_final, paso):
            if tab[y][x_inicial] != ' ':
                raise Exception('No hay camino para mover la torre')
    elif x_inicial != x_final and y_inicial == y_final:
        # Horizontal move: same check along the row.
        paso = 1 if x_final > x_inicial else -1
        for x in range(x_inicial + paso, x_final, paso):
            if tab[y_inicial][x] != ' ':
                raise Exception('No hay camino para mover la torre')
    else:
        # Diagonal or null move: not a rook move.
        raise Exception('Movimiento invalido para la torre')
    destino = tab[y_final][x_final]
    # Empty square, or a capture of an opposite-case (enemy) piece.
    if destino == ' ' or (pieza.islower() != destino.islower()):
        tab[y_final][x_final] = pieza
        tab[y_inicial][x_inicial] = ' '
    else:
        raise Exception('No puedo comer mis propias piezas')
    return tab
import fcntl, os
import io
from typing import Any
def set_non_block(io:'io.IOBase')->'Any':
    """Put *io* (anything exposing fileno()) into non-blocking mode.

    Returns whatever fcntl.fcntl returns for the F_SETFL call.
    """
    fd = io.fileno()
    current_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    updated_flags = current_flags | os.O_NONBLOCK
    return fcntl.fcntl(fd, fcntl.F_SETFL, updated_flags)
def totaler(products):
    """Totals the total value of each product.

    Sums the 'base', 'VAT' and 'total' fields across all product dicts
    and returns one dict with the three grand totals.
    """
    return {
        key: sum(product[key] for product in products)
        for key in ('base', 'VAT', 'total')
    }
import argparse
def parse_args():
    """Parse command-line arguments for the CE reporting job."""
    parser = argparse.ArgumentParser()
    # Plain string options with no default; order matters for --help output.
    plain_options = (
        ("--input_file", "log file"),
        ("--url", "url"),
        ("--build_type", "build_type"),
        ("--repo", "repo name"),
        ("--inference_path", "inference path"),
        ("--branch", "branch"),
        ("--task_type", "task type"),
        ("--task_name", "task name"),
        ("--owner", "owner"),
        ("--build_id", "build_id"),
        ("--build_number", "build_number"),
    )
    for flag, description in plain_options:
        parser.add_argument(flag, type=str, help=description)
    # Options that carry defaults.
    parser.add_argument("--ce_api_version", type=str, default="V1", help="ce_api_version")
    parser.add_argument("--job_id", type=str, default="None", help="xly_job_id")
    return parser.parse_args()
def snp_resultsHandover(variantId):
    """Create the resultsHanover dict by inserting the variantId into the template."""
    def _entry(note, url):
        # Both entries share the same handoverType; build it fresh each time
        # so the two dicts stay independent objects.
        return {
            "handoverType" : {
                "id" : "data:1106",
                "label" : "dbSNP ID"
            },
            "note" : note,
            "url" : url
        }
    # The API link drops the leading "rs" prefix from the variant id.
    return [
        _entry("Link to dbSNP database",
               f"https://www.ncbi.nlm.nih.gov/snp/?term={variantId}"),
        _entry("Link to dbSNP API",
               f"https://api.ncbi.nlm.nih.gov/variation/v0/beta/refsnp/{variantId[2:]}"),
    ]
from typing import Counter
def detect_basement(input_str):
    """Return the first time counter goes negative"""
    # Count parentheses until closes outnumber opens; the returned value is
    # the number of parentheses consumed so far (non-paren chars don't count).
    opens = 0
    closes = 0
    for ch in input_str:
        if ch == '(':
            opens += 1
        elif ch == ')':
            closes += 1
        if closes > opens:
            break
    return opens + closes
def instance_supports_efa(instance_type: str) -> bool:
    """Checks if instance supports Amazon Elastic Fabric Adapter"""
    # https://docs.aws.amazon.com/en_us/AWSEC2/latest/UserGuide/efa-start.html
    efa_capable_types = {'c5n.18xlarge', 'i3en.24xlarge', 'p3dn.24xlarge'}
    return instance_type in efa_capable_types
def stk_mesh(hex_1elem_mesh):
    """Simple 1 element mesh with no fields"""
    mesh = hex_1elem_mesh
    # Populate bulk data with edge creation enabled before handing it back.
    mesh.populate_bulk_data(create_edges=True)
    return mesh
def first_name(name):
    """ Return the first name of a name."""
    # Non-strings pass through unchanged.
    if not isinstance(name, str):
        return name
    given, _, _ = name.partition(' ')
    return given.title()
import sqlite3
def get_user_review(user_id, prof_id, course_name):
    """
    Fetches the user review from DB if exists.
    Otherwise returns an empty dict (note: never actually None, despite the
    original wording).

    :param user_id: id of the reviewing user
    :param prof_id: id of the reviewed professor
    :param course_name: name of the course
    :return: dict with review/rating fields, or {} when no row matches
    """
    connection = sqlite3.connect('./db.sqlite3')
    try:
        cursor = connection.cursor()
        cursor.execute("SELECT * FROM review NATURAL JOIN rating WHERE user_id = ? AND prof_id = ? AND course_name = ?;",
                       (user_id, prof_id, course_name,))
        r = cursor.fetchone()
    finally:
        # The original leaked the connection on every call; always close it.
        connection.close()
    user_review = {}
    if r:
        # Column order follows the NATURAL JOIN result: review columns first,
        # then the rating columns that are not join keys.
        user_review = {'review_id': r[0], 'user_id': r[1], 'text': r[2], 'date': r[3], 'semester': r[6], 'year': r[7],
                       'workload': r[8], 'learning': r[9], 'grading': r[10]}
    return user_review
def storeAnnotations(
    paths,
    startTimes=None,
    endTimes=None,
    types=None,
    data=None,
    storageIds=None,
    deleted=None,
):
    """Stores annotations into the tag history system.

    All list parameters are parallel (1-to-1): index i of each list describes
    annotation i, and entries that do not apply may be None. Storage is
    delegated to the underlying historian provider, so support and storage
    format vary by provider.

    Args:
        paths (list[str]): Tag paths to store for, including the source
            provider, e.g. "[HistoryProvider/Gateway:Provider]Path/To/Tag".
            Required even when storage ids are given, because it identifies
            the underlying storage provider.
        startTimes (list[Date]): Event start times; defaults to the current
            time when omitted. Optional.
        endTimes (list[Date]): Event end times, if applicable; omitted end
            times are not stored. Optional.
        types (list[str]): Annotation type ids; "marker" is used when not
            defined. See the Annotation Types docs for details. Optional.
        data (list[str]): Annotation payload, such as text describing the
            meaning of the annotation. Optional.
        storageIds (list[int]): When set, existing annotations are updated
            (or deleted, together with `deleted`) instead of new ones being
            added; storage ids come from a previous storeAnnotations call or
            from the Annotation object. Optional.
        deleted (list[bool]): Flags individual annotations for deletion;
            requires the matching storage id to be set. Optional.

    Returns:
        list[QualifiedValue]: One qualified value per annotation. The quality
        code indicates success or failure; on success the value carries the
        provider-specific storage id, which can be held and reused to update
        or delete the annotation later.
    """
    arguments = (paths, startTimes, endTimes, types, data, storageIds, deleted)
    print(*arguments)
    return None
import re
import os
def _normalize_string_value(value, case):
###############################################################################
"""
Some of the strings are inherently prone to diffs, like file
paths, etc. This function attempts to normalize that data so that
it will not cause diffs.
"""
# Any occurance of case must be normalized because test-ids might not match
if (case is not None):
case_re = re.compile(r'{}[.]([GC])[.]([^./\s]+)'.format(case))
value = case_re.sub("{}.ACTION.TESTID".format(case), value)
if ("/" in value):
# File path, just return the basename
return os.path.basename(value)
elif ("username" in value):
return ''
elif (".log." in value):
# Remove the part that's prone to diff
components = value.split(".")
return os.path.basename(".".join(components[0:-1]))
else:
return value | 9cf9dd84b2f7a7d23e006ec43c0f4a3e92d465c0 | 691,604 |
def lineline(x1, y1, x2, y2, x3, y3, x4, y4, get_intersection=False):
    """ Used to detect line on line intersection (Medium-fast)

    Segment (x1,y1)-(x2,y2) vs segment (x3,y3)-(x4,y4).
    Returns False when the segments do not intersect, True when they do,
    or the [x, y] intersection point when get_intersection is True.

    Fix over the original: parallel segments were handled inconsistently —
    parallel horizontals returned False, but every other parallel orientation
    raised ZeroDivisionError and fell through to True/[0, 0], a false
    positive. The shared denominator is now tested up front, so all parallel
    (and degenerate zero-length) cases uniformly return False.
    """
    # Both uA and uB share this denominator; it is zero iff the segments
    # are parallel (or one segment is a single point).
    denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
    if denom == 0:
        return False
    uA = ((x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)) / denom
    uB = ((x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)) / denom
    # The intersection lies within both segments only when both parameters
    # are inside [0, 1].
    if 0 <= uA <= 1 and 0 <= uB <= 1:
        return [x1 + (uA * (x2 - x1)), y1 + (uA * (y2 - y1))] if get_intersection else True
    return False
def get_html_xml_path(path, build_name):
    """Parse and replace $BUILD_NAME variable in the path.

    Args:
        path(str): path to html report
        build_name(str): software build number

    Returns:
        str: path with __BUILD_NAME__ substituted, or "undetermined" when
        `path` does not support str.replace (e.g. None).
    """
    try:
        resolved = path.replace("__BUILD_NAME__", build_name)
    except AttributeError:
        return "undetermined"
    return resolved
def insert_music_library_col(
    row, query_cols: tuple, table_name: str, session, col_name_file_map: dict
) -> None:
    """Insert one row of data (from pandas itertuples) into the named table.

    Builds an INSERT statement for `query_cols` and executes it with the
    given cassandra session; values are pulled from `row` by mapping our
    table column names to the csv column names via `col_name_file_map`.

    Args:
        row: pandas object row from pandas itertuples method
        query_cols: tuple of column names to insert
        table_name: name of the table to insert into
        session: cassandra session object
        col_name_file_map: mapping of table column name -> csv column name

    Returns:
        None
    """
    def _lookup(source_row, column, mapping):
        # Translate our column name into the csv's name, then read the value
        # from the row's dict form; keeps csv naming quirks out of callers.
        return source_row._asdict().get(mapping.get(column))

    columns_clause = ", ".join(query_cols)
    placeholders = ", ".join("%s" for _ in query_cols)
    statement = f"INSERT INTO {table_name} ({columns_clause}) VALUES ({placeholders})"
    values = tuple(_lookup(row, name, col_name_file_map) for name in query_cols)
    try:
        session.execute(statement, values)
    except Exception as e:
        # Best-effort insert: report the failure but keep processing rows.
        print(e)
def _parse_param_batch(param):
"""Work for both numpy and tensor"""
N = param.shape[0]
p_ = param[:, :12].view(N, 3, -1)
p = p_[:, :, :3]
offset = p_[:, :, -1].view(N, 3, 1)
alpha_shp = param[:, 12:52].view(N, -1, 1)
alpha_exp = param[:, 52:].view(N, -1, 1)
return p, offset, alpha_shp, alpha_exp | f4c897a100c0c49ec7f77e96cb4d9e4d9b87b916 | 691,608 |
def capacity_cost_rule(mod, g, p):
    """
    The capacity cost of projects of the *gen_spec* capacity type is a
    pre-specified number: the project's specified capacity (MW) times its
    per-MW fixed cost, for each of the project's operational periods.
    """
    capacity_mw = mod.gen_spec_capacity_mw[g, p]
    fixed_cost_per_mw_yr = mod.gen_spec_fixed_cost_per_mw_yr[g, p]
    return capacity_mw * fixed_cost_per_mw_yr
def asst70_propositions(moa_aid70_proposition):
    """Create assertion70 propositions test fixture."""
    propositions = [moa_aid70_proposition]
    return propositions
def package_positions():
    """
    Get the backpack position(s) on screen.

    :return: list of coordinate tuples
    """
    # Single hard-coded screen region; tuple element order (110, 170, 55, 90)
    # is not documented here -- presumably two coordinate pairs, TODO confirm.
    backpack_region = (110, 170, 55, 90)
    return [backpack_region]
def get_locations(data: dict) -> list:
    """
    Get users' locations from the dictionary. Return list of lists, every one
    of which contains user's nickname and location.

    >>> get_locations({'users': [{'screen_name': 'Alina',
    ...                           'location': 'Lviv, Ukraine'}]})
    [['Alina', 'Lviv, Ukraine']]
    """
    return [
        [user['screen_name'], user['location']]
        for user in data['users']
    ]
def _label(label: str) -> str:
"""
Returns a query term matching a label.
Args:
label: The label the message must have applied.
Returns:
The query string.
"""
return f'label:{label}' | 10472e6850923d2f35bdff1fb3603f82293a3d15 | 691,613 |
import os
def get_all_subdir_paths(dirpath):
    """Return list of subdirectory paths for a given directory (recursive)."""
    return [
        os.path.join(root, name)
        for root, directories, _filename in os.walk(dirpath)
        for name in directories
    ]
from typing import List
from typing import Dict
from typing import Iterable
def get_invalid_value(ticket: List[int], rules: Dict[str, List[Iterable]]) -> int:
    """Sum the ticket values that fall outside every rule's valid ranges.

    A value is invalid when no range of any rule contains it; the sum of
    all such values is returned (0 for a fully valid ticket).
    """
    valid_ranges = [rng for ranges in rules.values() for rng in ranges]
    return sum(
        value for value in ticket
        if all(value not in rng for rng in valid_ranges)
    )
def foldr_lazy(combine, initial, xs):
    """Foldable t => (a -> (() -> b) -> (() -> b)) -> b -> t a -> b

    Nonstrict right-associative fold.

    This function is similar to ``foldr`` in Haskell, but note that the combine
    function uses singleton lambda functions to achieve laziness and to enable
    tail-call optimization.

    Let's have a closer look on the signature of ``combine`` to understand the
    possibilities a bit better:

    The signature of ``combine`` is ``a -> (() -> b) -> (() -> b)``. You can
    think of these ``() -> b`` as "lazy" accumulated values. ``() -> b`` is
    just a lambda function that takes no arguments and returns a value of type
    ``b``. Let's name the first argument ``x :: a`` and the second argument
    ``lacc :: () -> b`` (as "lazy accumulator"). Now we can explain how to make
    use of some powerful features:

    - When ``combine`` doesn't use it's second argument ``lacc``, the recursion
      stops (i.e., short-circuits). Therefore, infinite lists might be
      processed in finite time.

    - When ``combine`` returns ``lacc`` as such without modifying it, tail-call
      optimization triggers. Therefore, very long lists might be processed
      efficiently without exceeding maximum recursion depth or overflowing the
      stack.

    - When ``combine`` uses ``lacc`` but the evaluation ``lacc()`` is
      "post-poned" (e.g., it happens inside yet another lambda function), the
      recursion becomes "lazy" as it will continue only when ``lacc()``
      actually gets evaluated. Therefore, infinite lists can be processed
      lazily.

    Note that Python's built-in ``reduce`` function doesn't support these
    features.

    Examples
    --------

    Short-circuiting and tail-call optimization for an infinite list:

    >>> xs = iterate(lambda x: x + 1, 1)
    >>> my_any = foldr_lazy(lambda x, lacc: (lambda: True) if x else lacc, False)
    >>> my_any(xs.map(lambda x: x > 100000))
    True

    Looking at ``(lambda: True) if x else lacc``, we can see that when the left
    side of the if-expression is evaluated, the fold short-circuits because
    ``True`` makes no use of ``lacc``. When the right side of the if-expression
    is evaluated, the fold uses efficient tail-call optimization because
    ``lacc`` is returned unmodified.

    Lazy evaluation makes it possible to transform infinite structures:

    >>> from haskpy import Cons, Nil
    >>> my_map = lambda f: foldr_lazy(lambda x, lacc: lambda: Cons(f(x), lacc), Nil)
    >>> my_map(lambda x: x ** 2)(xs)
    Cons(1, Cons(4, Cons(9, Cons(16, Cons(25, Cons(36, ...))))))

    The infinite list gets mapped lazily because ``lacc`` isn't called at this
    time, it is delayed as the linked list will call it only when the next
    element is requested.

    Note that sometimes you shouldn't use ``foldr_lazy`` because it can cause
    stack overflow (or rather hit Python's recursion limit) because of too deep
    recursion. This can happen with a long list when the recursion doesn't
    short-circuit (early enough), tail-call optimization cannot be used and the
    recursion doesn't pause for laziness. A simple such example is a summation:

    >>> my_sum = foldr_lazy(lambda x, lacc: lambda: x + lacc(), 0)
    >>> my_sum(xs.take(100))
    5050
    >>> my_sum(xs.take(1000000))
    Error

    For this kind of folds, you should use ``foldl``.

    As already shown, ``foldr_lazy`` generalizes ``map`` and ``any``. It can
    also be used to implement many other functions in a may that may seem a bit
    surprising at first, for instance:

    >>> from haskpy import Just, Nothing
    >>> my_head = foldr_lazy(lambda x, lacc: lambda: Just(x), Nothing)
    >>> my_head(xs)
    Just(1)
    >>> my_head(Nil)
    Nothing

    """
    # Thin delegating wrapper: all of the laziness machinery documented above
    # lives in the Foldable instance's own foldr_lazy implementation.
    return xs.foldr_lazy(combine, initial)
import torch
def collate_fn(data):
    """Sorts the input(the mini-batch) by length and truncates all the data points to the max length of the input"""
    # Sort samples (x, y, length) by length, longest first; the first sample
    # then defines the common sequence length for the whole batch.
    ordered = sorted(data, key=lambda sample: sample[2], reverse=True)
    longest = ordered[0][2]
    xs = torch.stack([x[:longest] for x, _, _ in ordered])
    ys = torch.stack([y[:longest] for _, y, _ in ordered])
    lengths = [length for _, _, length in ordered]
    return xs, ys, lengths
def seasonal_naive(data, n=7, **kwargs):
    """Seasonal naive forecast: the next point equals the value observed
    ``n`` points earlier in the series.

    ``n`` is measured in observations, not time units -- e.g. ``n=7``
    captures weekly cycles in daily data.

    Args:
        data (np.array): Observed data, presumed to be ordered in time.
        n (int): period of data seasonality
        **kwargs: accepted (and ignored) for interface compatibility with
            other forecasters.

    Returns:
        float: a single-valued forecast for the next value in the series.
    """
    return data[-n]
import random
def train_test_split(dataArff, test_pct=0.5, random_seed=None):
    """
    Generates partition (training, testing) pairs from the items in X

    author: Sébastien Destercke
    source: https://github.com/sdestercke/classifip/classifip/evaluation/__init__.py
    """
    training = dataArff.make_clone()
    # Seed (when requested) right before shuffling so the split is reproducible.
    if random_seed is not None:
        random.seed(random_seed)
    random.shuffle(training.data)
    testing = training.make_clone()
    # First (1 - test_pct) of the shuffled rows train, the remainder test.
    split_at = int(len(dataArff.data) * (1 - test_pct))
    training.data = training.data[:split_at]
    testing.data = testing.data[split_at:]
    return training, testing
import struct
def NS(t):
    """
    net string

    Prefix the bytes `t` with its length as a 4-byte big-endian unsigned int.
    """
    length_prefix = struct.pack('!L', len(t))
    return length_prefix + t
import random
def pick(obj):
    """randomly pick an element from object"""
    (chosen,) = random.sample(obj, 1)
    return chosen
import numpy
def get_chemu_all_sol(mf, oa, ob, anti=False):
    """Get unrestricted ERIs in chemist's notation for given orbital Coeffs.

    Builds the full spin-orbital two-electron-integral tensor from separate
    alpha/beta MO coefficient blocks, laid out as [alpha | beta] along every
    axis.

    Args:
        mf: mean-field object providing ``with_df.ao2mo`` and ``kpt``
            (presumably a PySCF periodic SCF object -- TODO confirm).
        oa: alpha-spin MO coefficient matrix; only ``shape[1]`` (number of
            alpha orbitals) and ``dtype`` are used directly here.
        ob: beta-spin MO coefficient matrix.
        anti (bool): if True, return the antisymmetrized combination
            ``I - I.transpose(0, 3, 2, 1)`` instead of the plain integrals.

    Returns:
        numpy.ndarray of shape ``(n, n, n, n)`` with ``n = na + nb``.
    """
    na = oa.shape[1]
    nb = ob.shape[1]
    n = na + nb
    # Same-spin blocks (aa|aa) and (bb|bb), transformed into the MO basis.
    Ia = mf.with_df.ao2mo(
        (oa, oa, oa, oa), mf.kpt, compact=False).reshape(na, na, na, na)
    Ib = mf.with_df.ao2mo(
        (ob, ob, ob, ob), mf.kpt, compact=False).reshape(nb, nb, nb, nb)
    # Mixed-spin block (aa|bb); the (bb|aa) block below is its transpose.
    Iab = mf.with_df.ao2mo(
        (oa, oa, ob, ob), mf.kpt, compact=False).reshape(na, na, nb, nb)
    # Assemble the combined tensor: alpha indices first, then beta.
    Id = numpy.zeros((n, n, n, n), dtype=oa.dtype)
    Id[:na, :na, :na, :na] = Ia
    Id[na:, na:, na:, na:] = Ib
    Id[:na, :na, na:, na:] = Iab
    # (bb|aa) = (aa|bb) with the two electron-index pairs swapped.
    Id[na:, na:, :na, :na] = Iab.transpose((2, 3, 0, 1))
    if anti:
        return Id - Id.transpose(0, 3, 2, 1)
    else:
        return Id
import re
def probability_cube_name_regex(cube_name):
    """
    Regular expression matching IMPROVER probability cube name. Returns
    None if the cube_name does not match the regular expression (ie does
    not start with 'probability_of').

    Args:
        cube_name (str):
            Probability cube name
    """
    pattern = (
        '(probability_of_)'  # always starts this way
        '(?P<diag>.*?)'  # named group for the diagnostic name
        '(_in_vicinity|)'  # optional group, may be empty
        '(?P<thresh>_above_threshold|_below_threshold|_between_thresholds|$)'
    )
    return re.compile(pattern).match(cube_name)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.