content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def beaufort_limits():
    """
    Return the (min, max) wind speeds for each Beaufort sea state.

    Values follow https://nl.wikipedia.org/wiki/Schaal_van_Beaufort
    :return: list of (lower, upper) wind-speed tuples, one per Beaufort number 0-12.
    """
    lower_bounds = (0, 0.2, 1.5, 3.3, 5.4, 7.9, 10.7, 13.8, 17.1, 20.7, 24.4, 28.5, 32.7)
    upper_bounds = (0.2, 1.5, 3.3, 5.4, 7.9, 10.7, 13.8, 17.1, 20.7, 24.4, 28.4, 32.6, 100)
    return list(zip(lower_bounds, upper_bounds))
def _keep(window, windows):
"""Helper function for creating rolling windows."""
windows.append(window.copy())
return -1. # Float return value required for Pandas apply. | d7e8356587975a5ee07990f85f2a88e74fdff1d4 | 695,527 |
import os
import subprocess
def run_test_coverage(directory):
    """
    Run pytest inside the single top-level folder contained in ``directory``.

    Walks ``directory`` looking for the first level that contains exactly one
    sub-folder, changes the working directory into it (side effect!), and runs
    ``py.test`` there.

    :param directory: directory path (absolute or relative) to search
    :return: str of the combined stdout/stderr of the pytest run, or an error
        message when no level with exactly one sub-folder is found
    """
    for (_root, _dirs, _) in os.walk(os.path.abspath(directory)):
        if len(_dirs) == 1:
            # Exactly one folder at this level: step into it and run pytest.
            top_dir = os.path.join(_root, _dirs[0])
            os.chdir(top_dir)
            # Capture stdout and stderr together.
            sp = subprocess.run('py.test', stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
            return sp.stdout.decode("utf-8")
    # No level had exactly one sub-folder. (The original appended this
    # message once per walked directory; report it once instead.)
    return 'Multiple directories contained in zip file'
import csv
def export_csv(path, data):
    """
    Export ``data`` (a list of dicts sharing the same keys) to a CSV file.

    The header row is taken from the keys of the first record; non-numeric
    values are quoted.  An empty ``data`` list produces an empty file instead
    of raising IndexError (which the original did).

    :param path: destination file path
    :param data: list of dicts; all rows are written with the first row's keys
    :return: None
    """
    with open(path, "w", newline="") as file:
        if not data:
            # Nothing to export; leave an empty file.
            return None
        writer = csv.DictWriter(file, fieldnames=data[0].keys(), quotechar='"',
                                quoting=csv.QUOTE_NONNUMERIC)
        writer.writeheader()
        return writer.writerows(data)
import torch
def quaternion_to_angle_axis(quaternion: torch.Tensor) -> torch.Tensor:
    """Convert quaternion vector to angle axis of rotation.
    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h

    The scalar (real) part is expected first, i.e. (w, x, y, z) — component 0
    is used as cos(theta/2) below.

    Args:
        quaternion (torch.Tensor): tensor with quaternions.
    Return:
        torch.Tensor: tensor with angle axis of rotation.
    Shape:
        - Input: :math:`(*, 4)` where `*` means, any number of dimensions
        - Output: :math:`(*, 3)`
    Example:
        >>> quaternion = torch.rand(2, 4)  # Nx4
        >>> angle_axis = kornia.quaternion_to_angle_axis(quaternion)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))
    if not quaternion.shape[-1] == 4:
        raise ValueError(
            "Input must be a tensor of shape Nx4 or 4. Got {}".format(
                quaternion.shape))
    # unpack input and compute conversion
    q1: torch.Tensor = quaternion[..., 1]
    q2: torch.Tensor = quaternion[..., 2]
    q3: torch.Tensor = quaternion[..., 3]
    # |vector part|^2 = sin^2(theta/2); component 0 is cos(theta/2).
    sin_squared_theta: torch.Tensor = q1 * q1 + q2 * q2 + q3 * q3
    sin_theta: torch.Tensor = torch.sqrt(sin_squared_theta)
    cos_theta: torch.Tensor = quaternion[..., 0]
    # Negating both atan2 arguments when the real part is negative keeps the
    # recovered angle in the canonical range.
    two_theta: torch.Tensor = 2.0 * torch.where(
        cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta),
        torch.atan2(sin_theta, cos_theta))
    # Scale factor theta / sin(theta/2); fall back to the small-angle limit 2
    # when the vector part is zero (avoids selecting a division by zero).
    k_pos: torch.Tensor = two_theta / sin_theta
    k_neg: torch.Tensor = 2.0 * torch.ones_like(sin_theta)
    k: torch.Tensor = torch.where(sin_squared_theta > 0.0, k_pos, k_neg)
    # Scale the vector part by k to obtain the angle-axis vector.
    angle_axis: torch.Tensor = torch.zeros_like(quaternion)[..., :3]
    angle_axis[..., 0] += q1 * k
    angle_axis[..., 1] += q2 * k
    angle_axis[..., 2] += q3 * k
    return angle_axis
import re
def load_lex(fp):
    """Parse a lex-definition file into (name, value) pairs.

    Each non-comment line has the form ``NAME := <python expression>``; the
    right-hand side is evaluated with ``eval`` (the file is trusted input).

    :param fp: filename of the lex file.
    :return: list of binary tuples (name, evaluated value).
    """
    comment_re = re.compile(r'(?m)^ *#.*\n?')

    def _strip(raw):
        # Remove full-line comments; comment-only lines become ''.
        return comment_re.sub('', str(raw))

    with open(fp, 'r') as handle:
        entries = [_strip(raw) for raw in handle.read().splitlines()
                   if _strip(raw)]
    specs = []
    for entry in entries:
        parts = re.split(r"\s*:=\s*", entry)
        specs.append((parts[0], eval(parts[1])))
    return specs
import re
def split_frame_coord_str(frame_coord_str):
    """
    Extract the integers from a frame-coordinate string such as "(1,2)\n(3,4)".

    The string is split on parentheses and commas; empty pieces and bare
    newlines are discarded before integer conversion.

    :param frame_coord_str: coordinate string
    :return: list of parsed integers
    :rtype: [int]
    """
    pieces = re.split(r'[(,)]', frame_coord_str)
    return [int(piece) for piece in pieces if piece not in ('', '\n')]
def utrain_model(t, v_t1, v_t):
    """
    Compute the traction force needed by the train model at sample ``t``.

    :param t: Train sample time point
    :param v_t1: t+1 sample point velocity
    :param v_t: t sample point velocity
    :return: needed traction force according to the Train model
    """
    # The model is piecewise in time: each 500-sample segment has its own
    # coefficients, and u_t is solved from the segment's discrete dynamics
    # v_{t+1} = a*v_t^2 + b*v_t + c + d*u_t.
    # NOTE(review): the coefficients appear to be empirically identified —
    # confirm against the source of this train model before changing them.
    if t < 500:
        u_t = (v_t1 + 0.0002 * (v_t ** 2) - 1.0005 * v_t + 0.0035) / 0.0054
    elif t < 1000:
        u_t = (-0.00007 * (v_t ** 2) + 1.0007 * v_t - 0.0026 - v_t1) / (- 0.0050)
    elif t < 1500:
        u_t = (0.00006 * (v_t ** 2) + 0.9987 * v_t - 0.0030 - v_t1) / (-0.0061)
    elif t < 2000:
        u_t = (0.0002 * (v_t ** 2) + 0.9987 * v_t - 0.0041 - v_t1) / (-0.0066)
    else:
        u_t = (0.0004 * (v_t ** 2) + 0.9977 * v_t - 0.0030 - v_t1) / (-0.0061)
    return u_t
def ips_to_metric(d, min_depth, max_depth):
    """
    Convert an inverse-perspective sample position to a metric depth.

    https://github.com/fyu/tiny/blob/4572a056fd92696a3a970c2cffd3ba1dae0b8ea0/src/sweep_planes.cc#L204
    Args:
        d: inverse perspective sampling position in [0, 1]
        min_depth: in meter (returned at d == 0)
        max_depth: in meter (returned at d == 1)
    Returns:
        Metric depth in meters.
    """
    denominator = max_depth - (max_depth - min_depth) * d
    return (max_depth * min_depth) / denominator
import re
def check_text(line):
    """Return False when ``line`` matches any blacklisted heading pattern.

    A line is rejected when it is empty, contains a blacklisted heading word
    (CHAPTER / Part / Section), or looks like a lettered list item such as
    "A. something".  Lines mentioning REPEALED are always accepted.
    """
    if line == "":
        return False
    if "REPEALED" in line:
        # REPEALED overrides every other rejection rule.
        return True
    if re.search('([A-Z])\. +(\w+)', line) is not None:
        return False
    return not any(word in line for word in ("CHAPTER", "Part", "Section"))
def process_args(args):
    """
    Post-process parsed command-line arguments in place.

    ``--dump-all`` implies every individual dump flag, and ``--out`` is split
    into an output directory and a filename prefix on its last '/'.

    :param args: argparse.Namespace with dump_all / dump_pdb / dump_fasta /
        dump_trb / dump_npz / out attributes
    :return: the same namespace, mutated
    """
    if args.dump_all:
        # --dump-all is shorthand for every individual dump option.
        args.dump_pdb = True
        args.dump_fasta = True
        args.dump_trb = True
        args.dump_npz = True
    if args.out is not None:
        # Split "<dir>/<prefix>" once instead of re-splitting per attribute.
        parts = args.out.split('/')
        args.outdir = '/'.join(parts[:-1])
        args.prefix = parts[-1]
    return args
from collections import Counter
def duplicate_count(text: str) -> int:
    """Count the distinct characters that occur more than once in ``text``.

    Comparison is case-insensitive.  Uses a single Counter pass instead of
    the original O(n^2) ``str.count`` per character.

    Examples:
        >>> assert duplicate_count("abcde") == 0
        >>> assert duplicate_count("abcdea") == 1
        >>> assert duplicate_count("indivisibility") == 1
    """
    counts = Counter(text.lower())
    return sum(1 for occurrences in counts.values() if occurrences > 1)
import torch
def min_lp(x: torch.Tensor, y: torch.Tensor, p: int = -5):
    """
    Smooth elementwise-minimum approximation via a p-norm with negative p.

    Parameters
    ----------
    x, y: the input tensors (expected strictly positive — a zero entry makes
        ``x ** p`` blow up for negative ``p``)
    p: the norm exponent; more negative values approximate ``min`` tighter
    Returns
    -------
    Tensor approximating the elementwise minimum of ``x`` and ``y``.
    """
    powered_sum = x ** p + y ** p
    return powered_sum ** (1. / p)
def _get_id_from_extension_link(ext_link):
"""Get the id from an extension link.
Expects the id to come after the "/extension/service/" parts of
the link
Example ext_link: '/admin/extension/service/12345/'
Example return: '12345'
:param str ext_link: the extension link to extract the id from
:return: the extension id
:rtype: str
"""
link_dirs = ext_link.split('/')
num_dirs = len(link_dirs)
ind = 0
while ind < num_dirs:
# Ensure that the id comes after /extension/service
if link_dirs[ind] == 'extension' and ind < num_dirs - 2 and \
link_dirs[ind + 1] == 'service':
return link_dirs[ind + 2]
ind += 1
return '' | 1a4ee4ea3042af22e989bb5b12bda4e0285e8719 | 695,541 |
def get_scale(bounding_box, target_size):
    """
    Compute the scale that maps the smaller bounding-box side to target_size.

    :param bounding_box: object with a ``bounds`` (minx, miny, maxx, maxy) tuple
    :param target_size: target size for the smaller bounding-box side
    :return: float
    """
    bounds = bounding_box.bounds
    width = bounds[2] - bounds[0]
    height = bounds[3] - bounds[1]
    return target_size / min(width, height)
def line_range(lines, ind1, comment_flag='#'):
    """
    Find the end of a run of data lines within a line list.

    Starting just after ``ind1``, scan forward for the next comment line —
    a line beginning with ``comment_flag`` ('#' by default).  Lines that are
    not comments are assumed to be data lines.

    Parameters
    ----------
    lines : A list of input lines (assumed to be from the readlines()
            function)
    ind1 : A starting index in the list of lines
    comment_flag: An optional string variable that marks comment lines

    Returns
    -------
    n1 : an integer index of the next comment line after ``ind1``, or
         ``len(lines)`` when no further comment line exists (so the range
         runs to the end of the list)
    """
    for n1 in range(ind1 + 1, len(lines)):
        if lines[n1].startswith(comment_flag):
            return n1
    return len(lines)
from typing import Optional
import re
def _extract_job_id(job_name: str) -> Optional[str]:
"""Extracts job id from job name.
Args:
job_name: The full job name.
Returns:
The job id or None if no match found.
"""
p = re.compile(
'projects/(?P<project_id>.*)/locations/(?P<region>.*)/pipelineJobs/(?P<job_id>.*)'
)
result = p.search(job_name)
return result.group('job_id') if result else None | a9e9341961ae9f9df5a09b943a3e90c23fdace25 | 695,544 |
import argparse
def parse_args():
    """Parse argument values for this script from the command line."""
    parser = argparse.ArgumentParser(description='Arguments required for script.')
    parser.add_argument(
        '-t', '--job-type',
        required=True,
        choices=['process', 'analyze'],
        help='process or analysis',
    )
    return parser.parse_args()
import argparse
def get_cifar_parser_params():
    """
    Build the base CIFAR training configuration and parse it from sys.argv.

    :return: argparse.Namespace holding data, optimizer, architecture,
        regularization and experiment options.
    """
    parser = argparse.ArgumentParser(description="cifar configures")
    # Data and logging options.
    parser.add_argument('--data', type=str, default='../data',
                        help='location of the data corpus')
    parser.add_argument('--batch_size', type=int, default=32,
                        help='batch size')
    # Optimizer hyper-parameters.
    parser.add_argument('--learning_rate', type=float, default=0.025,
                        help='init learning rate')
    parser.add_argument('--learning_rate_min', type=float, default=0.001,
                        help='min learning rate')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='momentum')
    parser.add_argument('--weight_decay', type=float, default=3e-4,
                        help='weight decay')
    parser.add_argument('--report_freq', type=float, default=50,
                        help='report frequency')
    parser.add_argument('--gpu', type=int, default=1,
                        help='gpu device id')
    parser.add_argument('--epochs', type=int, default=50,
                        help='num of training epochs')
    # Network architecture options.
    parser.add_argument('--init_channels', type=int, default=16,
                        help='num of init channels')
    parser.add_argument('--layers', type=int, default=8,
                        help='total number of layers')
    parser.add_argument('--model_path', type=str, default='saved_models',
                        help='path to save the model')
    # Regularization options.
    parser.add_argument('--cutout', action='store_true', default=False,
                        help='use cutout')
    parser.add_argument('--cutout_length', type=int, default=16,
                        help='cutout length')
    parser.add_argument('--drop_path_prob', type=float, default=0.3,
                        help='drop path probability')
    parser.add_argument('--save', type=str, default='EXP',
                        help='experiment name')
    parser.add_argument('--seed', type=int, default=2,
                        help='random seed')
    parser.add_argument('--grad_clip', type=float, default=5,
                        help='gradient clipping')
    parser.add_argument('--train_portion', type=float, default=0.5,
                        help='portion of training data')
    # Architecture-search (DARTS-style) options.
    parser.add_argument('--unrolled', action='store_true', default=False,
                        help='use one-step unrolled validation loss')
    parser.add_argument('--arch_learning_rate', type=float, default=3e-4,
                        help='learning rate for arch encoding')
    parser.add_argument('--arch_weight_decay', type=float, default=1e-3,
                        help='weight decay for arch encoding')
    args = parser.parse_args()
    return args
def add(a,b):
    """
    Return the sum of ``a`` and ``b``.

    :param a: first addend
    :param b: second addend
    :return: a + b
    """
    return a + b
def _get_QDoubleSpinBox(self):
    """
    Get current value for QDoubleSpinBox.

    Intended to be bound to a QDoubleSpinBox widget, so ``self`` is the spin
    box itself; simply forwards to its ``value()`` accessor.
    """
    return self.value()
import re
def clean_string(s: str) -> str:
    """Clean a string such that it is a valid python identifier.

    Dots become "p" (point), hyphens become "m" (minus), any remaining
    non-alphanumeric character becomes "_", and a leading digit gets an
    underscore prefix.

    :param s: arbitrary input string
    :return: a valid identifier ("_" for an empty or whitespace-only input)
    """
    s = s.strip()
    s = s.replace(".", "p")  # point
    s = s.replace("-", "m")  # minus
    s = re.sub("[^0-9a-zA-Z]", "_", s)
    if not s:
        # The original raised IndexError on empty input; an underscore is
        # still a valid identifier.
        return "_"
    if s[0] in "0123456789":
        s = "_" + s
    return s
def addition(a, b):
    """
    Add two given values.

    Parameters:
        a (int): First value
        b (int): Second value

    Returns:
        int: sum result
    """
    total = a + b
    return total
def str2num(val: str):
    """
    Try to convert ``val`` to int, then float, else keep it as a string.
    :param val: String value
    :return: int, float or the original string
    """
    for converter in (int, float):
        try:
            return converter(val)
        except ValueError:
            continue
    return val
def _same_contents(a, b):
"""Perform a comparison of the two files"""
with open(a, 'r') as f:
a_data = f.read()
with open(b, 'r') as f:
b_data = f.read()
return a_data == b_data | 012fd08ba2be88f5cc2e3e2c5a454f8bb90a0737 | 695,552 |
import hashlib
def hash_file(filename):
    """
    Compute the MD5 digest of a file.

    Reads in fixed-size chunks so arbitrarily large files do not have to be
    held in memory at once (the original read the whole file in one call).

    Args:
        filename (str): name of file to hash
    Returns:
        bytes: the 16-byte MD5 digest of the file contents
    """
    digest = hashlib.md5()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(65536), b""):
            digest.update(chunk)
    return digest.digest()
def validate_sequence(password):
    """Check for an increasing straight of at least three consecutive values.

    Passwords must include one increasing straight of at least three
    letters, like abc, bcd, cde, and so on, up to xyz.

    Accepts either a sequence of integer code points (original behavior) or
    a plain string of characters (the original raised TypeError on strings
    despite the docstring promising letters).

    :param password: string or sequence of ints
    :return: True when a run of three consecutive values exists
    """
    # Normalize characters to code points so both str and int input work.
    codes = [ord(c) if isinstance(c, str) else c for c in password]
    for i in range(len(codes) - 2):
        if codes[i] == codes[i + 1] - 1 == codes[i + 2] - 2:
            return True
    return False
def start_error(error_kls):
    """Start error fixture.

    Returns a callable that raises ``error_kls(name, kwargs)`` when invoked.
    """
    def _raiser(name: str, **kwargs):
        raise error_kls(name, kwargs)
    return _raiser
import torch
def corr2d_stack(X, K):
    """Pair up the 0th (channel) dimension of `X` and `K`, matrix-multiply
    each pair, and stack the results channel-last.
    """
    products = [x_ch.matmul(k_ch) for x_ch, k_ch in zip(X, K)]
    stacked = torch.stack(products).squeeze(-1)
    return stacked.permute((1, 2, 0))
def smallest_multiple_of_n_geq_m(n: int, m: int) -> int:
    """
    Returns the smallest multiple of n greater than or equal to m.
    :param n: A strictly positive integer.
    :param m: A non-negative integer.
    :return: The smallest multiple of n that is greater or equal to m.
    """
    remainder = m % n
    # Round m up by the distance to the next multiple (0 when already one).
    shift = (n - remainder) % n
    return m + shift
def easy_read_disf_format_pos(words, tags):
    """Render an easy-read inline disfluency-tagged string.

    Each word is prefixed by its tag with internal spaces turned into
    underscores; the tagged words are joined by single spaces.
    """
    merged = []
    for idx, word in enumerate(words):
        normalized_tag = tags[idx].replace(" ", "_")
        merged.append(normalized_tag + word)
    return " ".join(merged)
def prettier_tuple(the_tuple, indent=4):
    """
    Format a tuple as a multi-line string, one element per line.

    :param the_tuple: tuple to render
    :type the_tuple: tuple
    :param indent: number of spaces each element line is indented by
    :type indent: int
    :return: "()" for an empty/falsy tuple, otherwise a multi-line rendering
    :rtype: str
    """
    if not the_tuple:
        return "()"
    pad = " " * indent
    separator = ",\n" + pad
    body = separator.join(str(item) for item in the_tuple)
    return "(\n" + pad + body + ",\n)"
def is_valid_interval(x):
    """
    Returns true iff x is a well-shaped concrete time interval (i.e. has valid beginning and end).

    ``x`` is expected to be an ontology individual exposing ``hasBeginning``
    and ``hasEnd`` lists whose first items carry
    ``inTimePosition[0].numericPosition`` — presumably an OWL-Time style
    model; confirm against the ontology definition.  The interval is valid
    when both positions exist and the beginning is strictly before the end.
    """
    try:
        # Walk the hasBeginning/hasEnd -> inTimePosition -> numericPosition
        # chains defensively: any missing link short-circuits to False, and a
        # TypeError (e.g. unorderable positions) also counts as invalid.
        return hasattr(x, "hasBeginning") and len(x.hasBeginning) > 0 and \
            len(x.hasBeginning[0].inTimePosition) > 0 and \
            len(x.hasBeginning[0].inTimePosition[0].numericPosition) > 0 and hasattr(x, "hasEnd") and \
            len(x.hasEnd) > 0 and len(x.hasEnd[0].inTimePosition) > 0 and \
            len(x.hasEnd[0].inTimePosition[0].numericPosition) > 0 and \
            x.hasBeginning[0].inTimePosition[0].numericPosition < x.hasEnd[0].inTimePosition[0].numericPosition
    except TypeError:
        return False
import torch
def sample_portion(vec, p=0.5):
    """
    Subsamples a fraction (given by p) of the given batch. Used by
    Quantile when the data gets very very large.

    Each row of ``vec`` is kept independently with probability ``p``, so the
    returned tensor has a random number of rows (~p * len(vec)).
    """
    # The zeros tensor only supplies shape/dtype/device; the second positional
    # argument overrides the per-element probability (legacy torch.bernoulli
    # overload) — confirm this overload is still supported by the pinned
    # torch version.
    bits = torch.bernoulli(
        torch.zeros(vec.shape[0], dtype=torch.uint8, device=vec.device), p
    )
    # NOTE(review): indexing with a uint8 mask is deprecated in newer torch
    # (bool masks are expected) — verify against the supported torch version.
    return vec[bits]
def creating_avg_data_all_date_breakdown(df, lines, time_type, d=None, h=None):
    """
    Average passenger/passage counts for the given lines, broken down either
    by (day, hour) when ``time_type == 'days'`` or by hour only otherwise.

    Note: mutates ``df`` in place by adding 'day_value' and 'hour' columns.

    :param df: dataframe with 'date_time', 'line', 'number_of_passenger' and
        'number_of_passage' columns ('date_time' must hold datetime values)
    :param lines: list of line identifiers to keep
    :param time_type: string; 'days' selects the (day, hour) breakdown
    :param d: list of day names, used when time_type == 'days'
    :param h: list of hours, used otherwise
    :rtype: dataframe
    """
    # Homework :]
    # Please run the same method again, combining the d and h parameters in a single parameter
    # Hint: data type comparison
    # Derive the day-name and hour breakdown columns from the timestamp.
    df['day_value'] = df['date_time'].apply(lambda row: row.day_name())
    df['hour'] = df['date_time'].apply(lambda row: row.hour)
    # Keep only the requested lines.
    df_ = df[df['line'].isin(lines)].reset_index(drop=True)
    if time_type == 'days':
        # Average per (day, hour, line) over the selected days.
        df__ = df_[df_['day_value'].isin(d)][['day_value', 'hour', 'line',
                                              'number_of_passenger', 'number_of_passage']].reset_index(drop=True)
        df_grouped = df__.groupby(['day_value', 'hour', 'line']).mean().reset_index().round(2) \
            .rename(columns={'number_of_passenger': 'avg_number_of_passenger',
                             'number_of_passage': 'avg_number_of_passage'})
        return df_grouped
    else:
        # Average per (hour, line) over the selected hours.
        df__ = df_[df_['hour'].isin(h)][['hour', 'line',
                                         'number_of_passenger', 'number_of_passage']].reset_index(drop=True)
        df_grouped = df__.groupby(['hour', 'line']).mean().reset_index().round(2) \
            .rename(columns={'number_of_passenger': 'avg_number_of_passenger',
                             'number_of_passage': 'avg_number_of_passage'})
        return df_grouped
def stop(event):
    """
    Stop the simulation.  This completes the 'run' event; to run again, the
    'run' event has to be launched anew.
    """
    # The pool context carries the agent driving the simulation; the agent's
    # own context holds the simulated network.
    agent = event.pool.context['agent']
    net = agent.context['net']
    # Report completion of the 'run' event to the server before stopping.
    agent.send_server('domain_state', {
        'state': 'run',
        'status': 'done',
    })
    return net.stop()
import torch
def _index_select_img(ico_img, imgcoord):
"""
ico_img: tensor (b x c x 1 x vertex_num)
imgcoord: numpy array
"""
b, c = ico_img.shape[:2]
device = ico_img.device
h, w = imgcoord.shape[-2:]
img = torch.index_select(ico_img, -1, imgcoord.reshape(-1))
return img.view(b, c, h, w) | 691ccedeb40d806b8c928e46f1eb02969a462848 | 695,565 |
def cmd(func):
    """Indicate that ``func`` returns a shell command with this decorator.

    When applied to an operation function defined by :class:`~.FlowProject`,
    the function is interpreted as returning a shell command to execute
    instead of being executed itself, e.g.:

    .. code-block:: python

        @FlowProject.operation
        @flow.cmd
        def hello(job):
            return "echo {job.id}"

    .. note::
        The final shell command generated for :meth:`~.FlowProject.run` or
        :meth:`~.FlowProject.submit` still respects directives and will
        prepend e.g. MPI or OpenMP prefixes to the shell command provided
        here.
    """
    decorated_with_job = getattr(func, "_flow_with_job", False)
    if decorated_with_job:
        raise RuntimeError(
            "@cmd should appear below the @with_job decorator in your script"
        )
    func._flow_cmd = True
    return func
def Normalize(tensor, mean, std):
    """
    Given mean: (R, G, B) and std: (R, G, B),
    will normalize each channel to `channel = (channel - mean) / std`
    Given mean: GreyLevel and std: GrayLevel,
    will normalize the channel to `channel = (channel - mean) / std`
    :param tensor: image tensor to be normalized (channel-first, so iteration
        yields one channel per mean/std entry — TODO confirm layout)
    :param mean: mean of every channel
    :param std: standard variance of every channel
    :return: the same tensor, normalized in place
    """
    # Iterating a torch tensor yields per-channel views, so the in-place
    # -= and /= below mutate `tensor` itself.  NOTE(review): with a plain
    # list of numbers the updates would be lost (t is rebound locally), so a
    # torch.Tensor input is presumably required — confirm with callers.
    for t, m, s in zip(tensor, mean, std):
        t -= m
        t /= s
    return tensor
import struct
def double_2_bytes(d, is_little_endian=False):
    """
    Pack a float into its 8-byte IEEE-754 double representation.

    :param d: value to pack
    :param is_little_endian: pack little-endian when True, big-endian otherwise
    :return: 8-byte packed value
    """
    byte_order = '<' if is_little_endian else '>'
    return struct.pack(byte_order + 'd', d)
import os
def generate_file_path(directory, name):
    """
    Build the path of a file inside a directory.

    :param directory: the absolute path of the folder storing all the excel files
    :param name: the name of one file
    :return: a string of the file's absolute path
    """
    full_path = os.path.join(directory, name)
    return full_path
import re
def to_squad_para_dev(sample):
    """
    Convert one development sample from MRQA format to SQuAD format.

    MRQA document markers ([TLE]/[DOC]/[PAR]) are rewritten to [SEP], and
    each answer string becomes a SQuAD answer dict with a dummy start offset.

    :param sample: one sample in MRQA format.
    :return: paragraph dict in SQuAD format.
    """
    context = re.sub(r'\[TLE\]|\[DOC\]|\[PAR\]', '[SEP]', sample['context'])
    converted_qas = []
    for qa in sample['qas']:
        # Answer character offsets are unknown here, hence answer_start = -1.
        answers = [{'text': ans, 'answer_start': -1} for ans in qa['answers']]
        converted_qas.append({
            'question': qa['question'],
            'id': qa['qid'],
            'answers': answers,
        })
    return {'context': context, 'qas': converted_qas}
import os
def _base(path):
    """ Get the base of ``path``: the directory two levels up, i.e.
    dirname applied twice (the parent of the folder containing ``path``).
    """
    return os.path.dirname(os.path.dirname(path))
import argparse
def get_args():
    """ Parse command-line arguments and return the resulting namespace. """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--port')
    parser.add_argument(
        '-V', '--version', action='store_true',
        help='prints the Poet version number and exits',
    )
    return parser.parse_args()
def _strong_gens_from_distr(strong_gens_distr):
"""
Retrieve strong generating set from generators of basic stabilizers.
This is just the union of the generators of the first and second basic
stabilizers.
Parameters
==========
``strong_gens_distr`` - strong generators distributed by membership in basic
stabilizers
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.named_groups import SymmetricGroup
>>> from sympy.combinatorics.util import (_strong_gens_from_distr,
... _distribute_gens_by_base)
>>> S = SymmetricGroup(3)
>>> S.schreier_sims()
>>> S.strong_gens
[(0 1 2), (2)(0 1), (1 2)]
>>> strong_gens_distr = _distribute_gens_by_base(S.base, S.strong_gens)
>>> _strong_gens_from_distr(strong_gens_distr)
[(0 1 2), (2)(0 1), (1 2)]
See Also
========
_distribute_gens_by_base
"""
if len(strong_gens_distr) == 1:
return strong_gens_distr[0][:]
else:
result = strong_gens_distr[0]
for gen in strong_gens_distr[1]:
if gen not in result:
result.append(gen)
return result | 03a81597659dbec7e537d7f0b50825ae8983e318 | 695,574 |
def bounding_box_naive(points):
    """Return the bottom-left and top-right corners of the points' bounding box.

    Each point is indexable as point[0, 0] (x) and point[0, 1] (y) — e.g. the
    rows of an OpenCV contour array.  min and max are taken over the whole
    collection of points.
    """
    xs = [point[0, 0] for point in points]
    ys = [point[0, 1] for point in points]
    return [(int(min(xs)), int(min(ys))), (int(max(xs)), int(max(ys)))]
def allocate_responses(events_df, responses, response_times, response_window=1):
    """
    Assign responses to task trials.

    :param events_df: dataframe with 'trial_type' and 'onset' columns;
        gains 'response_time', 'accuracy' and 'classification' columns
    :param responses: unused here — presumably the raw button identifiers
        matching ``response_times`` (any response counts; verify with caller)
    :param response_times: list of response timestamps (same clock as onset)
    :param response_window: seconds after a target onset during which a
        response counts as a hit
    :return: the mutated events dataframe
    """
    # Let's start by locating target trials
    task_types = ['oddball', 'oneback', 'twoback']
    response_times = response_times[:]  # copy
    target_trial_idx = events_df['trial_type'].isin(task_types)
    nontarget_trial_idx = ~target_trial_idx
    events_df['response_time'] = 'n/a'
    events_df['accuracy'] = 'n/a'
    events_df['classification'] = 'n/a'
    # Defaults
    # NOTE(review): the next line writes 1 to 'classification' and is then
    # immediately overwritten by 'true_negative' — it was likely meant to set
    # 'accuracy' instead; confirm the intended default before changing.
    events_df.loc[events_df['trial_type'] == 'category', 'classification'] = 1
    events_df.loc[events_df['trial_type'] == 'category', 'classification'] = 'true_negative'
    events_df.loc[target_trial_idx, 'accuracy'] = 0  # default to miss
    events_df.loc[target_trial_idx, 'classification'] = 'false_negative'
    # Log hits
    for trial_idx in events_df.index[target_trial_idx]:
        onset = events_df.loc[trial_idx, 'onset']
        keep_idx = []
        # Looping backwards lets us keep earliest response for RT
        # Any response is *the* response, so the actual button doesn't matter.
        for i_resp, rt in enumerate(response_times[::-1]):
            if onset <= rt <= (onset + response_window):
                events_df.loc[trial_idx, 'accuracy'] = 1
                events_df.loc[trial_idx, 'response_time'] = rt - onset
                events_df.loc[trial_idx, 'classification'] = 'true_positive'
            else:
                # Unconsumed responses are kept for later trials.
                keep_idx.append(response_times.index(rt))
        response_times = [response_times[i] for i in sorted(keep_idx)]
    # Log false alarms
    for trial_idx in events_df.index[nontarget_trial_idx]:
        onset = events_df.loc[trial_idx, 'onset']
        if trial_idx == events_df.index.values[-1]:
            next_onset = onset + response_window  # arbitrary duration
        else:
            next_onset = events_df.loc[trial_idx+1, 'onset']
        # Looping backwards lets us keep earliest response for RT
        for i_resp, rt in enumerate(response_times[::-1]):
            if onset <= rt < next_onset:
                # Ignore response window and use current trial's duration only
                events_df.loc[trial_idx, 'accuracy'] = 0
                events_df.loc[trial_idx, 'classification'] = 'false_positive'
                events_df.loc[trial_idx, 'response_time'] = rt - onset
    return events_df
def to_grains(amount):
    """
    Convert from cryptocurrency units to grains (1 coin = 10**8 grains).

    Parameters
    ----------
    input: amount
    Returns
    -------
    int
    """
    grains_per_coin = 10 ** 8
    return amount * grains_per_coin
def generate_solution(x: int, n: int) -> int:
    """Count pairs (i, j) with 1 <= i, j <= n and i * j == x, the "naive" way.

    This deliberately enumerates every pair — it exists as a slow reference
    implementation for testing purposes.
    """
    total = 0
    for row in range(1, n + 1):
        for col in range(1, n + 1):
            if row * col == x:
                total += 1
    return total
from functools import reduce
def _merge_max_mappings(*mappings):
"""Merge dictionaries based on largest values in key->value.
Parameters
----------
*mappings : Dict[Any, Any]
Returns
-------
Dict[Any, Any]
Examples
--------
>>> _merge_max_mappings({"a":1, "b":4}, {"a":2})
{"a":2, "b":4}
"""
def _merge_max(d1, d2):
d1.update((k, v) for k, v in d2.items() if d1.get(k, 0) < v)
return d1
return reduce(_merge_max, mappings, {}) | d93ca0eedd5c112a293d7f25902f56192924fa12 | 695,582 |
def template_responded_existence(trace, event_set):
    """
    Evaluate the responded existence(A, B) template: if event A occurs,
    event B should also occur (either before or after event A).

    :param trace: mapping from event name to its list of occurrences
    :param event_set: pair (A, B) of event names
    :return: (support, vacuous): the smaller occurrence count on success,
        -1 on violation, and (0, True) when A never occurs
    """
    # exactly 2 events
    assert (len(event_set) == 2)
    first, second = event_set[0], event_set[1]
    if first not in trace:
        # A never happens: the constraint holds vacuously.
        return 0, True
    if second not in trace:
        # A happened but B never did: violation.
        return -1, False
    return min(len(trace[first]), len(trace[second])), False
import re
def text(elt):
    """Format a value extracted from a table cell (lxml element).

    Returns the first digit run or "a-b" range found in the cell's text, 0
    for cells matching a single "-x" pattern, or '' when nothing matches.
    The W/V/C/Hz branches presumably distinguish wattage, voltage,
    temperature and frequency cells — confirm against the scraped table.
    """
    # Replace non-breaking spaces so the regexes below see plain text.
    obiekt = elt.text_content().replace(u'\xa0', u' ')
    a = ""
    # "a-b" ranges (e.g. value intervals) are returned as-is.
    if len(re.findall("\d+-\d", obiekt)) == 1:
        a = re.findall("\d+-\d+", obiekt)[0]
        return a
    if obiekt.find("W") != -1:
        a = re.findall("\d+", obiekt)[0]
    if obiekt.find("V") != -1:
        a = re.findall("\d+", obiekt)[0]
        # obiekt = (f"Voltage:{a}")
    if (obiekt.find("C") != -1):
        a = re.findall("\d+", obiekt)[0]
        # obiekt = (f"Temperature:{a}")
    if (obiekt.find("Hz") != -1):
        a = re.findall("\d+", obiekt)[0]
        # NOTE(review): this assignment to obiekt feeds the date/dash checks
        # below and so changes their outcome for Hz cells — confirm intended.
        obiekt = (f"Frequency:{a}")
    # Date-like "y-m-d" cells: strip embedded newlines.
    if len(re.findall("\d+-\d+-\d+", obiekt)) == 1:
        a = obiekt.replace('\n', '')
        # obiekt = (f"data:{a}")
    # A lone "-x" (dash placeholder) cell yields numeric 0.
    if len(re.findall("-.", obiekt)) == 1:
        return 0
    return a
import random
def generate_grid(height = 10, width = 10, seed = None, sparcity = 75, separator = "#", gap = "."):
    """Create an empty grid as the base for a crossword.

    :param height: number of rows in puzzle <1, 1000>
    :param width: number of columns in puzzle <1, 1000>
    :param seed: number used as seed in RNG
    :param sparcity: percentage of grid to fill with characters (0, 100)
    :param separator: character used as BLANK
    :param gap: character used to indicate space to fill
    :raise ValueError: if arguments have incorrect value
    :raise TypeError: if arguments have incorrect type
    :returns: empty grid
    """
    if not 1 <= height <= 1000:
        raise ValueError("Incorrect height")
    if not 1 <= width <= 1000:
        raise ValueError("Incorrect width")
    if not 0 < sparcity < 100:
        raise ValueError("Incorrect sparcity")
    if not (separator.isascii() and len(separator) == 1):
        raise TypeError("Separator has to be single character")
    if not (gap.isascii() and len(gap) == 1):
        raise TypeError("Gap has to be single character")
    if separator == gap:
        raise ValueError("Separator cannot be equal to gap character")
    random.seed(seed)
    cells = height * width
    grid = [[gap] * width for _ in range(height)]
    # Mark (100 - sparcity)% of the cells as separators (BLANK squares).
    blocked = random.sample(range(0, cells), (100 - sparcity) * cells // 100)
    for position in blocked:
        row, col = divmod(position, width)
        grid[row][col] = separator
    return grid
def testing_decorator(func):
"""Custom decorator for testing."""
def wrapper(arg):
func(arg)
return wrapper | cc44c9178175dbbb535b01a6727f29a61b60fdd7 | 695,586 |
import subprocess
def pypi_registry(package_name):
    """
    Look up the locally installed version of ``package_name`` via
    ``pip3 show`` (despite the name, this queries the local pip database,
    not the remote PyPI registry).

    Returns:
        The version string when the package is installed, '' when pip knows
        nothing about it, or None when running pip itself failed.
    """
    search_cmd = 'pip3 show {} | grep Version: 2>/dev/null'.format(package_name)
    try:
        output = subprocess.getoutput(search_cmd)
    except Exception:
        # pip3 missing or shell failure: signal "unknown" to the caller.
        return None
    if 'Version:' in output:
        return output.split(':')[1].strip()
    # Package not found in the local pip database.
    return ''
def _find_actors(begin_tags, event_tags):
    """
    Finds words that belong to the same actor or event and categorize them into actors.
    Each different argument represents a different actor. Arguments start at the begin tag (B).
    @param begin_tags: result of normalized_sent_tags - boolean list of tags that begin SRL arguments
    @param event_tags: result of find_events - boolean list of words that represent events
    @return: List of the actors that are represented by each word in the sentence
    """
    # State: actor = currently inside an actor span, i = actor counter,
    # event = event counter (1-based), is_event = inside an event run.
    # Note: bitwise `&` is used on bools throughout; for Python bools this
    # behaves like logical `and` (both operands always evaluated).
    actor, i, actor_tags, event, is_event = False, 0, [], 1, False
    for btag, etag in zip(begin_tags, event_tags):
        # Track transitions out of an event run to advance the event counter.
        if etag:
            is_event = True
        elif (not etag) & is_event:
            is_event = False
            event += 1
        # A begin tag outside an event starts a new actor span.
        if btag & (not etag):
            actor = True
            i += 1
            actor_tags.append("T" + str(i))
            continue
        # Event words get the current event label and close any actor span.
        if etag:
            actor_tags.append("EVENT" + str(event))
            actor = False
            continue
        # Continuation of the current actor span.
        if (not btag) & actor:
            actor_tags.append("T" + str(i))
        else:
            actor_tags.append(actor_tags[-1])  # In case everything else fails, just join with previous actor
    return actor_tags
def large_or_odd(rect, avg):
    """An odd shape: very large area or an extreme (thin/wide) aspect ratio."""
    if rect.area() > (100 * avg * avg):
        return True
    aspect = rect.aspect()
    return aspect < 0.2 or aspect > 10
def multiply(value, amount):
    """ Convert ``value`` to float and multiply by ``amount``.

    Returns ``value`` unchanged when it cannot be treated as a number —
    both ValueError (non-numeric string) and TypeError (e.g. None), which
    the original let escape, are handled.
    """
    try:
        return float(value) * amount
    except (ValueError, TypeError):
        # Value can't be converted to float; pass it through untouched.
        return value
from typing import Dict
def get_existing_mapped_pipestep(func_name:str, code_to_func:Dict[str,int]):
    """ Return the id already associated with ``func_name`` in the mapping.

    Raises an error if the function does not already have an entry in the
    mapping.

    :param func_name: the name of the function to be mapped
    :param code_to_func: the existing id -> function mapping
    :returns: (the mapping dictionary, id of the function)
    """
    # Invert the mapping; when a function appears under several codes, the
    # last one wins (matching dict-comprehension overwrite semantics).
    func_to_code = {}
    for code, mapped_name in code_to_func.items():
        func_to_code[mapped_name] = int(code)
    if func_name not in func_to_code:
        raise ValueError(f'{func_name} not already in mapping')
    return code_to_func, func_to_code[func_name]
import hmac
import hashlib
import base64
def _GetSignature(key, url):
"""Gets the base64url encoded HMAC-SHA1 signature of the specified URL.
Args:
key: The key value to use for signing.
url: The url to use for signing.
Returns:
The signature of the specified URL calculated using HMAC-SHA1 signature
digest and encoding the result using base64url.
"""
signature = hmac.new(key, url, hashlib.sha1).digest()
return base64.urlsafe_b64encode(signature) | 3fd070040aee0bee67ae9517ba65005509ca1bfa | 695,592 |
import re
def normalize(string):
    """Give a canonical version of a given value string."""
    # None -> '', and uppercase exponent markers become lowercase.
    text = (string or '').replace('E', 'e')
    # Insert a space before any minus sign not preceded by 'e'.
    text = re.sub('(?<!e)-', ' -', text)
    # Collapse runs of whitespace/commas into a single space.
    text = re.sub('[ \n\r\t,]+', ' ', text)
    # Split back-to-back decimals like '1.5.6' into '1.5 .6'.
    text = re.sub(r'(\.[0-9-]+)(?=\.)', r'\1 ', text)
    return text.strip()
from imp import find_module
from os.path import realpath, dirname, basename, splitext
def _check_if_pyc(fname):
    """Return True if the extension is .pyc, False if .py
    and None if otherwise

    NOTE(review): despite the summary above, the function actually returns
    the 3-tuple (pytype, fileobj, fullpath) obtained from imp.find_module —
    confirm which contract callers rely on. Also note that the `imp` module
    is deprecated and removed in Python 3.12; migrating to importlib is
    advisable.

    :param fname: path (relative or absolute) of the config file to locate
    :raises IOError: if find_module cannot locate the module
    """
    # Normalize the file-path for the find_module()
    filepath = realpath(fname)
    dirpath = dirname(filepath)
    # Module name is the file's base name without its extension.
    module_name = splitext(basename(filepath))[0]
    # Validate and fetch
    try:
        # pytype is the suffix/mode/type triple's type field (e.g. PY_SOURCE).
        fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
    except ImportError:
        raise IOError(
            "Cannot find config file. " "Path maybe incorrect! : {0}".format(filepath)
        )
    return pytype, fileobj, fullpath
def quadratic(v0, v1, v2, t):
    """Evaluate the quadratic Bezier interpolation of three vectors at t.

    :param v0: <OpenMaya.MVector> start point
    :param v1: <OpenMaya.MVector> control point
    :param v2: <OpenMaya.MVector> end point
    :param t: <float> curve parameter
    :return: dict with the interpolated 'x', 'y' and 'z' components
    """
    point_final = {}
    # Same formula applied component-wise: (1-t)^2*v0 + 2(1-t)t*v1 + t^2*v2
    for axis in ('x', 'y', 'z'):
        a = getattr(v0, axis)
        b = getattr(v1, axis)
        c = getattr(v2, axis)
        point_final[axis] = ((1 - t) ** 2) * a + (1 - t) * 2 * t * b + t * t * c
    return point_final
import collections
def filter_dataframes(dfs, xs, ys, table_ys, args_list, valid_keys):
    """Process necessary information from dataframes in the Bokeh format.
    In the following explanation, N is assumed to be the number of experiments.
    For xs_dict and ys_dict:
        These are dictionary of list of list.
        To make it simple, we focus on particular `x` in `xs`. Everything is
        the same for `ys_dict`.
        `x` is usually a timescale values such as iteration or epoch.
        Here are some characteristics:
        1. xs_dict[x] is list of list
        2. len(xs_dict[x]) == N
        3. xs_dict[x][i] is list. For example, if log is recorded every
            epoch and `x` is epoch, xs_dict[x][i] == [1, 2, 3, 4, ...].
    For tables:
        This is a dictionary of list of scalars or strings.
        The keys correspond to the column keys of the data table.
        The keys are the combination of all `valid_keys` and `table_ys`.
        tables[key][i] is `key` value recorded in the i-th experiment.
        For example, if key=='main/loss', this is the minimum loss value during
        training time recorded for the i-th experiment.
    Args:
        dfs (list of pd.DataFrame)
        xs (list of strings)
        ys (list of strings)
        table_ys (dictionary): maps column name -> 'min' or 'max'
        args_list (list of dictionaries): one filter-dict per experiment
        valid_keys (list of strings)

    Returns:
        (xs_dict, ys_dict, tables) as described above.

    NOTE(review): despite the docstring, `dfs` is indexed like a single
    DataFrame (``tmp[tmp[key] == val]``), not a list — confirm with callers.
    """
    # Descs: descriptions
    # ys_dict == {string (y): List(Serial Data)}
    xs_dict = {x: [] for x in xs}
    ys_dict = {y: [] for y in ys}
    # Column order: index first, then the arg keys, then the summary columns.
    tables = collections.OrderedDict(
        [(key, []) for key in ['index'] + valid_keys + list(table_ys.keys())])
    for i, args in enumerate(args_list):
        # get df from a result
        tmp = dfs
        # Narrow the dataframe to the rows matching this experiment's args;
        # None is matched via isnull() since NaN != NaN in pandas.
        for key, val in args.items():
            if val is None:
                tmp = tmp[tmp[key].isnull()]
            else:
                tmp = tmp[tmp[key] == val]
        for x in xs:
            xs_dict[x].append(tmp[x].values.tolist())
        for y in ys:
            ys_dict[y].append(tmp[y].values.tolist())
        # Summary statistic per experiment: either the min or max of the column.
        for table_y, value_type in table_ys.items():
            if value_type == 'min':
                tables[table_y].append(tmp[table_y].min())
            elif value_type == 'max':
                tables[table_y].append(tmp[table_y].max())
            else:
                raise ValueError
        # Record the experiment's arg values (None when the key is absent).
        for key in valid_keys:
            if key in args:
                tables[key].append(args[key])
            else:
                tables[key].append(None)
    tables['index'] = list(range(len(args_list)))
    return xs_dict, ys_dict, tables
def get_sale(match):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.
    """
    head = match.group('head')
    cattle = match.group('cattle').strip()
    weight = match.group('weight')
    # Drop thousands separators from the price.
    price = match.group('price').replace(',', '')
    return {
        'cattle_head': head,
        'cattle_cattle': cattle,
        'cattle_avg_weight': weight,
        'cattle_price_cwt': price,
    }
def parse_cfg(filename):
    """Parse a Darknet-style .cfg file into a list of NN blocks.

    Inputs:
    - cfg's file name, e.g. 'yolov3.cfg'
    Returns:
    - a list of NN blocks; each block is a dict whose 'type' key comes
      from its [section] header, other keys from the key=value lines.
    """
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(filename, 'r') as file:
        raw_lines = file.read().split('\n')
    # Strip whitespace BEFORE the comment test so indented comments and
    # blank-ish lines are dropped too (the original checked x[0] first).
    lines = []
    for raw in raw_lines:
        stripped = raw.strip()
        if stripped and not stripped.startswith('#'):
            lines.append(stripped)
    blocks = []
    block = {}
    for line in lines:
        if line[0] == '[':
            if block:
                blocks.append(block)
            block = {'type': line[1:-1].rstrip()}
        else:
            # partition keeps any '=' inside the value intact
            # (split('=') silently dropped everything after a second '=').
            key, _, value = line.partition('=')
            block[key.strip()] = value.strip()
    # Only append the trailing block if the file actually contained one;
    # the original appended an empty dict for section-less files.
    if block:
        blocks.append(block)
    return blocks
def preprocess_remove_dots(raw_img):
    """Zero out isolated foreground pixels ("dots") in a binary image.

    A pixel with value 1.0 is cleared when none of its 8-connected
    neighbours is 1.0. The image is modified in place and also returned.

    The original implementation had several defects that are fixed here:
    it indexed neighbours with absolute coordinates ``raw_img[n][m]``
    instead of offsets, assigned a misspelled ``Start`` variable for the
    left edge, compared ``i == len(raw_img)`` (never true for an index),
    skipped the wrong cells with ``n == m``, and cleared background
    pixels as well as foreground ones.

    Parameters
    ----------
    raw_img : list of list of float
        The binary image to be processed (1.0 = foreground).

    Returns
    -------
    raw_img : list of list of float
        The same image object with isolated dots cleared.
    """
    rows = len(raw_img)
    for i in range(rows):
        cols = len(raw_img[i])
        for j in range(cols):
            # Only foreground pixels can be "dots".
            if raw_img[i][j] != 1.0:
                continue
            alone = True
            # Scan the 8-connected neighbourhood, clamped to the borders.
            for n in range(max(0, i - 1), min(rows, i + 2)):
                for m in range(max(0, j - 1), min(len(raw_img[n]), j + 2)):
                    if (n, m) == (i, j):
                        continue
                    if raw_img[n][m] == 1.0:
                        alone = False
                        break
                if not alone:
                    break
            if alone:
                raw_img[i][j] = 0
    return raw_img
def isclose(a, b, rel_tol=1e-04, abs_tol=0.0):
    """Compare two values for approximate equality.

    :param a: Value 1
    :param b: Value 2
    :param rel_tol: relative tolerance, as a fraction of the larger magnitude
    :param abs_tol: absolute tolerance floor
    :return: True when |a - b| is within the combined tolerance
    """
    tolerance = max(rel_tol * max(abs(a), abs(b)), abs_tol)
    return abs(a - b) <= tolerance
from typing import Union
from typing import List
def format_as(item: Union[int, List[int]], kind: str) -> Union[str, List[str]]:
    """Normalize a single integer or a list of integers.

    'sequence' numbers are zero-padded to 4 digits, 'frame' numbers to 6.

    :param item: one integer or a list of integers
    :param kind: 'sequence' or 'frame'
    :raises ValueError: for any other `kind`
    :raises TypeError: when `item` is neither an int nor a list
        (the original silently returned None in that case)
    """
    def _normalize_element(element: int, kind: str) -> str:
        if kind == 'sequence':
            return "{:04d}".format(element)
        if kind == 'frame':
            return "{:06d}".format(element)
        raise ValueError(f"kind must be 'sequence' or 'frame': {kind}")
    if isinstance(item, int):
        return _normalize_element(item, kind)
    if isinstance(item, list):
        return [_normalize_element(element, kind) for element in item]
    raise TypeError(f"item must be an int or a list of ints: {type(item).__name__}")
def _cnx_is_empty(in_file):
"""Check if cnr or cns files are empty (only have a header)
"""
with open(in_file) as in_handle:
for i, line in enumerate(in_handle):
if i > 0:
return False
return True | 1f5706e3b3733527baedd4df240f0be88adb80a5 | 695,603 |
def from_none(exc):
    """Emulate ``raise ... from None`` (PEP 409) on older Pythons.

    Clears the exception's explicit cause; on interpreters without
    ``__cause__`` it clears the implicit ``__context__`` instead.
    """
    try:
        exc.__cause__ = None
    except AttributeError:
        exc.__context__ = None
    return exc
def error_503(_):
    """Maintenance-mode handler: a fixed message with HTTP status 503."""
    body = 'PixyShip is down for maintenance'
    return body, 503
def cleanup_time_string(t):
    """Convert a microsecond count to a compact seconds string.

    Sub-second values are rounded to 5 decimal places; values of one
    second or more are truncated to whole seconds.
    """
    seconds = float(t) / 1e6
    seconds = round(seconds, 5) if seconds < 1 else int(seconds)
    return str(seconds)
def k_steamtrap_boiler(m_steam_boil, delta_P_boiler):
    """
    Calculates the steam trap for boiler.

    Parameters
    ----------
    m_steam_boil : float
        The flow rate steam of boiler, [t/h]
    delta_P_boiler : float
        The differential pressure between steam pressure and atmospheric pressure, [MPa]

    Returns
    -------
    k_steamtrap_boiler : float
        The steam trap for boiler, [t/h]

    References
    ----------
    &&&&&&&&&&&&
    """
    # The original used European decimal commas ("0,575" and "(0,5)"),
    # which Python parses as tuples: the expression returned a tuple /
    # raised a TypeError. Fixed to use decimal points.
    return 0.575 * m_steam_boil / delta_P_boiler ** 0.5
def has_operator(context, x, y):
    """
    'has' operator of permission checks.

    Evaluates `x` to the user object and `y` to either a permission name
    or a (permission, object) pair, then delegates to ``user.has_perm``.
    """
    user = x.eval(context)
    perm = y.eval(context)
    obj = None
    if isinstance(perm, (list, tuple)):
        perm, obj = perm
    return user.has_perm(perm, obj)
def list_copy(_list):
    """Return a shallow copy of the given sequence as a new list."""
    return list(_list)
def anyfalse(bools):
    """Returns True iff any elements of iterable `bools` are False
    >>> anyfalse([True, True])
    False
    >>> anyfalse([True, False, True])
    True
    >>> anyfalse(None)
    False
    """
    if bools is None:
        return False
    # any-element-falsy is the negation of all-elements-truthy.
    return not all(bools)
def _truncate_dataset_to_batch_size(batch_size, data, *other_data):
"""Truncate given input data to a multiple of the batch size.
Parameters
----------
batch_size : int
The batch size. The length of the truncated data is a multiple of this
value.
data : np.ndarray
The first input array.
*other_data
Additional input arrays; must be of type np.ndarray.
Returns
-------
List[np.ndarray]
The truncated data. The length of each entry in the list is a multiple
of the batch size.
"""
num_batches = int(len(data) / batch_size)
new_len = num_batches * batch_size
dataset = [data] + list(other_data)
if new_len < len(data):
print(
"WARNING dataset length is not a multiple of batch size. "
"Truncating from %d to %d." % (len(data), new_len))
dataset = [x[:new_len] for x in dataset]
return num_batches, dataset[0] if len(dataset) == 1 else dataset | 548f922d534aa3523cf8c0824fc7c7e12441088b | 695,611 |
def _sanitize(name):
"""
Examples
--------
>>> _sanitize('ROMS/TOMS')
'ROMS_TOMS'
>>> _sanitize('USEAST model')
'USEAST_model'
>>> _sanitize('GG1SST, SST')
'GG1SST_SST'
"""
name = name.replace(', ', '_')
name = name.replace('/', '_')
name = name.replace(' ', '_')
name = name.replace(',', '_')
return name | f059f48f828aaccb2499f250e67ea9e566cd23e0 | 695,613 |
def hexify(env, req):
    """
    Render the integer captured by the route's first regex group as hex.
    """
    value = int(req.match.group(1))
    return str(hex(value))
def ustawienia():
    """Prompt the player for the game settings (difficulty): how many
    numbers are picked, the maximum drawable value, and the number of
    draws. Re-prompts on invalid input.

    Returns:
        tuple: (count of numbers, maximum value, number of draws)
    """
    while True:
        try:
            ile = int(input("Podaj ilość typowanych liczb: "))  # how many numbers to pick
            maks = int(input("Podaj maksymalną losowaną liczbę: "))  # maximum value
            if ile > maks:
                # Cannot pick more numbers than the range allows.
                print("Błędne dane!")  # "Invalid data!"
                continue
            ilelos = int(input("Ile losowań: "))  # number of draws
            return (ile, maks, ilelos)
        except ValueError:
            # Non-numeric input: report and re-prompt.
            print("Błędne dane!")  # "Invalid data!"
            continue
def corners_to_center_scale(p0, p1):
    """Convert bounding boxes from "corners" form to "center+scale" form."""
    center = 0.5 * (p0 + p1)   # midpoint of the two corners
    size = p1 - p0             # extent along each axis
    return center, size
def wait(method):
    """
    Decorator that waits for a previous timer before calling the method
    and starts a new timer afterwards.

    :param:
     - `method`: bound method to wrap with the timer wait/start cycle
    :return: wrapped method
    """
    def _method(self, *args, **kwargs):
        # Block until the running timer expires (bounded by its own limit),
        # then reset it before invoking the wrapped method.
        self.timer.wait(self.timer.seconds)
        self.timer.clear()
        result = method(self, *args, **kwargs)
        self.timer.start()
        return result
    return _method
def condition_domain_reduction(csp, var):
    """Under the all-reduced-domains condition every variable is
    re-enqueued, so this unconditionally returns True."""
    return True
def extractTwos(m):
    """m is a positive integer. A tuple (s, d) of integers is returned
    such that m = (2 ** s) * d, with d odd.

    Requires m > 0: the original accepted m == 0 (``assert m >= 0``) and
    then looped forever, since 0 has no such decomposition.
    """
    assert m > 0
    # (m & -m) isolates the lowest set bit, i.e. 2 ** s, so its
    # bit_length is s + 1 — no loop needed.
    s = (m & -m).bit_length() - 1
    return s, m >> s
def log_url(ip, port, project, spider, job):
    """
    Build the URL of a scrapyd job log.

    :param ip: host
    :param port: port
    :param project: project
    :param spider: spider
    :param job: job
    :return: string
    """
    return f'http://{ip}:{port}/logs/{project}/{spider}/{job}.log'
import os
def real_path(path: str) -> str:
    """Real path.

    Args:
        path: relative or absolute path; a leading '~' expands to the
            current user's home directory.

    Returns:
        Absolute path to provided path.

    Raises:
        ValueError: if `path` is empty (the original raised IndexError).
    """
    if not path:
        raise ValueError("path must be a non-empty string")
    if path[0] == '~':
        root = os.path.expanduser('~')
        path = path[1:]
    elif path[0] == '/':
        root = '/'
    else:
        root = os.getcwd()
    full = f'{root}/{path}'
    # Collapse every run of slashes; the original's two fixed replace()
    # passes missed longer runs such as '/////'.
    while '//' in full:
        full = full.replace('//', '/')
    return full
def foo():
    """Placeholder that always returns None; slated for removal
    (via git checkout, per the original note)."""
    return None
def part1(entries: list) -> int:
    """part1 solver: total wrapping paper needed — full surface area
    plus slack equal to the area of the smallest side, per gift."""
    total = 0
    for length, width, height in entries:
        sides = (length * width, length * height, width * height)
        total += 2 * sum(sides) + min(sides)
    return total
import os
from pathlib import Path
def parse_dataset_as_name(name: str) -> str:
    """
    Constructs a valid prefix-name from a provided file path.

    Args:
        name: str path to some valid data/manifest file or a python object that
            will be used as a name for the data loader (via str() cast).

    Returns:
        str prefix used to identify uniquely this data/manifest file.
    """
    # Existing files/directories contribute only their stem.
    if os.path.exists(str(name)) or os.path.isdir(str(name)):
        name = Path(name).stem
    else:
        name = str(name)
    # Cleanup: dashes become underscores, then boilerplate words go.
    name = name.replace('-', '_')
    name = name.replace('manifest', '').replace('dataset', '')
    # Ensure a trailing underscore so the prefix composes cleanly.
    if name[-1] != '_':
        name = name + '_'
    return name
def startWithArabic(Instr):
    """
    Return True if the given string starts with an Arabic numeral digit.
    (Empty strings yield False via the empty slice.)
    """
    first = Instr[:1]
    return first.isdigit()
def logo():
    """
    Return the ASCII-art banner for the tool.

    Returns
    -------
    str
        The multi-line logo string (a raw string, so the art's
        backslashes are preserved literally).
    """
    return r"""
 _____ _ _____ _ _
 / ____| | | | __ \ (_) (_)
| (___ _ _| |__ | | | | ___ _ __ ___ __ _ _ _ __ _ _______ _ __
 \___ \| | | | '_ \| | | |/ _ \| '_ ` _ \ / _` | | '_ \| |_ / _ \ '__|
 ____) | |_| | |_) | |__| | (_) | | | | | | (_| | | | | | |/ / __/ |
 |_____/ \__,_|_.__/|_____/ \___/|_| |_| |_|\__,_|_|_| |_|_/___\___|_|Version 2.0
 Find interesting Subdomains and secrets hidden in page, folder, External Javascripts and GitHub
    """
def format_for_null(value):
    """If a Python value is None, we want it to convert to null in json;
    every other value is rendered as its string form."""
    if value is None:
        return None
    return "{}".format(value)
import requests
import csv
import json
def get_file_bl(file_bl_url):
    """ Gets file ioc from blacklist
    :param file_bl_url: url for file blacklist
    :return: list of file IOCs, each serialized as a JSON string
             (one per CSV row fetched from the blacklist URL)
    """
    file_ioc_list = list()
    # 1-second timeout keeps a slow blacklist host from blocking the caller.
    file_ioc_request_data = requests.get(file_bl_url, timeout=1)
    # Treat the response body as CSV with a header row; DictReader yields
    # one dict per IOC entry.
    tmp_dict = csv.DictReader(file_ioc_request_data.text.splitlines())
    for ioc in tmp_dict:
        file_ioc_list.append(json.dumps(ioc))
    return file_ioc_list
def getIIsum(data, U, B):
    """Summed-area (integral-image) lookup between upper-left corner U
    and lower-right corner B:

        A=U        Bi=U[0],B[1]
        +----------+
        |          |
        +----------+
        C=B[0],U[1]  D=B

        sum = I(D) + I(A) - (I(Bi) + I(C))

    NOTE(review): the degenerate U == B case returns the raw cell value,
    which is inconsistent with the corner formula (that would give 0) —
    confirm the intended semantics with callers.
    """
    if U == B:
        return data[U]
    top_right = data[U[0], B[1]]
    bottom_left = data[B[0], U[1]]
    return (data[B] + data[U]) - (top_right + bottom_left)
def read_stopwords(fileName='stopwords.txt', lower_case=True):
    """Reads a list of stopwords from a file. By default the words
    are read from a standard repo location and are lower_cased.

    :param fileName: a stopword file name (one word per line; blank
        lines are ignored)
    :param lower_case: a boolean flag indicating if lowercasing is needed.
    :return: a set of stopwords
    """
    with open(fileName) as handle:
        words = (line.strip() for line in handle)
        if lower_case:
            return {w.lower() for w in words if w}
        return {w for w in words if w}
def _addHeaderToRequest(request, header):
"""
Add a header tuple to a request header object.
@param request: The request to add the header tuple to.
@type request: L{twisted.web.http.Request}
@param header: The header tuple to add to the request.
@type header: A L{tuple} with two elements, the header name and header
value, both as L{bytes}.
@return: If the header being added was the C{Content-Length} header.
@rtype: L{bool}
"""
requestHeaders = request.requestHeaders
name, value = header
values = requestHeaders.getRawHeaders(name)
if values is not None:
values.append(value)
else:
requestHeaders.setRawHeaders(name, [value])
if name == b"content-length":
request.gotLength(int(value))
return True
return False | a42f99b922b5234671360b93d0a6326ca24cf6a2 | 695,631 |
import click
def _validate_month(ctx, param, val):
"""Helper function to validate a month coming from the CLI."""
if val < 1 or val > 12:
raise click.BadParameter('Month must be between 1 and 12')
return val | a38de3a9d62a38f90e12fad675a810eadf9fee22 | 695,632 |
def softmax_est_crossentropy_deriv(y_est, y):
    """
    Gradient of the multiclass softmax cross-entropy with respect to the
    pre-softmax inputs, given only the softmax output.

    Parameters
    ----------
    y_est: ndarray(N)
        Output of the softmax function
    y: ndarray(N)
        Target values

    Returns
    -------
    ndarray(N):
        The gradient, which simplifies to (softmax output - targets)
    """
    grad = y_est - y
    return grad
def reads_by_prefix(config):
    """Return read metadata keyed by prefix.

    Entries under config["reads"]["paired"/"single"] may be either a
    positional list ``[prefix, platform, base_count, file(, urls)]`` —
    normalized into a dict here — or an already-built metadata dict.
    """
    if "reads" not in config:
        return {}
    reads = {}
    for strategy in ("paired", "single"):
        entries = config["reads"].get(strategy)
        if not entries:
            continue
        for entry in entries:
            if isinstance(entry, list):
                meta = {
                    "prefix": entry[0],
                    "platform": entry[1],
                    "strategy": strategy,
                    "base_count": entry[2],
                    "file": entry[3],
                }
                # Optional fifth field: semicolon-separated URLs.
                if len(entry) == 5:
                    meta["url"] = entry[4].split(";")
            else:
                meta = entry
            reads[meta["prefix"]] = meta
    return reads
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.