content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def beginsField(line):
    """
    Return True when the given (stripped) line opens an epytext ("@...")
    or ReST/Sphinx (":param ...", ":return ...", etc.) field, else False.
    """
    if line.startswith("@"):
        return True
    known_fields = ("param", "params", "return", "type", "rtype", "summary",
                    "var", "ivar", "cvar", "raises", "raise", "except",
                    "exception")
    return any(line.startswith(":" + field) for field in known_fields)
def is_pair(part):
    """
    Return True when ``part`` is a two-item sequence whose items are equal.

    >>> is_pair([4])
    False
    >>> is_pair([4, 4])
    True
    >>> is_pair([6, 9])
    False
    """
    return len(part) == 2 and part[0] == part[1]
import ast
def extract_attribute(module_name, attribute_name):
    """Extract a metadata attribute (e.g. ``__version__``) from a module's
    ``__init__.py`` by scanning for lines starting with *attribute_name*.

    Returns the literal-evaluated right-hand side of the first matching
    ``name = <literal>`` line, or None when nothing matches.
    """
    init_path = '%s/__init__.py' % module_name
    with open(init_path) as handle:
        for raw_line in handle:
            if not raw_line.startswith(attribute_name):
                continue
            return ast.literal_eval(raw_line.split('=')[1].strip())
from threading import local
def make_tls_property(default=None):
    """Creates a class-wide instance property with a thread-specific value.

    The returned descriptor keeps its value in a ``threading.local``, so
    each thread sees its own copy; ``default`` is returned for threads that
    have not assigned a value yet.

    NOTE(review): one descriptor instance is shared by the whole owner
    class, so the stored value is class-wide (not per-instance) within a
    given thread -- as the name "class-wide instance property" says.
    """
    class TLSProperty(object):
        def __init__(self):
            # Single thread-local store shared by every instance of the
            # class this descriptor is attached to.
            self.local = local()

        def __get__(self, instance, cls):
            # Accessed on the class itself (instance is None/falsy):
            # return the descriptor object for introspection.
            if not instance:
                return self
            return self.value

        def __set__(self, instance, value):
            self.value = value

        def _get_value(self):
            # Threads that never set a value fall back to `default`.
            return getattr(self.local, 'value', default)

        def _set_value(self, value):
            self.local.value = value

        value = property(_get_value, _set_value)

    return TLSProperty()
import math
def distance(point):
    """
    What comes in: An rg.Point.
    What goes out: The distance that the rg.Point is from (0, 0).
    Side effects: None.
    Example:
    If the argument is rg.Point(3, 4) this function returns 5.
    """
    # This code has an error, on purpose. Do NOT fix it.
    # NOTE(review): as written this computes the standard Euclidean
    # distance sqrt(x^2 + y^2); the deliberate "error" referenced above is
    # not apparent from this block alone -- confirm against the exercise.
    x_squared = point.x * point.x
    y_squared = point.y * point.y
    return math.sqrt(x_squared + y_squared)
def dread(infile, cols):
    """
    Read specimen records from a whitespace-delimited file.

    Each output record is (specimen, tr, dec, inc, int); the position of
    the four numeric fields in a line is given by the four column indices
    in ``cols``.

    Args:
        infile: path of the input file.
        cols: sequence of four column indices (for tr, dec, inc, int).

    Returns:
        list of 5-tuples: (name, float, float, float, float).
    """
    data = []
    # Bug fix: use a context manager so the file is closed even if a line
    # fails to parse (the old open()/close() leaked on exceptions).
    with open(infile, "r") as f:
        for line in f:
            tmp = line.split()
            rec = (tmp[0], float(tmp[cols[0]]), float(tmp[cols[1]]),
                   float(tmp[cols[2]]), float(tmp[cols[3]]))
            data.append(rec)
    return data
def _transform_session_persistence(persistence):
"""Transforms session persistence object
:param persistence: the session persistence object
:returns: dictionary of transformed session persistence values
"""
return {
'type': persistence.type,
'cookie_name': persistence.cookie_name
} | b703cf02099c42df24cf110e3a96693adabca5d7 | 690,601 |
def backslashedp(c):
    """
    BACKSLASHEDP char
    BACKSLASHED? char

    In Logo this outputs TRUE when the character was originally entered
    with a backslash (\\) before it or within vertical bars (|) to
    suppress its special syntactic meaning.  That origin information is
    not tracked in this implementation (@@: doesn't make sense for us),
    so the predicate is constantly False.
    """
    return False
def default_PSO_params(params):
    """
    Fill in default PSO parameters for any key missing from ``params``
    (the dict is mutated in place and also returned).

    Note by default constriction will be applied over inertia weighting.
    To prevent this, specify a wdamp parameter that isn't 1.
    Will display info by default.
    Early stopping is disabled by default.
    """
    if 'maxit' not in params:
        params['maxit'] = 30              # max iterations
    if 'n_part' not in params:
        params['n_part'] = 15             # number of particles
    if 'w' not in params:
        params['w'] = 1                   # inertia weight
    if 'wdamp' not in params:
        params['wdamp'] = 1               # inertia damping factor
    if 'c1' not in params:
        params['c1'] = 2.05               # cognitive coefficient
    if 'c2' not in params:
        params['c2'] = 2.05               # social coefficient
    # Constriction factor is only defaulted when the user tuned BOTH
    # inertia (w != 1) and damping (wdamp != 1).
    if 'con' not in params and params['wdamp'] != 1 and params['w'] != 1:
        params['con'] = 0.7298437881283576
    if 'display_info' not in params:
        params['display_info'] = 1
    if 'early_stopping' not in params:
        params['early_stopping'] = 0
    if 'early_stopping_rounds' not in params:
        params['early_stopping_rounds'] = 10
    if 'BPSO' not in params:
        params['BPSO'] = 0                # binary-PSO switch
    if 'velocity_limit_scale' not in params and params['BPSO'] == 0:
        params['velocity_limit_scale'] = 0.2
    elif params['BPSO'] == 1:
        # NOTE(review): this branch overwrites a user-supplied
        # velocity_limit_scale whenever BPSO == 1 -- confirm intended.
        params['velocity_limit_scale'] = 1
    return params
import logging
def checkAndUpdateAlerts(dbManager, camera, timestamp, driveFileIDs):
    """Check if alert has been recently sent out for given camera.

    Inserts a new row into the 'alerts' table unless a detection for the
    same camera occurred within the last hour.

    Args:
        dbManager (DbManager):
        camera (str): camera name
        timestamp (int):
        driveFileIDs (list): List of Google drive IDs for the uploaded image files

    Returns:
        True if this is a new alert, False otherwise
    """
    # Only alert if there has not been a detection in the last hour. This prevents spam
    # from long lasting fires.
    # NOTE(review): the query is built with %-interpolation; if `camera`
    # can ever contain untrusted text this is SQL-injectable -- prefer the
    # driver's parameter binding if dbManager.query supports it.
    sqlTemplate = """SELECT * FROM detections
    where CameraName='%s' and timestamp > %s and timestamp < %s"""
    sqlStr = sqlTemplate % (camera, timestamp - 60*60, timestamp)
    dbResult = dbManager.query(sqlStr)
    if len(dbResult) > 0:
        logging.warning('Supressing new alert due to recent detection')
        return False
    dbRow = {
        'CameraName': camera,
        'Timestamp': timestamp,
        # First uploaded image (if any) is stored with the alert.
        'ImageID': driveFileIDs[0] if driveFileIDs else ''
    }
    dbManager.add_data('alerts', dbRow)
    return True
def byte_notation(size: int, acc=2, ntn=0):
    """Decimal Notation: take an integer, convert it to a string with the
    requested decimal accuracy, and append single (ntn=0, default), double
    (ntn=1), or full word (ntn=2) unit notation.

    - Args:
        - size (int): the size to convert
        - acc (int, optional): number of decimal places to keep. Defaults to 2.
        - ntn (int, optional): notation name length. Defaults to 0.
    - Returns:
        - [tuple]: 0 = original size int unmodified; 1 = string for printing
    """
    size_dict = {
        1: ['B', 'B', 'bytes'],
        1000: ['k', 'kB', 'kilobytes'],
        1000000: ['M', 'MB', 'megabytes'],
        1000000000: ['G', 'GB', 'gigabytes'],
        1000000000000: ['T', 'TB', 'terabytes']
    }
    return_size_str = ''
    for key, value in size_dict.items():
        if (size / key) < 1000:
            return_size_str = f'{size / key:,.{acc}f} {value[ntn]}'
            # Bug fix: stop at the first (smallest) matching unit.  The old
            # loop kept iterating, so every size ended up labelled with the
            # largest unit (e.g. 500 -> '0.00 T').
            break
    return size, return_size_str
import copy
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d | 2d759603ad7cf1ada5741333be138f57957677f6 | 690,606 |
def is_seq_list(list_or_dict):
    """Normalize a "seq" entry to a list.

    A prefix-list with a single seq parses as a dictionary while multiple
    seqs parse as a list of dictionaries; wrap the single-dict case so
    callers always receive a list.
    """
    if isinstance(list_or_dict, list):
        return list_or_dict
    return [list_or_dict]
import re
def str_to_unicode_emoji(s):
    """
    Convert 'U+XXXX' code-point notation to the actual character
    (e.g. 'U+FE0E' -> '\\ufe0e') and strip all spaces from the result.
    """
    def _decode(match):
        return chr(int(match.group(1), 16))

    converted = re.sub(r'U\+([0-9a-fA-F]+)', _decode, s)
    return converted.replace(' ', '')
def prandtl(cp=None, mu=None, k=None, nu=None, alpha=None):
    """
    Calculate the dimensionless Prandtl number for a fluid or gas.

    .. math:: Pr = \\frac{c_p \\mu}{k} = \\frac{\\nu}{\\alpha}

    Parameters
    ----------
    cp : float
        Specific heat [J/(kg⋅K)]
    mu : float
        Dynamic viscosity [kg/(m⋅s)]
    k : float
        Thermal conductivity [W/(m⋅K)]
    nu : float, optional
        Kinematic viscosity [m²/s]
    alpha : float, optional
        Thermal diffusivity [m²/s]

    Returns
    -------
    pr : float
        Prandtl number [-]

    Examples
    --------
    >>> prandtl(cp=4188, mu=0.001307, k=0.5674)
    9.647

    >>> prandtl(nu=1.5064e-5, alpha=2.1002e-5)
    0.71726

    Raises
    ------
    ValueError
        Must provide (cp, mu, k) or (nu, alpha)

    References
    ----------
    Daizo Kunii and Octave Levenspiel. Fluidization Engineering.
    Butterworth-Heinemann, 2nd edition, 1991.
    """
    # Robustness fix: test for None explicitly instead of truthiness, so a
    # legitimately zero argument is not silently misrouted to the other
    # branch (or to the error).
    if cp is not None and mu is not None and k is not None:
        pr = (cp * mu) / k
    elif nu is not None and alpha is not None:
        pr = nu / alpha
    else:
        raise ValueError('Must provide (cp, mu, k) or (nu, alpha)')
    return pr
def sentence_selection(sentences):
    """
    Select sentences that are not empty/whitespace-only and have more than
    two tokens; each kept sentence is stripped.
    """
    # Bug fix: the old guard `(sent or not sent.isspace())` was always
    # true (empty strings satisfied the second clause); the intended test
    # is "non-empty AND not just whitespace".  Output is unchanged because
    # the token-count check already rejected those strings.
    return [
        sent.strip()
        for sent in sentences
        if sent and not sent.isspace() and len(sent.split()) > 2
    ]
def check_consistency(header1,
                      header2):
    """
    Return true if all critical fields of *header1* equal those of
    *header2*.
    """
    critical_fields = (
        'Station_Name',
        'IAGA_CODE',
        'Geodetic_Latitude',
        'Geodetic_Longitude',
        'Elevation',
        'Reported',
        'Sensor_Orientation',
        'Digital_Sampling',
        'Data_Interval_Type',
        'Data_Type',
    )
    return all(getattr(header1, field) == getattr(header2, field)
               for field in critical_fields)
def is_feature_component_start(line):
    """Checks if a line starts with '/', ignoring leading whitespace."""
    stripped = line.lstrip()
    return stripped.startswith("/")
def header(img, author, report_date, report_time, report_tz, title) -> str:
    """Creates reports header.

    Parameters
    ----------
    img : str
        Image (HTML fragment) for customizable report
    author : str
        Name of author responsible by report
    report_date : str
        Date when report is run
    report_time : str
        Time when report is run
    report_tz : str
        Timezone associated with datetime of report being run
    title : str
        Title of the report

    Returns
    -------
    str
        HTML snippet for the report header (a flex row with the image on
        the left and the analyst/date/time/title metadata on the right)
    """
    # NOTE(review): all values are interpolated unescaped into HTML --
    # callers must ensure they are trusted/escaped.
    return f"""
    <div style="display:flex; margin-bottom:1cm;">
        {img}
        <div style="margin-left:2em">
            <p><b>Analyst:</b> {author}</p>
            <p><b>Date :</b> {report_date}</p>
            <p><b>Time :</b> {report_time} {report_tz}</p>
            <br/>
            <p>{title}</p>
        </div>
    </div>"""
import re
def text_split(text):
    """Split *text* on '; ', ', ', single spaces, or runs of newlines,
    dropping empty tokens.
    """
    tokens = re.split('; |, | |\n+', text)
    return list(filter(None, tokens))
import copy
def flattened(header):
    """
    Return a deep copy of *header* flattened to 2 axes: NAXIS forced to 2,
    NAXIS3 removed, and every key containing "PLANE" removed.

    :param header: mapping-like FITS header
    :return: flattened copy (the input is left untouched)
    """
    flat_header = copy.deepcopy(header)
    flat_header["NAXIS"] = 2
    if "NAXIS3" in flat_header:
        del flat_header["NAXIS3"]
    # Bug fix: collect the keys first -- deleting while iterating the
    # mapping raises RuntimeError on dicts (or skips entries).
    plane_keys = [key for key in flat_header if "PLANE" in key]
    for key in plane_keys:
        del flat_header[key]
    return flat_header
def render_terms_text():
    """
    Inclusion tag for terms of service (template: terms_text.html).

    Returns an empty template context.
    """
    context = {}
    return context
import requests
def handle_response(r, http_method, custom_err):
    """
    Handles the HTTP response and returns the JSON.

    Parameters
    ----------
    r: requests module's response
    http_method: string
        "GET", "POST", "PUT", etc. (only used in the empty-body message)
    custom_err: string or None
        the custom error message if any

    Returns
    -------
    json : dict
        Parsed body on a 200 OK response; empty dict when the body is empty.

    Raises
    ------
    requests.HTTPError
        via raise_for_status() for any non-OK status code.
    """
    # NOTE(review): the local name `json` shadows the stdlib json module
    # within this function.
    json = {}
    if r.status_code == requests.codes.ok:
        if r.text:
            json = r.json()
        else:
            print("{0} returned an empty response.".format(http_method))
    else:
        # Report whatever context we have before raising.
        if custom_err is not None:
            print(custom_err)
        print("Status code: " + str(r.status_code))
        if r.text:
            print(r.text)
        r.raise_for_status()
    return json
from typing import OrderedDict
def _Net_blobs(self):
"""
An OrderedDict (bottom to top, i.e., input to output) of network
blobs indexed by name
"""
return OrderedDict([(bl.name, bl) for bl in self._blobs]) | 6c68e91a10fb9eda6c0be9ef03ffdd08823131d8 | 690,620 |
def is_cg_developed(properties):
    """Check if a colorgroup is fully developed (every property has a hotel).

    Asserts first that all given properties share a single color, i.e.
    that they really form one colorgroup.
    """
    colors = {prop.color for prop in properties}
    # check validity of colorgroup
    assert len(colors) == 1
    return all(prop.has_hotel for prop in properties)
def _matrix_to_vector(X):
"""
Returns a vector from flattening a matrix.
"""
u = X.reshape((1, -1)).ravel()
return u | 500290449bc0bac48b6ca6b7f59bf2f0d39bd216 | 690,623 |
def _prepare_agent_cmd(
ip_address: str,
machine_id: str,
cluster: str,
qnames: str,
workers_n=1,
):
"""
It will run nb agent command.
Name is not provided, so the agent will choose their name.
"""
cmd = f"nb agent -i {ip_address} -C {cluster} -q {qnames} -w {workers_n} -m {machine_id}"
return cmd | 17b2270ed850716e3f8cb74113a431c353f9163a | 690,624 |
def pad_guid_bytes(raw_bytes: bytes) -> bytes:
    """Pad raw bytes with trailing NULs up to the 16-byte size of a UUID.

    Note that if you're using an int as your source for instantiating a
    UUID, you should not use this function; just use UUID(your_int_here).

    Raises:
        ValueError: when len(raw_bytes) is 0 or greater than 16.
    """
    length = len(raw_bytes)
    if length == 0 or length > 16:
        raise ValueError("Byte length must be between 1 and 16.")
    padding = b'\x00' * (16 - length)
    return raw_bytes + padding
def file_info_equal(file_info_1, file_info_2):
    """Return true if the two file-infos indicate the file hasn't changed.

    Each info is a 3-tuple; (None, None, None) means "file does not
    exist" and never compares equal to anything -- even another missing
    file.
    """
    missing = (None, None, None)
    if file_info_1 == missing or file_info_2 == missing:
        return False
    # Same first two fields (size and mtime) -> unchanged.
    if file_info_1[:2] == file_info_2[:2]:
        return True
    # Even with different mtimes, the infos are equal when size and crc
    # match -- but only when the optional crc is actually present.
    return file_info_1[2] is not None and file_info_1[1:] == file_info_2[1:]
import re
def sort_strings_by_regex_list(sl,rl):
    """Sort strings by the index of the first regex in ``rl`` they match;
    strings matching no regex sort last.

    Example:
        sl = ["aaa.0.1.shared","aaa.0.01.shared","aaa.0.03.shared"]
        rl = [r"0.01",r"0.03",r"0.1"]
        print sort_strings_by_regex_list(sl,[re.escape(r) for r in rl])
        >>> ["aaa.0.01.shared","aaa.0.03.shared","aaa.0.1.shared"]
    """
    def first_match_index(s):
        for idx, pattern in enumerate(rl):
            if re.search(pattern, s):
                return idx
        return len(rl)

    return sorted(sl, key=first_match_index)
def es_get_class_defs(cls_def, cls_name):
    """
    Extract (and remove) the Elasticsearch-related class definitions --
    keys starting with "kds_es" -- from the class definition mapping.

    Args:
    -----
        cls_def: RdfDataset of class definitions (mutated: matched keys
            are deleted).
        cls_name: unused here; kept for interface compatibility.

    Returns:
        dict of the extracted kds_es* entries.
    """
    extracted = {key: value for key, value in cls_def.items()
                 if key.startswith("kds_es")}
    for key in extracted:
        del cls_def[key]
    return extracted
def status_new_data_body(new_title: list, new_video: list) -> str:
    """Get the text body for updating status line data.

    Wrapped for short-circuit if no new data, and due to the complexity of
    latter operations (a bit of a jumble).  Output uses ANSI colour codes
    (yellow for changed titles, green for new videos).

    @param new_title List of new (changed) video titles, each an
        (old, new) pair
    @param new_video List of new videos
    @return Body text (string): a header summary line followed by
        per-item detail lines
    """
    # Store the lengths, and check if there's nothing to do
    l_t = len(new_title)
    l_v = len(new_video)
    if l_t == 0 and l_v == 0:
        return "\033[34mNo new data\033[m"
    # Initialise stores for text data
    t_data_header = []
    t_data_body = []
    # If at least one changed title, add data
    if l_t > 0:
        s_t = "" if l_t == 1 else "s"  # pluralisation suffix
        t_data_new_title = [
            f"↳ \033[33mChanged title{s_t}\033[m:",
            # Each entry is rendered as "old => new".
            *map(lambda x: f" - {x[0]} => {x[1]}", new_title),
        ]
        t_data_header.append(f"\033[33m{l_t} title{s_t} changed\033[m")
        t_data_body.extend(t_data_new_title)
    # If at least one new video, add data
    if l_v > 0:
        s_v = "" if l_v == 1 else "s"  # pluralisation suffix
        t_data_new_video = [
            f"↳ \033[32mNew video{s_v}\033[m:",
            *map(lambda x: f" - {x}", new_video),
        ]
        t_data_header.append(f"\033[32m{l_v} new video{s_v}\033[m")
        t_data_body.extend(t_data_new_video)
    # Join the body header text and body additional text
    t_header = "; ".join(t_data_header)
    t_body = "\n".join(t_data_body)
    # Return the combined text for the body
    return f"{t_header}\n{t_body}"
import random
def generateIndices(n_blocks, N, D):
    """
    generates indices for block matrix computation.
    Checked.

    Input:
    n_blocks: number of blocks to use.
    N: number of samples.
    D: number of genes.

    Output:
    y_indices_to_use[i][j] is the (sorted) indices of block j in sample i.

    Side effects: consumes state from the global `random` module and
    prints the block sizes for the first sample.
    """
    y_indices_to_use = []
    idxs = list(range(D))
    # Nominal block size; the last block absorbs any remainder.
    n_in_block = int(1. * D / n_blocks)
    for i in range(N):
        partition = []
        # Fresh random partition of the D indices for each sample.
        random.shuffle(idxs)
        n_added = 0
        for block in range(n_blocks):
            start = n_in_block * block
            end = start + n_in_block
            if block < n_blocks - 1:
                idxs_in_block = idxs[start:end]
            else:
                # Last block takes everything left (handles D % n_blocks != 0).
                idxs_in_block = idxs[start:]
            partition.append(sorted(idxs_in_block))
            n_added += len(idxs_in_block)
        y_indices_to_use.append(partition)
        if i == 0:
            print('Block sizes', [len(a) for a in partition])
        # Sanity check: the partition must cover all D indices exactly.
        assert(n_added == D)
    return y_indices_to_use
def get_target_name(prefix='', rconn=None):
    """Return wanted Telescope named target.

    Reads key ``prefix + 'target_name'`` from *rconn* (a redis-like
    connection) and decodes it as UTF-8.

    Returns '' when *rconn* is None, or on any failure (missing key,
    connection error, decode error).
    """
    if rconn is None:
        return ''
    try:
        target_name = rconn.get(prefix+'target_name').decode('utf-8')
    except Exception:
        # Bug fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate; any redis/decode error maps to "no
        # target".
        return ''
    return target_name
import sys
import os
import fnmatch
def _get_standard_modules():
    """ Return list of module names in the Python standard library.

    Scans the interpreter's library directories (including the original
    prefix when running inside a virtualenv) and collects the builtin
    module names plus every .py/extension module found, excluding
    site-packages and test packages.
    """
    # Find library directories.
    if sys.platform == 'win32':  #pragma no cover
        lib_dir = os.path.join(sys.prefix, 'Lib')
        obj_dir = os.path.join(sys.prefix, 'DLLs')
        obj_pattern = '*.dll'
        roots = [lib_dir, obj_dir]
    else:
        lib_dir = os.path.join(sys.prefix, 'lib', 'python'+sys.version[0:3])
        obj_dir = 'lib-dynload'
        obj_pattern = '*.so'
        roots = [lib_dir]
    # Find 'real' sys.prefix (not that of virtualenv).
    orig_prefix = None
    try:
        with open(os.path.join(lib_dir, 'orig-prefix.txt'), 'r') as inp:
            orig_prefix = inp.read()
    except IOError as error:
        # No orig-prefix.txt -> not in a virtualenv; nothing to do.
        pass
    # Add to roots.
    if orig_prefix:
        # Mirror each root under the original (non-virtualenv) prefix.
        for root in tuple(roots):
            roots.append(orig_prefix + root[len(sys.prefix):])
    # Now scan for modules.
    excludes = set(sys.builtin_module_names)
    for root in roots:
        for dirpath, dirnames, filenames in os.walk(root):
            if 'test' in dirnames:
                dirnames.remove('test')
            if dirpath == root:
                path = ''
                if 'site-packages' in dirnames:
                    dirnames.remove('site-packages')
            else:
                path = dirpath[len(root)+1:]
                if path == obj_dir:
                    # Extension modules in lib-dynload are top-level.
                    path = ''
            for name in filenames:
                if fnmatch.fnmatch(name, '*.py*') or \
                   fnmatch.fnmatch(name, obj_pattern):
                    # Convert the relative file path to a dotted module name.
                    name = os.path.join(path, name[:name.rfind('.')])
                    name = name.replace(os.sep, '.')
                    if name.endswith('.__init__'):
                        name = name[:name.rfind('.')]
                    excludes.add(name)
    return list(excludes)
import argparse
def parseArgs():
    """
    Well, parse the arguments passed in the command line :)
    """
    parser = argparse.ArgumentParser(description="Does a bunch of draining checks")
    parser.add_argument('-t', '--twiki', action='store_true', default=False,
                        help='Use it to get an output that can be directly pasted in a twiki')
    return parser.parse_args()
import re
def quotemeta(text):
    """Quote letters with special meanings in pychart so that <text> will display
    as-is when passed to canvas.show().

    >>> font.quotemeta("foo/bar")
    "foo//bar"
    """
    text = re.sub(r'/', '//', text)
    # Bug fix: the patterns were r'\\{' / r'\\}' (a literal backslash
    # followed by a brace), so bare braces -- which pychart treats as
    # escape delimiters -- were never quoted.
    text = re.sub(r'\{', '/{', text)
    text = re.sub(r'\}', '/}', text)
    return text
def write_doc(file_name):
    """Open a converted document for binary writing.

    Parameters
    ----------
    file_name : str
        The converted document path.

    Returns
    -------
    out_doc : file
        A writable binary file object (caller is responsible for closing).
    """
    out_doc = open(file_name, "wb")
    return out_doc
def compute_delays(SOA):
    """Calculate the delay time for color/word input.

    A positive SOA means the color is presented earlier (so the word
    input is delayed), and vice versa.

    Parameters
    ----------
    SOA : int
        stimulus onset asynchrony == color onset - word onset

    Returns
    -------
    int, int
        the delay time for color/word input, respectively
    """
    if SOA >= 0:
        return 0, SOA
    return -SOA, 0
def selection_criteria_harris(l1, l2, k = 0.04):
    """
    Harris corner response computed from the eigenvalues (l1, l2) of the
    structure matrix M and the constant k:

        response = l1*l2 - k * (l1 + l2)^2

    i.e. det(M) - k * trace(M)^2.
    """
    determinant = l1 * l2
    trace = l1 + l2
    return determinant - k * (trace * trace)
import binascii
import struct
def fw_int_to_hex(*args):
    """Pack integers into a hex string.

    Each argument is encoded as a little-endian unsigned 16-bit value.
    """
    packed = struct.pack('<{}H'.format(len(args)), *args)
    return binascii.hexlify(packed).decode('utf-8')
import torch
def VDraw(x):
    """Draw one sample from a Gaussian whose mean is x[:, :128] and whose
    std is x[:, 128:] (both 128-d per row).
    """
    mean, std = x[:, :128], x[:, 128:]
    gaussian = torch.distributions.Normal(mean, std)
    return gaussian.sample()
import glob
import os
def get_cache_file_list(path):
    """
    Return the list of ``*.json`` file paths directly under *path*.

    (Doc fix: the old docstring claimed this counts the files and returns
    an int; it has always returned the matching paths.)  If *path* is not
    a valid directory the result is undefined (typically an empty list).

    :param path: directory to scan (passed in to save a system call)
    :return: list of matching file paths
    """
    pattern = os.path.join(path, "*.json")
    return glob.glob(pattern)
import math
def dist(p,q):
    """
    Euclidean distance between two 2D points *p* and *q* (each indexable
    as [0], [1]).
    """
    # math.hypot is the stdlib idiom and avoids overflow/underflow of the
    # intermediate squared terms.
    return math.hypot(p[0] - q[0], p[1] - q[1])
from typing import Dict
def sra_id_to_app_input(sra_id: str) -> Dict:
    """Generate the app input dict for sra_fastq_importer.

    ``split_files`` is set to False so no merging is needed afterwards.

    Args:
        sra_id: SRA accession id.
    Returns:
        dict with the accession and the split_files flag.
    """
    app_input = {
        "accession": sra_id,
        "split_files": False,
    }
    return app_input
def div(num1, num2):
    """
    Return the true-division quotient of num1 by num2.

    Raises ZeroDivisionError when num2 is 0.
    """
    quotient = num1 / num2
    return quotient
from datetime import datetime
def to_excel_ts(ts):
    """
    Convert a datetime into Excel serial date-time format: whole days
    since the Excel epoch (1899-12-30) plus the time of day as a
    fraction of 86400 seconds.
    """
    excel_epoch = datetime(1899, 12, 30)
    delta = ts - excel_epoch
    day_fraction = delta.seconds / 86400.0
    return delta.days + day_fraction
def get_content_id(item):
    """Extract the content id or URI: music tracks yield their URI,
    everything else yields the plain item id."""
    is_track = item.item_class == "object.item.audioItem.musicTrack"
    return item.get_uri() if is_track else item.item_id
import re
def get_declarations(code, qualifier=""):
    """ Extract declarations of type:
    qualifier type name[,name,...];

    Array declarations ("name[3]") are expanded to one entry per element
    ("name[0]", "name[1]", "name[2]").

    :param code: source text to scan (GLSL-style declarations).
    :param qualifier: one qualifier string (e.g. "uniform"), a list of
        qualifiers (matched as regex alternatives), or "" to match
        unqualified declarations.
    :return: list of (name, type) tuples.
    :raises RuntimeError: when an array is declared with size 0.
    """
    if not len(code):
        return []
    variables = []
    # A list of qualifiers becomes a single regex alternation group.
    if isinstance(qualifier, list):
        qualifier = "(" + "|".join([str(q) for q in qualifier]) + ")"
    if qualifier != "":
        re_type = re.compile(r"""
        %s                               # Variable qualifier
        \s+(?P<type>\w+)                 # Variable type
        \s+(?P<names>[\w,\[\]\n =\.$]+); # Variable name(s)
        """ % qualifier, re.VERBOSE)
    else:
        re_type = re.compile(r"""
        \s*(?P<type>\w+)                 # Variable type
        \s+(?P<names>[\w\[\] ]+)         # Variable name(s)
        """, re.VERBOSE)
    # Parses one "name" or "name[size]" (optionally followed by an
    # initializer) out of the comma-separated names group.
    re_names = re.compile(r"""
    (?P<name>\w+)           # Variable name
    \s*(\[(?P<size>\d+)\])? # Variable size
    (\s*[^,]+)?
    """, re.VERBOSE)
    for match in re.finditer(re_type, code):
        vtype = match.group('type')
        names = match.group('names')
        for match in re.finditer(re_names, names):
            name = match.group('name')
            size = match.group('size')
            if size is None:
                variables.append((name, vtype))
            else:
                size = int(size)
                if size == 0:
                    raise RuntimeError(
                        "Size of a variable array cannot be zero")
                # Expand the array into one variable per element.
                for i in range(size):
                    iname = '%s[%d]' % (name, i)
                    variables.append((iname, vtype))
    return variables
def getkey(dict_, key, default=None):
    """Look up *key* in *dict_*, returning *default* when absent
    (thin wrapper around dict.get)."""
    return dict_.get(key, default)
import sys
import itertools
import gzip
def split_fastq_file_pbat(num_chunks, input_files, output_prefix):
    """
    This function mimics the unix split utility: reads are distributed
    round-robin across ``num_chunks`` output files named
    ``output_prefix0``, ``output_prefix1``, ...  Each read's sequence is
    reverse-complemented and its quality string reversed (PBAT protocol),
    and read ids are truncated at the first space.

    :param num_chunks: number of output chunk files.
    :param input_files: one path or a list of paths to FASTQ(.gz) inputs.
    :param output_prefix: prefix for the chunk file names.
    :return: total number of reads written.
    """
    def reverse_complement(dna):
        complement = {"A":"T","C":"G","G":"C","T":"A","N":"N"}
        return("".join([complement[base] for base in reversed(dna)]))
    if not isinstance(input_files, list):
        if isinstance(input_files, str):
            input_files = [input_files]
        else:
            sys.exit("input_files must be a list of strings")
    file_handles = {}
    for index in range(0,num_chunks):
        file_handles[index]=open(output_prefix+str(index),'w')
    # Endless 0..num_chunks-1 cycle drives the round-robin distribution.
    cycle = itertools.cycle(list(range(0,num_chunks)))
    total_reads=0
    for inputf in input_files:
        if inputf[-3:] == ".gz":
            f = gzip.open(inputf,'rt')
        else:
            f = open(inputf,'r')
        while True:
            current_file = next(cycle)
            # processing read id
            # remove any string after the first space character
            line = f.readline()
            if not line:
                break
            # read id
            line = line.rstrip()
            file_handles[current_file].write(line.split(" ")[0]+"\n")
            total_reads += 1
            # seq (reverse-complemented for PBAT)
            line = f.readline()
            line = line.rstrip()
            file_handles[current_file].write(reverse_complement(line)+"\n")
            # '+' separator line, copied through unchanged
            line = f.readline()
            file_handles[current_file].write(line)
            # qual (reversed to stay aligned with the flipped sequence)
            line = f.readline()
            line = line.rstrip()
            file_handles[current_file].write(line[::-1]+"\n")
        f.close()
    for index in range(0,num_chunks):
        file_handles[index].close()
    return(total_reads)
from datetime import datetime
def _to_collected_format(date):
"""Convert input date format from '%Y%-m-%d' to '%Y%m%d'"""
return str(datetime.strptime(date, "%Y-%m-%d").strftime("%Y%m%d")) | ec9dd77f6ff58d26e3059b595f32c78ff0996c36 | 690,652 |
def _unpersist_broadcasted_np_array(broadcast):
"""
Unpersist a single pyspark.Broadcast variable or a list of them.
:param broadcast: A single pyspark.Broadcast or list of them.
"""
if isinstance(broadcast, list):
[b.unpersist() for b in broadcast]
else:
broadcast.unpersist()
return None | dbd43e27db1bad87a8b86f1e9e24a40ad4bfa558 | 690,653 |
from typing import Callable
from typing import Any
def is_callback(func: Callable[..., Any]) -> bool:
    """Check if function is safe to be called in the event loop."""
    flag = getattr(func, "_mass_callback", False)
    return flag is True
def _to_str(s):
"""Downgrades a unicode instance to str. Pass str through as-is."""
if isinstance(s, str):
return s
# This is technically incorrect, especially on Windows. In theory
# sys.getfilesystemencoding() should be used to use the right 'ANSI code
# page' on Windows, but that causes other problems, as the character set
# is very limited.
return s.encode('utf-8') | 6df44a8c56bdadf767e11d5d784c83fe0bd842cc | 690,655 |
def Intersect(list1,list2):
    """
    Return the items of list1 that also appear in list2, preserving
    list1's order (duplicates in list1 are kept).
    """
    return [item for item in list1 if item in list2]
import math
def euler_problem_9(n=1000):
    """
    A Pythagorean triplet is a set of three natural numbers a < b < c with
    a^2 + b^2 = c^2 (e.g. 3^2 + 4^2 = 5^2).  Find the product a*b*c of the
    triplet with a + b + c == n, or -1 when none exists.
    """
    assert n > 10
    # With a < b < c and a+b+c == n we must have n/3 < c < n/2, and
    # b > c/sqrt(2) (otherwise a^2 + b^2 < c^2).  Brute force within
    # those bounds, largest c first.
    for c in range(n // 2, n // 3, -1):
        c_squared = c * c
        b_floor = int(c / math.sqrt(2))
        for b in range(c - 1, b_floor, -1):
            a = n - b - c
            if a * a + b * b == c_squared:
                return a * b * c
    return -1
import itertools
def _get_inputs(values):
""" Generate the list of all possible ordered subsets of values. """
power_set = set(
[
tuple(set(prod))
for prod in itertools.product(values, repeat=len(values))
]
)
power_perms = [itertools.permutations(comb) for comb in power_set]
ordered_power_set = []
for perm in power_perms:
for item in perm:
ordered_power_set.append(list(item))
return ordered_power_set | 5150d08954f9867a5d7c1fca3bb4384f4e89ff59 | 690,659 |
import re
def next_look_say(current_value):
    """
    Given a numeric string, compute its 'look-and-say' successor: each
    run of identical digits becomes the run length followed by the digit.
    """
    runs = re.finditer(r'(\d)\1*', current_value)
    return ''.join('%d%s' % (len(m.group(0)), m.group(1)) for m in runs)
def set_union(sets):
    """Union of an iterable of sets (non-variadic wrapper for set.union)."""
    merged = set.union(*sets)
    return merged
def solve_nu(H):
    """
    Solve for the ratios mu_i = H[i] / (H[i] + H[i+1]) over consecutive
    entries of H, printing each one.

    :param H: sequence of H values.
    :return: list of the N-1 ratios.
    """
    ratios = [h / (h + h_next) for h, h_next in zip(H, H[1:])]
    for index, ratio in enumerate(ratios):
        print('μ{} = {}'.format(index + 1, ratio))
    return ratios
def set_tags(config, tags):
    """Set the global 'tags' entry of *config*; always returns True."""
    config['tags'] = tags
    return True
import warnings
def _validate_repo_id_deprecation(repo_id, name, organization):
"""Returns (name, organization) from the input."""
if not (repo_id or name):
raise ValueError(
"No name provided. Please pass `repo_id` with a valid repository name."
)
if repo_id and (name or organization):
raise ValueError(
"Only pass `repo_id` and leave deprecated `name` and "
"`organization` to be None."
)
elif name or organization:
warnings.warn(
"`name` and `organization` input arguments are deprecated and "
"will be removed in v0.7. Pass `repo_id` instead.",
FutureWarning,
)
else:
if "/" in repo_id:
organization, name = repo_id.split("/")
else:
organization, name = None, repo_id
return name, organization | 89653f85b4a29a02e32ce9f3b7c3a33d6bc75ff6 | 690,664 |
import importlib
def resolve_python_path(path):
    """
    Turn a python path like module.name.here:ClassName.SubClass into the
    referenced object.
    """
    module_path, local_path = path.split(':', 1)
    target = importlib.import_module(module_path)
    # Walk the attribute chain after the colon.
    for attr_name in local_path.split('.'):
        target = getattr(target, attr_name)
    return target
from typing import Dict
def get_reversed_enumerated_from_dict(enumerated_dict: Dict[int, int]) -> Dict[int, int]:
    """
    Get the inverse of an enumerated dictionary: each value becomes a key
    mapping back to its original key.

    :param enumerated_dict: mapping of sorted-index -> true-index
    :return: mapping of true-index -> sorted-index
    """
    return {true_index: index_sorted
            for index_sorted, true_index in enumerated_dict.items()}
def maxThreats(a):
    """Return the maximum number of diagonal threats on any single queen
    (capped at 4), where ``a`` lists one queen per row by 1-based column.

    left_threads stores threads in left-top-to-right-bottom direction
    right_threads stores threads in right-top-to-left-bottom direction"""
    left_threads, right_threads = dict(), dict()
    max_threads, threads = 0, [0] * len(a)
    for row, col in enumerate(a):
        col -= 1  # define top-left position as (0, 0)
        # Diagonal keys: col-row is constant along "\" diagonals,
        # col+row along "/" diagonals.
        left = col - row
        right = col + row
        if left in left_threads:  # exists thread
            threads[left_threads[left]] += 1  # nearest queen
            if threads[left_threads[left]] == 4:
                return 4  # max threads reached
            threads[row] += 1  # current queen
        left_threads[left] = row  # mark nearest left-thread queen
        if right in right_threads:
            threads[right_threads[right]] += 1
            if threads[right_threads[right]] == 4:
                return 4  # max threads reached
            threads[row] += 1
        right_threads[right] = row  # mark nearest right-thread queen
    return max(threads)
def subsitute_env_vars( line, env ):
    """ Substitutes environment variables in the command line.
    E.g. dir %EPOCROOT% -> dir T:\\
    The command line does not seem to do this automatically when launched
    via subprocess.
    """
    for name, value in env.items():
        token = "%%%s%%" % name
        line = line.replace(token, value)
    return line
import torch
def validation(model, validationloader, criterion, device):
    """Evaluate the network on the validation set.

    Inputs:
    - model: The model that is currently being trained.
    - validationloader: A dataloader object representing the validation data.
    - criterion: The criterion used to train the model.
    - device: Device to run the evaluation on (e.g. 'cpu' or 'cuda').

    Returns the mean loss and mean accuracy over all validation batches.
    """
    model.to(device)
    loss_sum, accuracy_sum = 0, 0
    for images, labels in validationloader:
        images, labels = images.to(device), labels.to(device)
        log_probs = model.forward(images)
        loss_sum += criterion(log_probs, labels).item()
        # The output is treated as log-probabilities; exp() recovers probabilities.
        probs = torch.exp(log_probs)
        hits = (labels.data == probs.max(dim=1)[1])
        accuracy_sum += hits.type(torch.FloatTensor).mean()
    batch_count = len(validationloader)
    return loss_sum / batch_count, accuracy_sum / batch_count
import argparse
def generate_parser():
    """Build the command-line argument parser for the quasar scoring tool."""
    parser = argparse.ArgumentParser(
        description="%(prog)s -- Find a quality score for a HiC dataset")
    for dest, helptext in (("input1", "First quasar file name"),
                           ("input2", "Second quasar file name"),
                           ("output", "Results file name")):
        parser.add_argument(dest=dest, type=str, action='store', help=helptext)
    return parser
def cut_below() -> str:
    """Produce the separator line marking where content should be cut."""
    return "\n.--Cut after this line --.\n"
import requests
import json
def get_poc_info(poc):
    """Retrieves ARIN point of contact information.

    Args:
        poc: The URL for the ARIN PoC information.

    Returns:
        A list containing point of contact information. The list contains the
        name of the contact, the ARIN company name, the address, the e-mail
        address(es), and the phone number(s). An example is below:
            ['John Smith', 'Acme Corp',
             '123 Apple Way, Springfield, PA United States, 55401',
             'john.smith@example.com', '+1-123-555-1234'
            ]
    """
    # Request the PoC record as JSON. ARIN wraps every scalar in {'$': value}
    # and returns either a single dict or a list for repeatable elements.
    # ``requests`` and ``json`` are imported at module level.
    req = requests.get(poc + '.json')
    parsed = json.loads(req.text.encode('utf-8'))
    record = parsed['poc']

    # Name: last name is mandatory, first name may be absent.
    name = record['lastName']['$']
    try:
        name = record['firstName']['$'] + ' ' + name
    except KeyError:
        pass
    try:
        company = record['companyName']['$']
    except KeyError:
        company = 'No company data'

    # Address: street lines come as a single dict or a list of dicts.
    # Joining the parts at the end avoids the old leading-comma problem
    # (which previously truncated the address to a single segment).
    parts = []
    try:
        parts.append(str(record['streetAddress']['line']['$']).rstrip())
    except TypeError:  # 'line' is a list of line dicts
        for entry in record['streetAddress']['line']:
            parts.append(str(entry['$']).rstrip())
    try:
        parts.append(str(record['city']['$']))
    except KeyError:
        pass
    try:
        parts.append(str(record['iso3166-1']['name']['$']))
    except KeyError:
        pass
    try:
        parts.append(str(record['postalCode']['$']))
    except KeyError:
        pass
    address = ', '.join(part for part in parts if part)

    # E-mail(s): iterating the list directly fixes the old bug where the
    # loop index was never incremented and the first entry was repeated.
    try:
        email = record['emails']['email']['$']
    except TypeError:  # multiple e-mail entries
        email = ', '.join(str(entry['$']).rstrip()
                          for entry in record['emails']['email'])

    # Phone number(s): same single-dict-or-list shape (and same index fix).
    try:
        phone = str(record['phones']['phone']['number']['$'])
    except TypeError:  # multiple phone entries
        phone = ', '.join(str(entry['number']['$']).rstrip()
                          for entry in record['phones']['phone'])

    return [name, company, address, email, phone]
def effect_on_response(codes, effect, result):
    """
    Returns the specified effect if the resulting HTTP response code is
    in ``codes``.

    Useful for invalidating auth caches if an HTTP response is an auth-related
    error.

    :param tuple codes: integer HTTP codes
    :param effect: An Effect to perform when response code is in ``codes``.
    :param result: The result to inspect, from an Effect of :obj:`Request`.
    """
    response, _content = result
    if response.code not in codes:
        return result
    # Chain the effect, then restore the original result as its outcome.
    return effect.on(success=lambda _ignored: result)
def format_markdown(content, params):
    """Format content with config parameters, falling back to the raw text.

    Arguments:
        content {str} -- Unformatted content, possibly containing
            ``{placeholder}`` fields.
        params {dict} -- Mapping used to fill the placeholders.

    Returns:
        {str} -- Formatted content, or the original content unchanged when
        it cannot be formatted.
    """
    try:
        fmt = content.format(**params)
    except (KeyError, IndexError, ValueError):
        # KeyError: unknown named placeholder; IndexError: positional
        # placeholder like {0}; ValueError: stray '{' or '}' in the text.
        # In all cases fall back to the unformatted content.
        fmt = content
    return fmt
def snake_to_camel(s: str):
    """
    Convert a snake_cased_name to a camelCasedName

    :param s: the snake_cased_name
    :return: camelCasedName
    """
    head, *tail = s.split("_")
    return head + "".join(part.title() for part in tail)
import subprocess
import time
def wait_for_compose(name, architecture=None, labcontroller=None, timeout=7200, wait_step=600):
    """
    Poll beaker until the given compose is available or the timeout expires.

    :param name: Compose ID
    :type name: str
    :param architecture: Architecture like x86_64 or aarch64. If not provided, any available architecture is sufficient.
    :type architecture: str, optional
    :param labcontroller: Name of beaker labcontroller. If not provided, any labcontroller is sufficient.
    :type labcontroller: str, optional
    :param timeout: Number of seconds how long it should be waited for the compose to be available.
    :type timeout: int, optional
    :param wait_step: Interval used for re-checking the availability of the compose.
    :type wait_step: int, optional
    :return: True if the compose of given name and architecture is available in the labcontroller in less than timeout seconds. Return False otherwise.
    :rtype: bool
    """
    command = ['bkr', 'distro-trees-list', '--name', name]
    if architecture:
        command += ['--arch', architecture]
    if labcontroller:
        command += ['--labcontroller', labcontroller]
    while timeout > 0:
        # subprocess.DEVNULL is portable and avoids re-opening /dev/null
        # on every polling iteration.
        result = subprocess.run(command,
                                stdout=subprocess.DEVNULL,
                                stderr=subprocess.DEVNULL)
        if result.returncode == 0:
            return True
        time.sleep(wait_step)
        timeout -= wait_step
    return False
from typing import AbstractSet
def add_to_set(set_: AbstractSet[str] | None, new: str) -> set[str]:
"""Add an entry to a set (or create it if doesn't exist).
Args:
set_: The (optional) set to add an element to.
new: The string to add to the set.
"""
return set(set_).union([new]) if set_ is not None else {new} | ec1f6e3ca51bc11ff0996a1aab00e84e2c23228e | 690,677 |
def get_clean_bool_option(a_string):
    """Strip the value part (e.g. the trailing ``=YES`` or ``=NO``) from an
    option string so that it can be set differently later on.

    Args:
        a_string: An ``OPTION=VALUE`` style string.

    Returns:
        The option name prefixed with ``-D``, almost ready for CLI usage.
    """
    # Split only on the first '=' so values that themselves contain '='
    # no longer raise a ValueError during unpacking.
    option = a_string.split('=', 1)[0]
    return "-D{}".format(option)
def weight_l1_loss(pred_loc, label_loc, loss_weight):
    """Weighted L1 localisation loss, averaged over the batch.

    :param pred_loc: [b, 4k, h, w]
    :param label_loc: [b, 4k, h, w]
    :param loss_weight: [b, k, h, w]
    :return: loc loss value
    """
    b, _, sh, sw = pred_loc.size()
    # Give the 4 box coordinates their own axis: [b, 4, k, h, w].
    per_coord = pred_loc.view(b, 4, -1, sh, sw)
    abs_err = (per_coord - label_loc).abs()
    # Sum over the coordinate axis and collapse back to [b, k, h, w].
    per_anchor = abs_err.sum(dim=1).view(b, -1, sh, sw)
    weighted = per_anchor * loss_weight
    return weighted.sum().div(b)
def read_phase_results(stable_phases,calculation_results) :
    """
    read_phase_results
    # input
    stable_phases: list of strings, names of the stable phases
    calculation_results: TC_python calculation result object
    # output
    volume_fractions_temp: list of dicts, volume fractions of phases
    phase_fractions_temp: list of dicts, phase fractions of phases
    """
    volume_fractions_temp = []
    phase_fractions_temp = []
    for phase in stable_phases:
        vpv_key = 'vpv({})'.format(phase)
        npm_key = 'npm({})'.format(phase)
        volume_fractions_temp.append({vpv_key: calculation_results.get_value_of(vpv_key)})
        phase_fractions_temp.append({npm_key: calculation_results.get_value_of(npm_key)})
    return (volume_fractions_temp, phase_fractions_temp)
import subprocess
def call(cmd, **kwargs):
    """Run *cmd* through the shell. Output is shown on the console;
    the exit status code is returned."""
    options = dict(kwargs, shell=True)
    return subprocess.call(cmd, **options)
def _transform_record (record):
"""Transform a record from a list of fields to a dict.
Doesn't handle nested records.
"""
if isinstance(record, list):
# value can only be missing if it was an empty sequence of tokens,
# which should have become a list, which we should transform into a dict
return {field['name']: field.get('value', {}) for field in record}
else:
return record | fbdbb2e15e6e5781ccd93588bdb50f4df936155b | 690,682 |
import os
def get_path(name):
    """Helper: build the path to the wepp data file *name* relative to
    this module's directory."""
    here = os.path.dirname(__file__)
    return "{}/../data/wepp/{}".format(here, name)
def make_orderer():
    """
    Create helper functions for sorting and comparing objects.

    ``orderer`` is a decorator that records registration order by
    ``__name__``; ``comparator`` ranks two names by that order.
    """
    registry = {}

    def orderer(obj):
        registry[obj.__name__] = len(registry)
        return obj

    def comparator(obj_a, obj_b):
        return [1, -1][registry[obj_a] < registry[obj_b]]

    return orderer, comparator
def extrapolDiff(f, x, h):
    """Approximate the derivative of ``f`` at ``x`` by the extrapolated
    central-difference formula
    ``1/(3h) * (8*(f(x + h/4) - f(x - h/4)) - (f(x + h/2) - f(x - h/2)))``.
    """
    quarter_step = f(x + h/4) - f(x - h/4)
    half_step = f(x + h/2) - f(x - h/2)
    return 1/(3*h) * (8*quarter_step - half_step)
import torch
def invert_convert_to_box_list(x: torch.Tensor, original_width: int, original_height: int) -> torch.Tensor:
    """ Undo convert_to_box_list: takes input of shape (*, width x height, ch)
    and returns shape (*, ch, width, height).
    """
    assert x.shape[-2] == original_width * original_height
    target_shape = list(x.shape[:-2]) + [x.shape[-1], original_width, original_height]
    return x.transpose(-1, -2).view(target_shape)
def merge_geometry(catchment, splitCatchment, upstreamBasin):
    """Attempt at merging geometries"""
    print('merging geometries...')
    buffer_dist = 0.00045  # buffer/simplify distance (degrees)
    cofactor = 1.3
    simplified_split = splitCatchment.simplify(buffer_dist)
    # Remove the split catchment from the full catchment, cleaning slivers
    # with a shrink/grow buffer pass, then cut that remainder out of the
    # upstream basin using the same cleanup.
    remainder = (catchment.difference(simplified_split)
                 .buffer(-buffer_dist)
                 .buffer(buffer_dist * cofactor)
                 .simplify(buffer_dist))
    mergedCatchmentGeom = (upstreamBasin.difference(remainder)
                           .buffer(-buffer_dist)
                           .buffer(buffer_dist * cofactor)
                           .simplify(buffer_dist))
    print('finished merging geometries')
    return mergedCatchmentGeom
def load_ndarray(arr):
    """ Load a numpy array: it is already in memory, so hand it back as-is. """
    return arr
import re
def convert_crfpp_output(crfpp_output):
    """
    Convert CRF++ command line output.

    Splits the raw output into one [gold_label, pred_label] list per word
    per sentence (sentences are separated by blank lines, tokens by
    newlines, columns by tabs).

    Parameters
    ----------
    crfpp_output : str
        Command line output obtained from a CRF++ command.

    Returns
    -------
    result : list
        List of [gold_label, pred_label] per word per sentence.
    """
    converted = []
    for sentence in re.split(r'\n\n+', crfpp_output.strip()):
        tokens = re.split(r'\n', sentence)
        # The gold and predicted labels are the last two columns.
        converted.append([re.split(r'\t', token)[-2:] for token in tokens])
    return converted
def create_word_inds_dicts(words_counted,
                           specials=None,
                           min_occurences=0):
    """ Create lookup dicts from word to index and back.

    Optional *specials* are assigned the lowest indices first.  Words seen
    fewer than *min_occurences* times are left out of the lookup dicts and
    returned separately.
    """
    word2ind, ind2word = {}, {}
    missing_words = []
    next_index = 0

    def register(token):
        # Assign the next free index to *token* in both directions.
        nonlocal next_index
        word2ind[token] = next_index
        ind2word[next_index] = token
        next_index += 1

    if specials is not None:
        for special in specials:
            register(special)
    for word, count in words_counted:
        if count >= min_occurences:
            register(word)
        else:
            missing_words.append(word)
    return word2ind, ind2word, missing_words
import math
def normalize_values_in_dict(dictionary, factor=None, inplace=True):
    """ Normalize the values in a dictionary using the given factor.

    Each value is replaced by ``value / factor``.

    Parameters
    ----------
    dictionary: dict
        Dictionary to normalize.
    factor: float, optional (default=None)
        Normalization factor value. If not set, use the sum of values.
    inplace : bool, default True
        if True, perform operation in-place
    """
    norm = sum(dictionary.values()) if factor is None else factor
    if norm == 0:
        raise ValueError('Can not normalize, normalization factor is zero')
    if math.isnan(norm):
        raise ValueError('Can not normalize, normalization factor is NaN')
    target = dictionary if inplace else dictionary.copy()
    for key in target:
        target[key] = target[key] / norm
    return target
def get_lines(filepath):
    """Read *filepath* and return its lines with surrounding whitespace stripped.

    Args:
        filepath: Path of the text file to read.

    Returns:
        list[str]: One stripped string per line of the file.
    """
    # The original enumerated the lines but never used the index;
    # a comprehension expresses the intent directly.
    with open(filepath, 'r') as f:
        return [line.strip() for line in f]
def getDezenas(resultado):
    """
    Build the spoken announcement text for a Loteria Federal draw result.

    :param resultado: dict with at least 'dataApuracao' (draw date) and
        'listaDezenas' (drawn numbers, each with a 4-character prefix
        that is stripped before being read out)
    :return: str -- the announcement text
    """
    # The original also read resultado['numero'] into an unused local; that
    # dead assignment has been removed.
    premios = ', '.join(
        '{}º Prêmio: {}'.format(position + 1, dezena[4:])
        for position, dezena in enumerate(resultado['listaDezenas'][:5])
    )
    return ('O resultado da Loteria Federal no dia ' + resultado['dataApuracao']
            + ' foi: ' + premios
            + '. Este resultado foi fornecido pela Caixa Econômica Federal.')
def flatten_results(results, comparison_records, print_warning=False):
    """Flattens and extract a deep structure of results based on a list of
    comparisons desired."""
    # Each comparison is a dotted path, e.g. "a.b.0.c" -> ["a", "b", "0", "c"];
    # numeric steps index into lists, other steps into dicts.
    comparisons = [record.split('.') for record in comparison_records]
    # Nested tree of counts in N dimensions; leaves carry an 'ans' counter.
    table_results = {}
    for entry in results:
        try:
            # Extract one value per comparison path.
            extracted = []
            for path in comparisons:
                node = entry
                for step in path:
                    node = node[int(step)] if isinstance(node, list) else node[step]
                extracted.append(node)
            # Descend the counting tree, creating nodes as needed,
            # then bump the leaf counter.
            spot = table_results
            for value in extracted:
                spot = spot.setdefault(value, {})
            spot['ans'] = spot.get('ans', 0) + 1
        except Exception as excep:
            # Best effort: records missing any requested field are skipped.
            if print_warning:
                print(f"failed to find {comparisons}\n exception: {excep}\n data: {entry}")
    return table_results
import json
def load_times(filename="cvt_add_times.json"):
    """Loads the timing results from the given JSON file and returns the
    (n_bins, brute_force_t, kd_tree_t) triple."""
    with open(filename, "r") as handle:
        payload = json.load(handle)
    return tuple(payload[key] for key in ("n_bins", "brute_force_t", "kd_tree_t"))
def is_even_or_odd(number: int) -> str:
    """Classify *number* as `even` or `odd`."""
    if number % 2:
        return "odd"
    return "even"
def add_leading_zero(number: int, digit_num: int = 2) -> str:
    """Pad *number* with leading zeros.

    Args:
        number (int): number that you want to add leading zeros to
        digit_num (int): total width to fill up to. Defaults to 2.

    Returns:
        str: the zero-padded number

    Examples:
        >>> add_leading_zero(5, 3)
        "005"
    """
    as_text = str(number)
    return as_text.zfill(digit_num)
def create_repr_string(o):
    """
    Args:
        o (object): any core object

    Returns:
        str: repr string based on internal attributes
    """
    def _is_data_field(attr_name):
        # Keep plain data fields only: drop dunders, callables, abc
        # internals, post-fit attributes (trailing underscore) and the
        # EffectEstimator flag.
        return (not attr_name.startswith('__')
                and not callable(getattr(o, attr_name))
                and not attr_name.startswith("_abc")
                and not attr_name.endswith("_")
                and not attr_name == "CALCULATE_EFFECT")

    params = [attr for attr in dir(o) if _is_data_field(attr)]
    # scikit-learn style "learner" attribute gets special placement:
    # last, on its own aligned line.
    has_learner = "learner" in params
    if has_learner:
        params.remove("learner")
    params_string = ", ".join(
        "{}={}".format(attr, getattr(o, attr)) for attr in params)
    if has_learner:
        indent = " " * (len(o.__class__.__name__) + 1)
        params_string += ",\n{}learner={}".format(indent, getattr(o, "learner"))
    return "{}({})".format(o.__class__.__name__, params_string)
import re
def _find_streams(text):
"""Finds data streams in text, returns a list of strings containing
the stream contents"""
re_stream = re.compile(r"<< /Length \d+ >>\n(stream.*?endstream)", re.DOTALL)
streams = []
for m in re_stream.finditer(text):
streams.append(text[m.start(1):m.end(1)])
return streams | 37f011276d4ca2eeeb03927910b2d494519cd17e | 690,701 |
import torch
def _create_1d_regression_dataset(n: int = 100, seed: int = 0) -> torch.Tensor:
"""Creates a simple 1-D dataset of a noisy linear function.
:param n: The number of datapoints to generate, defaults to 100
:param seed: Random number generator seed, defaults to 0
:return: A tensor that contains X values in [:, 0] and Y values in [:, 1]
"""
torch.manual_seed(seed)
x = torch.rand((n, 1)) * 10
y = 0.2 * x + 0.1 * torch.randn(x.size())
xy = torch.cat((x, y), dim=1)
return xy | 1534c7a968dfb3663c1f4d953e3088225af54b5f | 690,702 |
def decrease_parameter_closer_to_value(old_value, target_value, coverage):
    """
    Simple but commonly used calculation for interventions. Moves the
    baseline value toward the intervention target in proportion to coverage;
    values already at or below the target are returned unchanged.

    Args:
        old_value: Baseline or original value to be decremented
        target_value: Target value or value at full intervention coverage
        coverage: Intervention coverage or proportion of the intervention value to apply
    """
    if old_value <= target_value:
        return old_value
    return old_value - (old_value - target_value) * coverage
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.