content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def invalid_policy():
    """Return a policy dict in the wrong format (refers to a non-existent policy)."""
    bad_policy = {"name": "foo"}
    return bad_policy
def is_black_distribution(distribution):
    """Check if the distribution implies a black instance (used in data.update()).

    True when every entry is strictly negative; vacuously True for an
    empty distribution.
    """
    return all(entry < 0 for entry in distribution)
def split_str_w_esc(astring, delimiter, escape='\\'):
    """
    Split *astring* on *delimiter*, honoring *escape* as an escape character
    for both the delimiter and the escape character itself.

    An escape followed by any other character is copied through verbatim
    (escape included); a trailing escape is kept as a literal.

    Returns the list of split elements.
    """
    parts = []
    buf = []
    chars = iter(astring)
    for ch in chars:
        if ch == escape:
            nxt = next(chars, None)
            if nxt is None:
                # Dangling escape at end of input: keep it literally.
                buf.append(escape)
            else:
                # Keep the escape char unless it escapes the delimiter or
                # the escape character itself.
                if nxt not in (delimiter, escape):
                    buf.append(escape)
                buf.append(nxt)
        elif ch == delimiter:
            # Split point: flush the current element.
            parts.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)
    parts.append(''.join(buf))
    return parts
import os
def abspath(*args):
    """Join *args* into one path and return its absolute form."""
    joined = os.path.join(*args)
    return os.path.abspath(joined)
def ask_for_file(prompt, mode):
    """
    Ask user for a file.
    :param str prompt: Prompt presented to the user for the input
    :param str mode: Mode to be used when opening the file
    :return: file object
    """
    file_obj = None
    # Keep prompting until a file can actually be opened in the given mode.
    while not file_obj:
        filename = input(prompt)
        try:
            file_obj = open(filename, mode)
        # NOTE(review): broad Exception catch — OSError would be the minimal
        # catch for open() failures; confirm intent before narrowing.
        except Exception:
            print("{} is invalid.".format(filename))
    return file_obj | 663b991e7e288b747c2a9e67220d561b1598aa81 | 694,408 |
import collections
import os
def parse_obsid_hsc_grism(obsid):
    """
    Given an HSC grism observation ID, return the FITS file to read.
    :param obsid: The HSC grism observation ID to retrieve the data from.
    :type obsid: str
    :returns: tuple -- An error code and a file to read, including the path.
    Error codes:
    0 = No error parsing observation ID.
    1 = Observation ID is a 2D spectral image, and not a 1D extracted spectrum.
    2 = Error parsing observation ID to determine path of file on disk.
    3 = Extracted spectra FITS file not found.
    """
    # Create namedtuple as the return object.
    parsed_values = collections.namedtuple('ParseResult', ['errcode',
                                                           'specfiles'])
    # Initialize error code to 0 = pass.
    error_code = 0
    # Check if this is a 2D spectral image. The signpost is the presence of
    # an extension ".spec2d.fits".
    if obsid[-12:].lower() == ".spec2d.fits":
        error_code = 1
        return parsed_values(errcode=error_code, specfiles=[''])
    # Example ObservationID:
    # HAG_J033148.83-274850.4_UDFNICP2_V01.SPEC1D.FITS
    # Parse the observation ID to get components needed to get path.
    obsid_splits = obsid.lower().split('_')
    # Instrument part of path.
    # "hag" = ACS grism, "hng" = NICMOS grism; anything else is unparseable.
    if obsid_splits[0] == 'hag':
        obsid_instpart = 'acsgrism'
    elif obsid_splits[0] == 'hng':
        obsid_instpart = 'nicgrism'
    else:
        error_code = 2
        return parsed_values(errcode=error_code, specfiles=[''])
    # Subdirectory part of path.
    # First 4 and first 6 characters of the field token, e.g. "udfn/udfnic".
    obsid_subdirpart = obsid_splits[2][0:4] + os.path.sep + obsid_splits[2][0:6]
    # Generate the full path and name of the file to read.
    # NOTE(review): the path is relative to the current working directory
    # ("../../missions/hst/hla/data24/...") — confirm callers set the cwd.
    file_location = (os.path.pardir + os.path.sep + os.path.pardir +
                     os.path.sep + "missions" + os.path.sep + "hst" +
                     os.path.sep + "hla" + os.path.sep + 'data24' +
                     os.path.sep + obsid_instpart + os.path.sep +
                     obsid_subdirpart + os.path.sep)
    # The name of the FITS file is the observation ID.
    spec_file = file_location + obsid.lower()
    if os.path.isfile(spec_file):
        return parsed_values(errcode=error_code, specfiles=[spec_file])
    error_code = 3
    return parsed_values(errcode=error_code, specfiles=['']) | afe3a9106d1ba1f9a531f15ff88ecb9301950189 | 694,409 |
import os
def _make_filename_hess_scheme(obs_id, filetype='events'):
"""Make filename string for the HESS storage scheme.
Parameters
----------
obs_id : int
Observation ID
filetype : {'events', 'effective area', 'psf', 'background'}
Type of file
Examples
--------
>>> _make_filename_hess_scheme(obs_id=89565, filetype='effective area')
'run089400-089599/run089565/hess_aeff_2d_089565.fits.gz'
"""
obs_id_min = obs_id - (obs_id % 200)
obs_id_max = obs_id_min + 199
group_folder = 'run{:06d}-{:06d}'.format(obs_id_min, obs_id_max)
obs_folder = 'run{:06d}'.format(obs_id)
if filetype == 'events':
label = 'events'
elif filetype == 'psf':
label = 'psf_king'
elif filetype == 'effective area':
label = 'aeff_2d'
elif filetype == 'background':
label = 'bkg_offruns'
else:
raise ValueError('Unknown filetype: {}'.format(filetype))
filename = 'hess_{}_{:06d}.fits.gz'.format(label, obs_id)
return os.path.join(group_folder, obs_folder, filename) | a8a06a30b817d34021b090b50fa7ecc3d2595455 | 694,410 |
def insertion_sort(arr):
    """
    Sort *arr* in place using insertion sort.

    Returns the number of key comparisons performed; the list itself is
    mutated into ascending order.
    """
    comparisons = 0
    for idx in range(1, len(arr)):
        key = arr[idx]
        scan = idx - 1
        # Shift elements greater than key one slot right, then drop key in.
        while scan >= 0:
            comparisons += 1
            if arr[scan] <= key:
                break
            arr[scan + 1] = arr[scan]
            scan -= 1
        arr[scan + 1] = key
    return comparisons
import re
def parse_request_error_str(st):
    """Reconstruct the request URL from a requests "Max retries exceeded" message.

    :param st: str(...) of a requests connection error, e.g.
        "HTTPSConnectionPool(host='x', port=443): Max retries exceeded with url: /y (..."
    :return: the rebuilt URL ("https://host/path" for port 443, otherwise
        "http://host/path"), or None when the string does not match.
    """
    pattern = re.compile(
        r"HTTPS?ConnectionPool\(host='([^\x22]+)', port=(\d+)\): Max retries exceeded with url: (/\S*) \(")
    match = re.match(pattern, st)
    if match is None:
        return None
    host, port, url = match.groups()
    # BUG FIX: the regex captures port as a *string*; the old code converted
    # it with str() and then compared to the integer 443, which was always
    # False — every HTTPS URL was reconstructed with an http:// scheme.
    if int(port) == 443:
        return "https://%s%s" % (host, url)
    return "http://%s%s" % (host, url)
def to_requests_format(ip, port):
    """Build the proxies mapping expected by the requests package.

    Both http and https traffic are routed through the plain http:// proxy
    URL, as is conventional for requests proxy dicts.
    """
    proxy_url = 'http://{}:{}'.format(ip, port)
    return {'http': proxy_url, 'https': proxy_url}
import re
def search_for_pesto(driver, url):
    """Check whether the page at *url* mentions pesto.

    The site is assumed to be some kind of eatery that sometimes serves
    food with pesto.

    Arguments:
    driver -- A Selenium webdriver
    url -- The url of the website to be searched

    Returns True if the page source contains "Pesto" or "pesto",
    False otherwise.
    """
    driver.get(url)
    found = re.search(r'[Pp]esto', driver.page_source)
    return found is not None
def prob_service(state, lambda_1, mu, num_of_servers):
"""
Gets the probability of finishing a service
"""
return (min(state[1], num_of_servers) * mu) / (
lambda_1 + (mu * min(state[1], num_of_servers))
) | e6b0df74ec0e2ce5399fd24487e984ac34b2234d | 694,415 |
import hmac
def kdf(key: bytes, data: bytes) -> bytes:
    """Generic Key Derivation Function from Annex B.2 of
    ETSI TS 133 220 V11.4.0 (2012-10): HMAC-SHA-256 of *data* under *key*.

    :param key: denoted key
    :param data: data to be hashed
    :returns: derived key (32-byte HMAC-SHA-256 digest)
    """
    mac = hmac.new(key, digestmod="sha256")
    mac.update(data)
    return mac.digest()
import string
import random
def random_string(length: int = 6, chars: str = string.ascii_lowercase) -> str:
    """Return a random string of *length* characters drawn from *chars*.

    Uses the OS entropy source (SystemRandom), so results are suitable for
    non-reproducible identifiers.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
def dbg(x):
    """Legacy no-op debug hook; ignores its argument and returns ''."""
    _ = x  # intentionally unused
    return ''
def gen_col_vals(dt_data, col_name):
    """Return the list of values in column *col_name* of a loaded pkl table.

    *dt_data* is a dict with 'cols' (list of column names) and 'data'
    (list of rows, each indexable by column position).
    """
    col_idx = dt_data['cols'].index(col_name)
    return [row[col_idx] for row in dt_data['data']]
def extract_first_word(mystr, separators):
    """Split *mystr* at the first character found in *separators*, without regexp.

    Returns a (head, tail) tuple where tail starts with the separator
    character; when no separator occurs, returns (mystr, '').
    """
    for pos, ch in enumerate(mystr):
        if ch in separators:
            return mystr[:pos], mystr[pos:]
    return mystr, ''
import numpy
def safe_mean_vector(vectors):
    """Return (mean, std) profiles across *vectors*, skipping non-finite values.

    For a single vector the vector itself is returned with a zero std profile.
    The vector length is taken from the first item.
    """
    if len(vectors) == 1:
        # A single profile is its own mean; deviation is identically zero.
        return vectors[0], numpy.zeros(len(vectors[0]))
    means = []
    stds = []
    for idx in range(len(vectors[0])):
        finite_vals = [vec[idx] for vec in vectors if numpy.isfinite(vec[idx])]
        means.append(numpy.mean(finite_vals))
        stds.append(numpy.std(finite_vals))
    return means, stds
import itertools
def flatten(lst):
    """
    Flatten one level of *lst*.

    Parameters
    ----------
    lst : list

    Returns
    -------
    list
        List flattened by one level, e.g. [[1, 2], [3]] -> [1, 2, 3].
    """
    return list(itertools.chain(*lst))
async def guild_only_predicate(ctx):
    """Predicate: truthy when the command was invoked inside a guild.

    :param ctx: The context of the predicate; its ``guild`` attribute is
        returned as-is (None outside of a guild).
    """
    guild = ctx.guild
    return guild
def set_axis_labels(inargs, xunits, yunits):
    """Set the x and y axis labels.

    Uses inargs.xlabel / inargs.ylabel when provided, otherwise falls back
    to the variable names (inargs.xvar / inargs.yvar[0]); underscores are
    replaced with spaces. Units spelled 'kg m-2 s-1' are rendered in LaTeX.

    Returns the (xlabel, ylabel) strings, each formatted as "name (units/yr)".
    """
    if inargs.xlabel:
        xname = inargs.xlabel.replace('_', ' ')
    else:
        xname = inargs.xvar.replace('_', ' ')
    xlabel = '%s (%s/yr)' %(xname, xunits)
    if inargs.ylabel:
        yname = inargs.ylabel.replace('_', ' ')
    else:
        yname = inargs.yvar[0].replace('_', ' ')
    if str(yunits) == 'kg m-2 s-1':
        # FIX: use a raw string — the original non-raw literal contained the
        # invalid escape '\:', which raises SyntaxWarning on modern Python.
        # The produced string value is byte-identical.
        yunits = r'$kg \: m^{-2} \: s^{-1}$'
    ylabel = '%s (%s/yr)' %(yname, yunits)
    return xlabel, ylabel
import platform
def getCurrentOS():
    """
    Return an abbreviation of the current Operating System.

    Return values:
    linux
    mac
    win

    Raises RuntimeError on any other platform.
    """
    system = platform.system()
    if system == "Linux":
        return "linux"
    if system == "Windows":
        return "win"
    if system == "Darwin":
        return "mac"
    raise RuntimeError('The current Operating System is unknown.')
from typing import Dict
import json
def dict_to_str(d: Dict) -> str:
    """Serialize the dict *d* to its JSON string representation."""
    serialized = json.dumps(d)
    return serialized
def with_metaclass(meta_class, base_class=object):
    """
    Create an anonymous base class built by *meta_class*, so that subclasses
    of the returned class get *meta_class* as their metaclass — a Python
    2/3 compatible alternative to ``class C(object, metaclass=Meta)``.

    :param meta_class: The desired metaclass to use
    :param base_class: The desired base class to use, the default one is object
    :type base_class: Type
    :return: Metaclass-built type to inherit from

    :Example:

    .. code-block:: python

        class MyMetaClass(type):
            pass

        class MyClass(with_metaclass(MyMetaClass)):
            pass
    """
    synthetic_name = (
        'with_meta_base_' + base_class.__name__ + '_' + meta_class.__name__
    )
    return meta_class(synthetic_name, (base_class,), {})
def get_soft_IoU(mask1, mask2, dim, epsilon=1):
    """Soft IoU score between two masks with values in [0, 1].

    Args:
        mask1, mask2: masks of identical shape with entries in [0, 1].
        dim: dimensions over which to aggregate.
        epsilon: lower clamp applied to the union to avoid division by zero.
    """
    intersection = (mask1 * mask2).sum(dim)
    union = (mask1 + mask2 - mask1 * mask2).sum(dim).clamp(epsilon)
    return intersection / union
import fcntl
import errno
def xioctl(fd, req, arg):
    """
    Wrapper around ioctl that polls it until it no longer returns EINTR
    (i.e. retries when the syscall is interrupted by a signal).

    :param fd: open file descriptor (e.g. a device node)
    :param req: ioctl request code
    :param arg: argument passed through to fcntl.ioctl
    :return: the ioctl result
    :raises IOError: for any ioctl failure other than EINTR
    """
    while True:
        try:
            r = fcntl.ioctl(fd, req, arg)
        except IOError as e:
            if e.errno != errno.EINTR:
                raise
            # Interrupted by a signal: report and retry.
            print("Waiting...")
        else:
            return r | 42dd19410d17bc10d3e4a28610294a843140bc87 | 694,430 |
def critical_density(wavelength=800):
    """
    Get the critical density for a laser with the given wavelength.

    Args:
        wavelength: Laser wavelength (in nm)
    Returns:
        (float) Critical density (particles/cm^3)
    """
    # Precomputed SI prefactor:
    # epsilon_0*electron mass/(electron charge)^2*(2*pi*c/(wavelength))^2/cm^-3
    # folded into a single constant for wavelengths given in nm.
    n_crit = 1.11485422E27 / wavelength ** 2
    return n_crit
def translate(s, table, deletions=""):
    """translate(s,table [,deletions]) -> string
    Return a copy of the string s, where all characters occurring
    in the optional argument deletions are removed, and the
    remaining characters have been mapped through the given
    translation table, which must be a string of length 256. The
    deletions argument is not allowed for Unicode strings.

    NOTE(review): this is the legacy Python 2 string.translate wrapper.
    Under Python 3, str.translate accepts a single mapping argument, so
    the two-argument call below only matches the old 8-bit-string API —
    confirm which interpreter this module targets.
    """
    if deletions:
        return s.translate(table, deletions)
    else:
        # Add s[:0] so that if s is Unicode and table is an 8-bit string,
        # table is converted to Unicode. This means that table *cannot*
        # be a dictionary -- for that feature, use u.translate() directly.
        return s.translate(table + s[:0]) | 8124932842fe59296e58b2b34e390dbed85a8d0e | 694,432 |
from typing import Any
def islist(item: Any) -> bool:
    """Return True if *item* is a list or a tuple, False otherwise."""
    return isinstance(item, list) or isinstance(item, tuple)
from typing import Optional
import logging
def get_existing_handlers(handlertype) -> Optional[logging.Handler]:
"""
Returns Existing handler or None (if the handler has not yet been added to the root handlers).
"""
return next((h for h in logging.root.handlers if isinstance(h, handlertype)), None) | b5cdfbf20133fcc7629c3291f1111fe353b067af | 694,434 |
import re
def remove_comments(s, mode='all'):
    """Strip comments from *s* and return the stripped result.

    mode='all' removes '//' and '#' line comments, '/* ... */' block
    comments, and HTML/XML '<! ... >' comments; mode 'html'/'xml' removes
    only the HTML/XML style. Returns None when *s* is None.

    >>> remove_comments(None)
    >>> remove_comments('hi # there')
    'hi'
    >>> remove_comments("hi// WHERE=1")
    'hi'
    >>> remove_comments('hi /* there */')
    'hi'
    >>> remove_comments('<p>Hi <!-- something -->There</p>')
    '<p>Hi There</p>'
    >>> remove_comments('<p>Hi There</p><!--[if !mso]> But not this', mode='xml')
    '<p>Hi There</p> But not this'
    """
    if s is None:
        return None
    if mode == 'all':
        # Line comments first, then block comments, in the original order.
        s = s.split('//')[0]
        s = s.split('#')[0]
        s = re.sub(r'/\*.*\*/', '', s)
    if mode in ('all', 'html', 'xml'):
        # Remove HTML/XML comments
        s = re.sub(pattern=r'(<!)([^>]+)(>)', repl='', string=s)
    return s.strip()
def sample_size(gdf, x, i):
    """
    Draw *i* independent samples of size *x* from *gdf*.

    Parameters
    ----------
    gdf : geodataframe
    x : int
        size of each sample
    i : int
        number of samples

    Returns
    -------
    list
        list of *i* samples, each of *x* rows
    """
    return [gdf.sample(x) for _ in range(i)]
import os
import ctypes
from distutils.sysconfig import get_config_var
def _load_preload_lib():
    """profiling won't work if the library isn't preloaded using LD_PRELOAD,
    but we ensure it's loaded anyway so that we can import the cython
    extension and call its functions still - otherwise it won't import since
    it has not been linked to the preload library.

    :return: the resolved filesystem path of the shared library loaded.
    """
    this_dir = os.path.dirname(os.path.realpath(__file__))
    so_name = os.path.join(this_dir, 'preload')
    # Append this interpreter's extension-module suffix (e.g.
    # ".cpython-310-x86_64-linux-gnu.so"); fall back to a plain ".so".
    ext_suffix = get_config_var('EXT_SUFFIX')
    if ext_suffix is not None:
        so_name += ext_suffix
    else:
        so_name += '.so'
    # RTLD_GLOBAL exposes the library's symbols to later dlopen'd extensions.
    # NOTE(review): distutils was removed in Python 3.12; sysconfig's
    # get_config_var is the drop-in replacement — confirm supported versions.
    ctypes.CDLL(so_name, ctypes.RTLD_GLOBAL)
    return so_name | 0a9e8f9b0df8eefe507719744dc96ea7ae23f724 | 694,437 |
def _is_array(v):
"""
Returns True if the given value is an Array.
:param v: the value to check.
:return: True if the value is an Array, False if not.
"""
return isinstance(v, list) | f7ab95060e0d2874c97fdb6580fd751976c3d7f1 | 694,441 |
def lift_calc(PPV, PRE):
    """
    Calculate Lift score (PPV / prevalence).

    :param PPV: Positive predictive value (PPV)
    :type PPV: float
    :param PRE: Prevalence
    :type PRE: float
    :return: lift score as float, or the string "None" when the inputs are
        invalid (zero prevalence or non-numeric operands).
    """
    try:
        lift = PPV / PRE
    except (ZeroDivisionError, TypeError):
        return "None"
    return lift
def calculate_summary_stats(dataframe, label_col, col_of_interest):
    """
    :param dataframe: dataframe containing concept annotations
    :param label_col: column with class label
    :param col_of_interest: concept column of which summary statistics are needed
    :return: summary statistics for concept of interest per class label
    """
    data_stats = dataframe[[label_col, col_of_interest]]
    # describe() per class label, then unstack/pivot so rows are class labels
    # and columns are the describe() statistics (count, mean, std, ...).
    data_stats = data_stats.groupby(label_col).describe().unstack(1).reset_index().pivot(index=label_col, values=0,
                                                                                         columns='level_1')
    # NOTE(review): prints the table as a side effect in addition to returning it.
    print(f'Summary stats for {col_of_interest}: ', data_stats)
    return data_stats | ee6d1c36710f749fa5aca58611ab114502acfeba | 694,443 |
def get_later_data(data: dict, timezone: str):
    """
    In 6 hours (i.e. the next forecast period): returns the first entry of
    data['forecast']['parts'].

    NOTE(review): `timezone` is accepted but never used — confirm callers.
    NOTE(review): raises IndexError when 'forecast'/'parts' is missing or
    empty, because the [0] index is applied to the [] default.
    """
    return data.get('forecast', {}).get('parts', [])[0] | eb384e54bdf06e7d31844b73c524a7380cc9d2b0 | 694,444 |
def _expand_dims_nonnegative_axis(axis, rank):
"""Get the nonnegative axis according to the rules of tf.expand_dims."""
# Implementation note: equivalent to get_positive_axis(axis, rank + 1)
if axis < 0:
new_axis = (1 + rank) + axis
if new_axis < 0:
# Note: this is unreachable in the current code.
raise ValueError("Axis out of range: " + str(axis))
return new_axis
elif axis > rank:
# Note: this is unreachable in the current code.
raise ValueError("Axis larger than rank: " + str(axis) + " > " + str(rank))
return axis | bf0b01ac8da9ba09ddecf5475db13f71968b00f6 | 694,445 |
def is_nonce_too_low_exception(exception):
    """Check whether *exception* is a web3 'nonce too low' error.

    Expects a ValueError whose first argument is the JSON-RPC error dict;
    anything else returns False.
    """
    if not isinstance(exception, ValueError):
        return False
    payload = exception.args[0]
    if not isinstance(payload, dict):
        return False
    message = payload.get("message", "")
    needles = (
        "There is another transaction with same nonce in the queue",
        "Transaction nonce is too low",
    )
    return any(needle in message for needle in needles)
def part2(in_list):
    """Jump-offset maze with stranger jumps (AoC 2017 day 5, part 2).

    Follow the offsets, mutating each visited offset by -1 if it was >= 3
    and +1 otherwise, and return the number of steps taken to jump outside
    the list. *in_list* is mutated in place.
    """
    length = len(in_list)
    pos = 0
    steps = 0
    while True:
        offset = in_list[pos]
        target = pos + offset
        in_list[pos] += -1 if offset >= 3 else 1
        steps += 1
        if not 0 <= target < length:
            return steps
        pos = target
def fix_url(url):
    """Makes sure that a url is properly formatted.

    Protocol-relative urls (starting with "//") get an explicit "http:"
    scheme prepended; everything else is returned unchanged.

    Args:
        url: The url to fix
    Returns:
        The fixed URL
    """
    return "http:" + url if url.startswith("//") else url
import math
def calculate_weighted_avg(bonds):
    """
    Weighted average bond length per the effective coordination number
    formula of Hoppe (1979): each bond is weighted by
    exp(1 - (d / d_min)**6).

    :param bonds: (list) bond distances between a cation and its peripheral ions
    :return: (float) exponentially weighted average
    """
    shortest = min(bonds)
    weights = [math.exp(1 - (length / shortest) ** 6) for length in bonds]
    numerator = sum(length * w for length, w in zip(bonds, weights))
    return numerator / sum(weights)
def extract_paths(thisjson, parent=None):
    """
    Extract all paths from a json with nested structures.

    You can use the resulting paths with get_json_path_element(j, path)
    to get all values from a nested json structure. Paths are comma-joined
    key sequences; list positions containing dicts contribute their index.

    E.g. for
        {"key": "value",
         "dict": {"nested": "value"},
         "plainlist": ["value1", "value2"],
         "nestedlist": [{"object": "value"}, "value3"]}
    the result contains 'key', 'dict,nested', 'plainlist' and
    'nestedlist,0,object'.

    :param thisjson: dict to walk.
    :param parent: key path of the enclosing structure (internal; FIX: the
        original used a mutable default argument ``parent=[]`` — replaced
        with a None sentinel, same behavior for all callers).
    :return: list of comma-joined paths.
    """
    if parent is None:
        parent = []
    attributes = []
    for key in thisjson.keys():
        val = thisjson[key]
        if isinstance(val, dict):
            attributes.extend(extract_paths(val, parent=parent+[key]))
        elif isinstance(val, list):
            has_dict = False
            for i, item in enumerate(val):
                if isinstance(item, dict):
                    has_dict = True
                    attributes.extend(extract_paths(item, parent=parent+[key, str(i)]))
            # A list with no dict items is itself a leaf path.
            if not has_dict:
                if parent:
                    attributes.append(','.join(parent)+','+key)
                else:
                    attributes.append(key)
        else:
            if parent:
                attributes.append(','.join(parent)+','+key)
            else:
                attributes.append(key)
    return attributes
def _set_drive_files(config):
"""config의 'drive' 'ids' => file 리스트
Args:
config (dict): google api 설정 파일(ex) settings/api_google_mats.yml) -> dict
Returns:
[dict]: drive 파일 리스트
"""
return {
name: config['prefix']['drive'] + id
for name, id in config['ids']['drive'].items()
} | ab67aa68fddd3153e4bd16577289ea9f67b99a7e | 694,452 |
import os
import sys
def _create_final_filename(output_dir, curdt, model_forcing):
"""Create final filename, following 557 convention."""
name = "%s" %(output_dir)
name += "/PS.557WW"
name += "_SC.U"
name += "_DI.C"
name += "_GP.LIS-S2S-%s" %(model_forcing)
name += "_GR.C0P25DEG"
name += "_AR.AFRICA"
name += "_PA.LIS-S2S"
name += "_DD.%4.4d%2.2d%2.2d" %(curdt.year, curdt.month, curdt.day)
name += "_DT.%2.2d00" %(curdt.hour)
name += "_DF.NC"
if len(os.path.basename(name)) > 128:
print("[ERR] Output file name is too long!")
print("[ERR] %s exceeds 128 characters!" %(os.path.basename(name)))
sys.exit(1)
return name | 153c02cc043e8c0a3f10d5990c1f93477cdde304 | 694,453 |
def find_second(target, string):
    """Return the start offset of the second occurrence of *target* in
    *string*, or -1 when there is no second occurrence (or string is empty)."""
    if not string:
        return -1
    first = string.find(target)
    return string.find(target, first + 1)
def sum_metrics(*metrics):
    """Sum an iterable of metrics into a fresh instance of their shared class.

    All metrics must be of the same class; their results_ lists are
    concatenated, uris_ sets merged, and accumulated_ component counts added.

    :raises ValueError: when the metrics are not all of the same type.
    """
    cls = metrics[0].__class__
    combined = cls()
    for metric in metrics:
        if metric.__class__ != cls:
            raise ValueError('All metrics must be of same type.')
        combined.results_.extend(metric.results_)
        combined.uris_.update(metric.uris_)
        for component in metric.components_:
            combined.accumulated_[component] += metric.accumulated_[component]
    return combined
import numpy as np
def histogram_nd(x, nbins=100, axes=None, nbatch=1000, normalize=True):
    """
    Non-greedy n-dimemsional histogram.
    @param x: input array of rank (-1,n)
    @type x: numpy array
    @param nbins: number of bins
    @type nbins: integer
    @param axes: axes used for binning the data (if provided this will be used instead of <nbins>)
    @type axes: tuple of two one-dimensional numpy arrays
    @param nbatch: size of batch that is used to sort the data into the nD grid
    @type nbatch: integer
    @param normalize: specifies whether histogram should be normalized
    @type normalize: boolean
    @return: n-rank array storing histogram, tuple of axes
    """
    # 1-D input is treated as a single-column 2-D array.
    if len(x.shape) == 1:
        x = np.reshape(x, (-1,1))
    d = x.shape[1]
    if axes is None:
        lower, upper = x.min(0), x.max(0)
        axes = [np.linspace(lower[i], upper[i], nbins) for i in range(d)]
    shape = tuple(map(len, axes))
    H = np.zeros(shape)
    ## MH: was like that before...
    ## s = np.multiply.accumulate(np.array((1,) + H.shape[:-1]))[::-1]
    # Row-major strides used to flatten an n-D bin index into a flat index.
    s = np.multiply.accumulate(np.array((1,) + H.shape[1:]))[::-1]
    H = H.flatten()
    # Consume the points in batches of *nbatch* to bound peak memory use.
    while len(x):
        y = x[:nbatch]
        x = x[nbatch:]
        # Each point is assigned to the *nearest* axis sample per dimension
        # (nearest-neighbour binning, not half-open interval binning).
        I = np.transpose([np.argmin(np.fabs(np.subtract.outer(y[:, i], axes[i])), 1)
                          for i in range(d)])
        I = np.dot(I, s)
        I = np.sort(I)
        i = list(set(I.tolist()))
        n = np.equal.outer(I, i).sum(0)
        H[i] += n
    if normalize:
        # Normalize to a density; assumes uniformly spaced axes (uses the
        # first bin width of each axis).
        H = H / H.sum() / np.multiply.reduce([axes[i][1] - axes[i][0] for i in range(d)])
    H = np.reshape(H, shape)
    return H, axes | 4ad74b86b3632fb169b1c9669a745222cf74ac03 | 694,456 |
import os
import threading
import shutil
def pre_git_ops(gitfs_dir):
    """
    Directs thread to correct dirtydirectory to work from, and creates it if missing

    :param gitfs_dir: root directory holding the 'pure' checkout and the
        per-thread 'dirty_<thread name>' working copies.
    :return: path of this thread's dirty working directory.
    """
    # Each thread gets its own scratch copy, keyed by the thread name.
    dirtydir = os.path.join(
        gitfs_dir,
        'dirty_' +
        threading.current_thread().name)
    puredir = os.path.join(gitfs_dir, 'pure')
    # if dir not there, make it
    # NOTE(review): check-then-copy is racy only if two threads share a name;
    # thread names are normally unique, so this is benign in practice.
    if not os.path.exists(dirtydir):
        shutil.copytree(puredir, dirtydir)
    return dirtydir | 01e36e3f41a6cc20d9a881a743bf2afbd367b20c | 694,457 |
def _add_resources_to_obj(obj, data, columns):
"""Add resources to obj.vault
"""
i = 0
for s in obj.vault.resources:
if obj.vault.resources[i].id:
name = 'resource_' + str(i + 1)
data += (obj.vault.resources[i].id,)
columns = columns + (name,)
i += 1
return data, columns | 6898edc1eac9476fdfe001fdd50c97f133fd9182 | 694,458 |
import time
def time_to_string(t):
    """Convert a timestamp into a formatted string.

    Args:
        t: seconds since the Epoch, or None.
    Returns: "YYYY-mm-dd HH:MM:SS" in local time, or "N/A" for None.
    """
    return ("N/A" if t is None
            else time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t)))
def count_pos(x):
    """Number of strictly positive values in a numpy.array."""
    positives = x > 0
    return positives.sum()
def get_infer_data_meta(target_feed_list, fields_dict):
    """Map target feed names back to their "field#slot" identifiers.

    :param target_feed_list: feed names to look up (e.g. graph input names).
    :param fields_dict: nested dict {field_name: {group: {slot_name: slot}}}
        where each slot (when truthy) has a ``.name`` attribute.
    :return: {"fields": [...]} listing "field#slot" for every feed found,
        in feed-list order.
    """
    infer_dict = {"fields": []}
    for feed_name in target_feed_list:
        for field_name, groups in fields_dict.items():
            for group in groups.values():
                if not group:
                    continue
                for slot_name, slot in group.items():
                    if slot and slot.name == feed_name:
                        infer_dict["fields"].append("%s#%s" % (field_name, slot_name))
    return infer_dict
import sqlite3
def getFuzz(type):
    """
    :Description: This function retrieves Fuzzing strings from the database.
    :param type: Type of the Fuzzing string [sql | xss].
    :type type: String
    :return: List of [fuzz, expected] pairs
    """
    # Opens the bundled sqlite database relative to the working directory.
    conn = sqlite3.connect("db/db.sqlite")
    c = conn.cursor()
    list = [type]  # NOTE(review): shadows the builtin 'list'
    # Parameterized query: 'type' is bound, not string-interpolated.
    sql = '''SELECT fuzz, expected from fuzz where type=?'''
    c.execute(sql, list)
    output = []
    for value in c.fetchall(): #the first item is the real payload
        output.append([value[0], value[1]])
    try:
        return output
    finally:
        # finally guarantees the connection closes even as we return.
        conn.close() | c0f0ce32b38964bc5fd520a66f1de4da7d7a1ce0 | 694,462 |
import torch
def _scale_channel(img_chan):
    """Histogram equalization of a single uint8 image channel.

    Port of torchvision's _scale_channel (permalink below): builds a
    256-bin histogram, derives a cumulative-distribution lookup table,
    and remaps pixel values through it.
    """
    # from https://github.com/pytorch/vision/blob/83171d6ac95e6bf633bcba3fdd1606f1c8c6bf8b/torchvision/transforms/functional_tensor.py#L1299-L1313
    hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255)
    nonzero_hist = hist[hist != 0]
    # Average count per output level, excluding the last nonzero bin.
    step = nonzero_hist[:-1].sum() // 255
    if step == 0:
        # Degenerate histogram (e.g. constant image): return input unchanged.
        return img_chan
    lut = (torch.cumsum(hist, 0) + (step // 2)) // step
    lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255)
    return lut[img_chan.to(torch.int64)].to(torch.uint8) | 946d1e8e9d6c4b020814a5cc2ca5138b263a315a | 694,463 |
import os
def inject_special_variables(data, file_path):
    """
    Patch the paths.json data structure with special variables in place.

    Fills data['__ENV']['_IMPLICIT_ROOT'] (directory containing *file_path*)
    and data['__ENV']['_DRIVE_ROOT'] (filesystem root) when missing.

    :param file_path: the file path of the loaded paths.json.
    :return: the (mutated) paths.json data structure.
    """
    env = data['__ENV']
    env.setdefault('_IMPLICIT_ROOT', os.path.abspath(os.path.dirname(file_path)))
    env.setdefault('_DRIVE_ROOT', os.path.abspath(os.sep))
    return data
from os.path import expanduser
import re
def get_pgpass(pgpass=None):
    """
    Parse postgres' password file (.pgpass).

    http://www.postgresql.org/docs/9.2/static/libpq-pgpass.html
    http://wiki.postgresql.org/wiki/Pgpass

    :param pgpass: path to the pgpass file; defaults to ~/.pgpass.
    :return: list of (host, port, database, user, password) tuples, one per
        parseable line.
    """
    if pgpass is None:
        home = expanduser("~")
        pgpass = "{0}/.pgpass".format(str(home))
    entries = []
    line_pattern = re.compile(r"^([^:]+):([^:]+):([^:]+):([^:]+):(.*)$")
    with open(pgpass, 'r') as filep:
        for line in filep.readlines():
            parsed = line_pattern.match(line)
            if parsed is not None:
                entries.append(parsed.group(1, 2, 3, 4, 5))
        return entries
    # NOTE(review): unreachable — kept from the original implementation;
    # a missing file raises FileNotFoundError from open() instead.
    raise Exception("pgpass file not found")
def Output_to_c(self):
    """Syntax conversion for initialising the QuEST environment.

    Emits the C statement printing one measured bit:
    printf("%d ", <parg.name>[<bindex>]);
    """
    parg, bindex = self.pargs
    return 'printf("%d ", ' + '{}[{}]);'.format(parg.name, bindex)
def get_all_votes(conn):
    """
    Get all data from the VoteData table.

    :param conn: open DB-API connection
    :return all_votes: every row of votedata
    """
    cursor = conn.cursor()
    cursor.execute(""" SELECT * FROM votedata; """)
    return cursor.fetchall()
import torch
def uniform_attention(queries, values):
    """
    Uniform attention: every value gets the same weight regardless of key,
    so each target simply receives the mean of all values. This is the
    "vanilla" neural process, where r* is the average context embedding.

    :param queries: x_target, shape [batch_size, N_target, key_size]
    :param values: aggregated embeddings r_i, shape [batch_size, N_context, value_size]
    :return: tensor of shape [batch_size, N_target, value_size]
    """
    n_target = queries.shape[1]
    mean_value = torch.mean(values, dim=1, keepdim=True)  # [batch, 1, value_size]
    return mean_value.repeat(1, n_target, 1)
import requests
def get_session(*args, **kwargs):
    """Get a requests session; any arguments are passed straight through.

    Returns:
        requests.Session: a fresh session object.
    """
    session_obj = requests.session(*args, **kwargs)
    return session_obj
def age_name(
    agenamelist, prefixes=["Lower", "Middle", "Upper"], suffixes=["Stage", "Series"]
):
    """
    Condenses an agename list to a specific agename, given a subset of
    ambiguous_names.
    Parameters
    ----------
    agenamelist : :class:`list`
        List of name components (i.e. :code:`[Eon, Era, Period, Epoch]`)
    prefixes : :class:`list`
        Name components which occur prior to the higher order classification
        (e.g. :code:`"Upper Triassic"`).
    suffixes : :class:`list`
        Name components which occur after the higher order classification
        (e.g. :code:`"Cambrian Series 2"`).
    """
    # NOTE(review): mutable default arguments — benign here because they are
    # only read, never mutated.
    ambiguous_names = prefixes + suffixes
    ambig_vars = [s.lower().strip() for s in ambiguous_names]
    nameguess = agenamelist[-1]
    # Process e.g. Stage 1 => Stage
    nn_nameguess = "".join([i for i in nameguess if not i.isdigit()]).strip()
    # check if the name guess corresponds to any of the ambiguous names
    hit = [
        ambiguous_names[ix]
        for ix, vars in enumerate(ambig_vars)
        if nn_nameguess.lower().strip() in vars
    ][0:1]
    if hit:
        indexstart = len(agenamelist) - 1
        outname = [agenamelist[indexstart]]
        out_index_previous = 0
        ambiguous_name = True
        # Walk backwards through the components while they stay ambiguous,
        # attaching each higher-order name before or after the current one
        # depending on whether the ambiguous word is a prefix or a suffix.
        while ambiguous_name:
            hitphrase = hit[0]
            indexstart -= 1
            nextup = agenamelist[indexstart]
            if hitphrase in prefixes:
                # insert the higher order component after the previous one
                outname.insert(out_index_previous + 1, nextup)
                out_index_previous += 1
            else:
                # insert the higher order component before the previous one
                outname.insert(out_index_previous - 1, nextup)
                out_index_previous -= 1
            _nn_nextupguess = "".join([i for i in nextup if not i.isdigit()]).strip()
            hit = [
                ambiguous_names[ix]
                for ix, vars in enumerate(ambig_vars)
                if _nn_nextupguess.lower().strip() in vars
            ][0:1]
            if not hit:
                ambiguous_name = False
        return " ".join(outname)
    else:
        return nameguess | 4f5ba0f9f188d3d160909408e8d3e654468b52d3 | 694,470 |
def disable_system_recovery_tools(on=0):
    """Disable the System Restore tools and settings (registry .reg payload).

    DESCRIPTION
        System Restore lets users roll Windows settings back to an earlier
        point ("Restore Points"). This entry can be used to restrict user
        access to those tools and settings.
    COMPATIBILITY
        Windows XP
    MODIFIED VALUES
        DisableConfig : dword : 00000000 = disabled;
                                00000001 = removes options from the Start menu.
        DisableSR     : dword : 00000000 = disabled;
                                00000001 = disables System Restore.

    :param on: truthy to emit the restricting (00000001) values, falsy to
        emit the permissive (00000000) values.
    :return: a .reg-format string for the SystemRestore policy key.
    """
    if on:
        return '''[HKEY_LOCAL_MACHINE\\SOFTWARE\\Policies\\Microsoft\\Windows \
NT\\SystemRestore]
"DisableConfig"=dword:00000001
"DisableSR"=dword:00000001'''
    else:
        return '''[HKEY_LOCAL_MACHINE\\SOFTWARE\\Policies\\Microsoft\\Windows \
NT\\SystemRestore]
"DisableConfig"=dword:00000000
"DisableSR"=dword:00000000''' | 18cda7be81893de0b3fb0266fc4b6b35551bdde5 | 694,471 |
def pathFromParent(soup, node):
    """Walk up the hierarchy from *node* and return the 'path' attribute of
    the first ancestor that has one.

    Returns None once the document body is reached without finding a path.
    """
    current = node.parent
    while True:
        path = current.get('path')
        if path:
            return str(path)
        if current == soup.body:
            return None
        current = current.parent
from collections import defaultdict


def evaluation_metrics(predicted, actual, bow=True):
    """Compute precision, recall and F1 between two token lists.

    :param predicted: list of predicted tokens
    :param actual: list of ground-truth tokens
    :param bow: when True, compare under a bag-of-words (set) assumption;
        otherwise match tokens with multiplicity
    :return: tuple ``(precision, recall, f1)``
    """
    if bow:
        p = set(predicted)
        a = set(actual)
        true_positive = sum(1 for token in p if token in a)
    else:
        # Count each actual token's occurrences, then consume them while
        # scanning the predictions so duplicates match at most once each.
        remaining = defaultdict(lambda: 0)
        for token in actual:
            remaining[token] += 1
        true_positive = 0
        for token in predicted:
            if remaining[token] > 0:
                true_positive += 1
                remaining[token] -= 1
        # shared logic below works on the raw lists in this branch
        p = predicted
        a = actual
    precision = true_positive / len(p) if len(p) else 0.0
    recall = true_positive / len(a) if len(a) else 0.0
    denom = precision + recall
    f1 = 2.0 * (precision * recall) / denom if denom else 0.0
    # return (precision, recall, f1, dameraulevenshtein(predicted, actual))
    return (precision, recall, f1)
def post_incr_assign_expr(evaluator, ast, state):
    """Evaluates expression "expr++": increments the variable in place and
    returns a copy of its value from before the increment."""
    var = evaluator.eval_ast(ast["expr"], state)
    previous_value = var.val.copy()
    var += 1
    return previous_value
import subprocess


def check_filesize(http_fullpath):
    """deprecated, replaced by launch_ffq_ftp()

    Issue a HEAD request with curl and inspect the response headers.

    :param http_fullpath: URL to probe
    :return: 0 on "404 Not Found", the Content-Length (int) on "200 OK",
        otherwise None (implicitly)
    """
    # Pass argv as a list (shell=False) so the URL cannot be interpreted by
    # the shell -- the previous f-string + shell=True form was vulnerable
    # to command injection via the URL.
    header = subprocess.check_output(["curl", "-sI", http_fullpath]).decode("utf-8")
    if "404 Not Found" in header:
        return 0
    elif "200 OK" in header:
        filesize = int(header.split("Content-Length: ")[1].split("\r")[0])
        return filesize
from typing import List


def accuracy_score(y_true: List, y_pred: List) -> float:
    """
    Compute the fraction of positions where the labels agree.

    Parameters
    ----------
    y_true : list
        True labels
    y_pred : list
        Predicted labels

    Returns
    -------
    float
        Accuracy score

    Examples
    --------
    >>> from evaluations.classification import accuracy_score
    >>> accuracy_score([1, 1, 0, 0], [1, 1, 1, 0])
    0.75
    """
    matches = sum(1 for truth, guess in zip(y_true, y_pred) if truth == guess)
    return matches / len(y_true)
def check_columns(data, lst_col):
    """
    @param data: dataframe (anything exposing a ``columns`` attribute)
    @param lst_col: expected column names, in order
    @return: True when the dataframe's columns exactly match lst_col
    """
    return lst_col == list(data.columns)
def factorial(n, show=False):
    """
    => calculates the factorial of a number.

    :param n: The number to be factored.
    :param show: (Optional, default False) shows the process or not.
        The docstring always called this optional; it now actually has a
        default so existing two-argument callers keep working.
    :return: n! (also printed step by step when *show* is true)
    """
    result = 1
    for c in range(n, 0, -1):
        result *= c
        if show:
            # print "n x (n-1) x ... x 1 = " while accumulating
            if c == 1:
                print(f'{c} = ', end='')
            else:
                print(f'{c} x ', end='')
    return result
from pathlib import Path
import json


def data_dir(tmp_path, data_object):
    """Return a path to a directory loaded with 4 mock files

    (The old docstring said "12 mock files" in one place and 4 in
    another; 4 is what the code creates: .gitkeep, test.xlsx and two
    timestamped JSON data files.)

    Args:
        tmp_path (pathlib.Path): Path to temporary directory created by generic pytest fixture tmp_path
        data_object: JSON-serializable object dumped into each data file

    Returns:
        pathlib.Path: Path to temporary directory holding 4 mock files, simulating the data folder
    """
    with open(Path(tmp_path, ".gitkeep"), "w") as f:
        f.write("test1")
    with open(Path(tmp_path, "test.xlsx"), "w") as f:
        f.write("test2")
    for i in range(2):
        with open(Path(tmp_path, "2020-01-01T0{}-00-00Z_data.json".format(i)), "w") as f:
            json.dump(data_object, f)
    print(tmp_path)
    return tmp_path
def fahrenheit(celsius):
    """Convert a temperature from degrees Celsius to degrees Fahrenheit."""
    scaled = (9 * celsius) / 5
    return scaled + 32
def format_structure(instance):
    """
    Render a ctypes-style structure (anything with ``_fields_``) as a
    "[name: value]" string; raises ValueError for non-structures.
    """
    if not hasattr(instance, "_fields_"):
        raise ValueError("Passed argument is not a structure!")
    rendered = []
    for field in instance._fields_:
        name = field[0]
        rendered.append("[%s: %s]" % (name, getattr(instance, name, None)))
    return "".join(rendered)
def thread(x, *fns):
    """Threads `x` left-to-right through the `fns`, returning the final result.

    thread x :: a -> a
    thread x, *fns :: a_0, *(a_i -> a_i+1) -> a_n"""
    acc = x
    for fn in fns:
        acc = fn(acc)
    return acc
def rule_precision_cn(cn, rule, pos_idx, neg_idx):
    """Precision of *rule* over the given positive/negative index sets.

    Returns None when the rule covers nothing at all.
    """
    n_pos = len(cn.rule_covers(rule, pos_idx))
    n_neg = len(cn.rule_covers(rule, neg_idx))
    total = n_pos + n_neg
    if total == 0:
        return None
    return n_pos / total
def calc_new_value(old_measure, new_measure, count):
    """
    Factor the total count in to the difference of the new value.
    E.g. for a value of 100ms with 50 measurements, a new measure of 150ms would add 1 ms to the total
    """
    adjusted = old_measure + (new_measure - old_measure) * (1.0 / count)
    return (old_measure + adjusted) / 2.0
import os


def safe_open(filename, mode='r'):
    """Open *filename* after stripping surrounding whitespace (e.g. a
    trailing newline) and expanding environment variables in the path —
    saves callers from forgetting either step."""
    cleaned = os.path.expandvars(filename.strip())
    return open(cleaned, mode)
def set_ND_volume_roi_with_bounding_box_range(volume, bb_min, bb_max, sub_volume, addition = True):
    """Write ``sub_volume`` into the bounding-box region of an N-D image.

    The region is ``volume[bb_min[i]:bb_max[i]]`` along every axis ``i``.
    When ``addition`` is True the sub-volume is added to the existing
    contents; otherwise the region is overwritten.  ``volume`` is modified
    in place and also returned.

    :param volume: N-D array (2 to 5 dimensions; assumed to support
        tuple-of-slices indexing, e.g. a numpy array — TODO confirm)
    :param bb_min: per-axis lower bounds of the region (inclusive)
    :param bb_max: per-axis upper bounds of the region (exclusive)
    :param sub_volume: array matching the region's shape
    :param addition: add into the region instead of replacing it
    :raises ValueError: if the bounding box is not 2- to 5-dimensional
    :return: the (mutated) volume
    """
    dim = len(bb_min)
    if dim < 2 or dim > 5:
        raise ValueError("array dimension should be 2 to 5")
    # One tuple-of-slices index replaces the previous four near-identical
    # per-dimension branches.
    roi = tuple(slice(lo, hi) for lo, hi in zip(bb_min, bb_max))
    if addition:
        volume[roi] += sub_volume
    else:
        volume[roi] = sub_volume
    return volume
import os


def list_file_root_folder(folder_path: str = '.'):
    """
    List the regular files directly inside a folder (directories excluded).

    :param folder_path: folder to list from
    :return: sorted list of file names
    """
    entries = os.listdir(folder_path)
    files = [entry for entry in entries
             if os.path.isfile(os.path.join(folder_path, entry))]
    files.sort()
    return files
def rpc_completion_callback(callback):
    """Verify callback is callable if not None

    :returns: boolean indicating nowait (True means no response expected)
    :rtype: bool
    :raises: TypeError
    """
    if callback is None:
        # No callback means we will not expect a response, i.e. nowait=True
        return True
    if not callable(callback):
        raise TypeError('completion callback must be callable if not None')
    # nowait=False
    return False
import torch


def l2_loss(pred, target):
    """L2 loss.

    Args:
        pred (torch.Tensor): The prediction.
        target (torch.Tensor): The learning target of the prediction.

    Returns:
        torch.Tensor: Element-wise squared error, same shape as ``pred``.
    """
    assert pred.size() == target.size() and target.numel() > 0
    difference = torch.abs(pred - target)
    return difference ** 2
def can_monitor(message):
    """
    Determine if the user who sent the message is a race monitor.

    Falls back to False when monitor status is indeterminate, e.g. the
    message was sent by a bot instead of a user.
    """
    monitor_flag = message.get('is_monitor', False)
    return monitor_flag
import os
import logging


def parameters_option(step_num, model, ckpt_dir, option='Saving'):
    """Save or load the model parameters, marked by step_num.

    :param option: 'Saving' writes params and returns the path;
        'Loading' reads params and returns the model.
    :raises NotImplementedError: for any other option string.
    """
    filename = '{}.params'.format(str(step_num).zfill(7))
    param_path = os.path.join(ckpt_dir, filename)
    logging.info('[step {}], {} model params to/from {}.'.format(
        step_num, option, param_path))
    if option == 'Saving':
        model.save_parameters(param_path)
        return param_path
    if option == 'Loading':
        model.load_parameters(param_path)
        return model
    raise NotImplementedError('Unknown Option: {}'.format(option))
import os
import sys


def find_dbpath():
    """Find the database file in the specified order and return its path.

    The search paths (in the order of priority) are:
    1. The directory of the package,
    2. that of the executable
    3. and the current directory.

    Returns None when the database is found in none of them.
    """
    dbname = 'ucd.sqlite3'
    search_dirs = (
        os.path.dirname(__file__),
        os.path.dirname(sys.executable),
        os.getcwd(),
    )
    for directory in search_dirs:
        candidate = os.path.join(directory, dbname)
        if os.path.exists(candidate):
            return candidate
    return None
import yaml


def yaml_tag_constructor(loader, tag, node):
    """convert shorthand intrinsic function to full name

    Turns a short CloudFormation tag (e.g. ``!Ref``, ``!GetAtt``) into its
    long-form ``{key: value}`` mapping.
    """
    def resolve(inner_loader, inner_tag, inner_node):
        if inner_tag == '!GetAtt':
            # "Resource.Attribute" shorthand becomes a two-element list
            return inner_node.value.split('.')
        if type(inner_node) == yaml.SequenceNode:
            return inner_loader.construct_sequence(inner_node)
        return inner_node.value
    key = 'Ref' if tag == '!Ref' else 'Fn::{}'.format(tag[1:])
    return {key: resolve(loader, tag, node)}
import pickle


def load(filename: str):
    """Loads a network from file and returns a list of modules in that network.

    The connections between the modules have been set according to the file.

    NOTE(review): ``pickle.load`` can execute arbitrary code from the file;
    only load networks from trusted sources.

    Args:
        filename (str): The path to the .rtc file containing a network.

    Returns:
        (list, list): A list of Modules that are connected and ready to be run
            and a list of connections between those modules.
    """
    # Use a context manager so the file handle is closed (the previous
    # bare open() inside pickle.load leaked it).
    with open(filename, "rb") as f:
        mc_list = pickle.load(f)
    module_dict = {}
    module_list = []
    connection_list = []
    for spec in mc_list[0]:
        module = spec["retico_class"](**spec["args"])
        module_dict[spec["id"]] = module
        module_list.append(module)
    for ida, idb in mc_list[1]:
        module_dict[idb].subscribe(module_dict[ida])
        connection_list.append((module_dict[idb], module_dict[ida]))
    return (module_list, connection_list)
def get_innovation_check_flags(estimator_status: dict) -> dict:
    """Decode the ``innovation_check_flags`` bitmask into per-check flags.

    :param estimator_status: dict with an ``innovation_check_flags`` entry
        (scalar or elementwise bitmask, e.g. a numpy array — works either way)
    :return: dict mapping ``<check>_innov_fail`` to 0/1 per bit below
    """
    # innovation_check_flags summary (bit -> rejected observation):
    # 0  - velocity
    # 1  - horizontal position
    # 2  - vertical position
    # 3  - X magnetometer
    # 4  - Y magnetometer
    # 5  - Z magnetometer
    # 6  - yaw
    # 7  - airspeed
    # 8  - synthetic sideslip
    # 9  - height above ground
    # 10 - X optical flow
    # 11 - Y optical flow
    flag_names = [
        'vel', 'posh', 'posv', 'magx', 'magy', 'magz',
        'yaw', 'tas', 'sli', 'hagl', 'ofx', 'ofy',
    ]
    status = estimator_status['innovation_check_flags']
    innov_flags = dict()
    for bit, name in enumerate(flag_names):
        # same elementwise expression as the former copy-pasted lines so
        # numpy-array inputs keep producing 0/1 arrays
        innov_flags['{}_innov_fail'.format(name)] = ((2 ** bit & status) > 0) * 1
    return innov_flags
from datetime import datetime


def updated_saving(file_path, fish_id, fish_age):
    """
    Initializes saving: opens *file_path* for appending, writes a header
    line identifying the fish and timestamp, and returns the open stream.
    """
    normalized = file_path.replace('\\', '/') if '\\' in file_path else file_path
    print(f"Saving data to {normalized}")
    filestream = open(normalized, "a")
    filestream.write(f"fish{fish_id}_{fish_age}dpf_{datetime.now()}")
    filestream.flush()
    return filestream
def calculateMRR(ranks):
    """
    Return the mean reciprocal rank of the given rank predictions.
    """
    reciprocal_sum = sum(1.0 / rank for rank in ranks)
    return reciprocal_sum / len(ranks)
def _to_bytes_or_false(val):
    """
    An internal helper to convert the input to bytes or to False.
    The criteria for conversion are as follows and should be python 2 and 3
    compatible:
    - If val is py2 str or py3 bytes: return val unchanged
    - If val is py2 unicode or py3 str: return val.encode('ascii')
    - Otherwise (no .encode method), return False

    NOTE(review): a text value containing non-ASCII characters raises
    UnicodeEncodeError rather than returning False — presumably callers
    only pass ASCII identifiers; confirm.
    """
    if isinstance(val, bytes):
        return val
    else:
        try:
            return val.encode('ascii')
        except AttributeError:
            return False
def get_omelet_ingredients(omelet_name):
    """Return the ingredient dict for a known omelet type, or None (with a
    printed apology) for anything not on the menu."""
    # per-omelet extras on top of the shared egg-and-milk base
    extras = {
        "cheese": {"cheddar": 2},
        "western": {"ham": 1},
    }
    if omelet_name not in extras:
        print("Sorry that's not on the menu")
        return None
    # all omelets need eggs and milk
    ingredients = {"eggs": 2, "milk": 1}
    ingredients.update(extras[omelet_name])
    return ingredients
def mod_operator():
    """%: Modulo operator, demonstrated via a custom ``__mod__``."""
    class _Operand:
        # a % b implemented as a - b * (a // b), with a fixed to 34
        def __mod__(self, divisor):
            quotient = 34 // divisor
            return 34 - divisor * quotient
    return _Operand() % 5
def pad(phrase, maxlen):
    """Right-pad the given string with NUL ('\\x00') characters to exactly
    *maxlen* characters; longer strings are returned unchanged."""
    return phrase.ljust(maxlen, u'\x00')
def get_rss(model, data, y):
    """
    Residual sum of squares of *model*'s predictions on *data* against *y*.
    """
    residuals = y - model.predict(data)
    return (residuals * residuals).sum()
def _get_tag_path(repository, tag=None):
"""Return the path for a tag, or list of tags if tag is empty.
"""
if tag:
return '/acr/v1/{}/_tags/{}'.format(repository, tag)
return '/acr/v1/{}/_tags'.format(repository) | 51500ae144362c27c65ecc2e862ff043ef0f565e | 694,506 |
def cmp_lines(path_1, path_2):
    """Compare two files line by line (text mode, so platform line-endings
    are normalized); return True iff they are identical."""
    with open(path_1, 'r') as file_1, open(path_2, 'r') as file_2:
        while True:
            line_1 = file_1.readline()
            line_2 = file_2.readline()
            if line_1 != line_2:
                return False
            if not line_1 and not line_2:
                # both files exhausted with no mismatch
                return True
def pastis_matrix_measurements(nseg):
    """
    Calculate the total number of measurements needed for a PASTIS matrix with nseg segments

    Uses integer arithmetic (n*(n+1)//2): nseg**2 + nseg is always even, and
    the previous float division could round for very large nseg.

    :param nseg: int, total number of segments
    :return: int, total number of measurements
    """
    return nseg * (nseg + 1) // 2
def convert_percent(val):
    """
    Convert a percentage string such as "45%" to its floating point
    fraction (0.45).
    """
    return float(val.replace('%', '')) / 100
def ordinal(num):
    """
    Append the ordinal suffix to the n'th number, e.g. ordinal(4) gives
    "4'th", ordinal(1) gives "1'st", ordinal(21) gives "21'st".

    The teens (11-13, 111-113, ...) always take "th".
    """
    ones = num % 10
    tens = num // 10 % 10
    if tens != 1 and ones in (1, 2, 3):
        suffix = ('st', 'nd', 'rd')[ones - 1]
    else:
        suffix = 'th'
    return "%d%s%s" % (num, "'", suffix)
from typing import List
import re


def split_text(text: str, max_length: int = 4096) -> List[str]:
    """Splits text by lines. If some line is too long, by spaces; single
    tokens longer than *max_length* are split by raw length.

    :param text: the text to split
    :param max_length: maximum length of each returned chunk
    :return: list of chunks whose concatenation equals *text*
    """
    chunks = text.splitlines(keepends=True)
    ans: List[str] = []
    cur = ""
    while chunks:
        cur_chunk = chunks.pop(0)
        if len(cur_chunk) > max_length:
            # Split by whitespace, keeping the delimiter with each token.
            # Drop the empty strings re.split() inserts around the capturing
            # group so lack of progress is detectable below.
            split_chunk = [piece for piece in re.split(r'(\S*\s)', cur_chunk) if piece]
            if len(split_chunk) <= 1:
                # A single token with no usable whitespace split: fall back
                # to a hard split by length. (The old code only did this
                # when re.split returned exactly one element, so an
                # over-long token followed by whitespace was re-queued
                # unchanged forever -- an infinite loop.)
                split_chunk = [
                    cur_chunk[i: i + max_length]
                    for i in range(0, len(cur_chunk), max_length)
                ]
            chunks = split_chunk + chunks
            continue
        if len(cur) + len(cur_chunk) > max_length:
            ans.append(cur)
            cur = cur_chunk
        else:
            cur += cur_chunk
    if cur:
        ans.append(cur)
    return ans
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.