content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
import warnings
def get_ann_text(sel):
    """
    Compute an annotating text for a `Selection` (passed **unpacked**).

    This is a single-dispatch function; implementations for various artist
    classes follow. This default implementation emits a warning that the
    artist type is unsupported and returns an empty annotation.
    """
    artist_cls = type(sel.artist).__name__
    warnings.warn(f"Annotation support for {artist_cls} is missing")
    return ""
def merge(left_array, right_array):
    """
    Merge two already-sorted arrays into one sorted array.

    1. Compare the current element of both arrays and append the smaller one.
    2. Advance the index of the array whose element was appended.
    3. Repeat until one array is exhausted.
    4. Append the remainder of the other array directly (it is already sorted).

    Bug fix: the original `while True` loop indexed both arrays before
    checking bounds, so an empty input raised IndexError.

    :param left_array: one sorted part of the array
    :param right_array: the other sorted part of the array
    :return: merged array in sorted order (stable: ties favor left_array)
    """
    merged_array = []
    i = j = 0
    while i < len(left_array) and j < len(right_array):
        if left_array[i] <= right_array[j]:
            merged_array.append(left_array[i])
            i += 1
        else:
            merged_array.append(right_array[j])
            j += 1
    # At most one of these extends with anything; both tails are sorted.
    merged_array.extend(left_array[i:])
    merged_array.extend(right_array[j:])
    return merged_array
def find_related_forms(self, form_name, form_dict, foreign_forms=None):
    """
    Follow the chain of foreign-key relations starting at form_name.

    form_dict maps a form name to the name of the form it has a foreign key
    relation with, e.g. form_dict['Microarray 1'] = 'Prior Gen Testing'.
    Starting from form_name, each form that appears as a key is collected and
    the walk continues with its related form, stopping when a form has no
    further relation or has already been visited (cycle guard).

    Rewritten iteratively (the original recursed once per hop and used
    C-style semicolons); results are identical.

    :param form_name: name of the form to start from
    :param form_dict: mapping of form name -> related form name
    :param foreign_forms: accumulator list (created fresh when None)
    :return: list of related form names, from highest to deepest relation
    """
    if foreign_forms is None:
        foreign_forms = []
    current = form_name
    # Stop when the current form has no relation, or we have seen it before
    # (which would otherwise loop forever on cyclic relations).
    while current in form_dict and current not in foreign_forms:
        foreign_forms.append(current)
        current = form_dict[current]
    return foreign_forms
def identity(func):
    """The identity decorator.

    Wraps *func* in a pass-through wrapper. Uses functools.wraps so that
    ALL metadata (__name__, __doc__, __module__, __qualname__, __dict__,
    __wrapped__) is preserved, not just the name and docstring as before.
    """
    import functools

    @functools.wraps(func)
    def inner(*args, **kwargs):
        return func(*args, **kwargs)
    return inner
def get_dat_id(datnum, datname):
    """Returns unique dat_id within one experiment.

    The base dataset is just 'Dat<num>'; any other datname is appended
    in square brackets, e.g. 'Dat5[cooldown]'.
    """
    if datname == 'base':
        return f'Dat{datnum}'
    return f'Dat{datnum}[{datname}]'
import ctypes
def _IsStructType(t):
"""Tests if a type is a structure."""
return type(t) is type(ctypes.Structure) | 7815d9fc840d3fbd29625404b96fa60eb209dd85 | 693,957 |
def electric_consume(data, meta):
    """
    Transfer function to change sign of electricity consumed, for cashflows
    @ In, data, dict, partial requested activity
    @ In, meta, dict, additional info
    @ Out, data, dict, filled-in requested activity
    @ Out, meta, dict, additional info (possibly modified, possibly not)
    """
    consumed = meta['HERON']['activity']['electricity']
    # Cashflow driver is the negated consumption.
    return {'driver': -consumed}, meta
def asini_c(pb, mf):
    """
    asini_c(pb, mf):
    Return the orbital projected semi-major axis (lt-sec) given:
        'pb' is the binary period in sec.
        'mf' is the mass function of the orbit.
    """
    # Constant 8015123.37129 bundles G and unit conversions for lt-sec output.
    cube = mf * pb ** 2 / 8015123.37129
    return cube ** (1.0 / 3.0)
async def root():
    """Health-check endpoint.

    :return: a simple static JSON-serializable object
    """
    response = {"status": "ok"}
    return response
def rsync(original, new):
    """Uses subprocess.call to rsync from 'original' to 'new'.

    If new is a directory, copies original in.
    If new is a new file, copies original to that name.
    Returns 1 on success, 0 on any rsync failure.
    """
    assert os.path.exists(original), 'Need original file!'
    command = ["rsync", "--timeout", "30", "-a",
               original.rstrip('/'), new.rstrip('/')]
    exit_code = subprocess.call(command)
    return int(exit_code == 0)
import re
def preprocess_dash(arg: str) -> str:
    """Replace any dashes with tildes, otherwise argparse will assume they're options"""
    # Only a leading '-' followed by either a slash-separated pair of
    # word/glob tokens or a pure number is rewritten.
    leading_dash = r"^-(?=([\w_\*]+/[\w_\*]+)|(\d+)$)"
    return re.sub(leading_dash, "~", arg)
def argkvpair(val):
    """Split a KEY=VALUE pair on the first '='; the value may itself contain '='.

    Raises ValueError when no '=' is present (unpacking fails).
    """
    key, value = val.split("=", maxsplit=1)
    return key, value
import re
import logging
def edit_header(in_vcf):
    """
    Read the header from a (binary-mode) VCF file-like object.

    Returns the header lines as well as contig information parsed from
    ``##contig`` lines, as a list of (name, length) tuples. Validates on the
    ``#CHROM`` line that the VCF has exactly 10 columns (i.e. a single
    sample) and aborts the program otherwise.

    Bug fix: the error message previously interpolated the column list
    itself instead of the column count.

    :param in_vcf: binary file object positioned at the start of the VCF
    :return: (header, contigs) tuple
    """
    header = []
    contigs = []
    ctgre = re.compile(r"##contig=<ID=(?P<name>.*),length=(?P<length>\d+)>$")
    while True:
        line = in_vcf.readline().decode()
        header.append(line)
        mat = ctgre.match(line)
        if mat:
            groups = mat.groupdict()
            contigs.append((groups["name"], int(groups["length"])))
        if line.startswith("#CHROM"):
            columns = line.strip().split('\t')
            if len(columns) != 10:
                logging.error(f"Input VCF doesn't have exactly 10 columns (found {len(columns)})")
                logging.error("Input VCF must only have a single sample")
                # SystemExit works even without the `site` builtins installed.
                raise SystemExit(1)
            break
    return header, contigs
from typing import Any
from typing import List
from typing import Dict
def check_is_list_of_dicts(x: Any) -> List[Dict[Any, Any]]:
    """Return `x` if it is `List[Dict[Any, Any]]`, raise `ValueError`.

    The exception type is unchanged, but messages are now attached so
    failures are diagnosable (the original raised a bare ValueError()).
    """
    if not isinstance(x, list):
        raise ValueError(f"expected a list, got {type(x).__name__}")
    for element in x:
        if not isinstance(element, dict):
            raise ValueError(
                f"expected every element to be a dict, got {type(element).__name__}")
    return x
def php_str_repeat(_input, _multiplier):
    """
    Port of PHP's str_repeat: repeat *_input* *_multiplier* times.

    >>> php_str_repeat('*', 10)
    '**********'
    >>> php_str_repeat('xyz', 3)
    'xyzxyzxyz'
    >>> php_str_repeat('xyz', 0)
    ''
    """
    repeated = _input * _multiplier
    return repeated
import argparse
def positive_value(num):
    """
    Checks whether num is a non-negative number and returns it as a float.

    Parameters
    ----------
    num : str
        Amount of money.

    Returns
    -------
    float
        Amount of money in float format.

    Raises
    ------
    argparse.ArgumentTypeError
        If num is not a number or is negative.
    """
    try:
        value = float(num)
    except ValueError:
        raise argparse.ArgumentTypeError('"{}" is not number.'.format(num))
    if value < 0:
        raise argparse.ArgumentTypeError("{} is not positive number.".format(num))
    return value
def header_translate_inverse(header_name):
    """Translate a header keyword back to its parameter name.

    Raises KeyError for unknown header keywords.
    """
    translations = {
        'XCENREF': 'xcen_ref',
        'YCENREF': 'ycen_ref',
        'ZENDIR': 'zenith_direction',
        'ZENDIST': 'zenith_distance',
        'FLUX': 'flux',
        'BETA': 'beta',
        'BCKGRND': 'background',
        'ALPHAREF': 'alpha_ref',
        'TEMP': 'temperature',
        'PRESSURE': 'pressure',
        'VAPPRESS': 'vapour_pressure',
    }
    return translations[header_name]
def handler(event, context):
    """
    AWS-Lambda-style entry point.

    Logs the incoming event/context and returns a static 200 response with
    an octet-stream content type. (Removed the dead local `job = None`,
    which was never used.)

    :param event: Lambda invocation payload
    :param context: Lambda runtime context object
    :return: API-Gateway-shaped response dict
    """
    print('Passed event: {}'.format(event))
    print('Passed context: {}'.format(context))
    return {
        'statusCode': 200,
        'body': str(b'bytes response'),
        'isBase64Encoded': False,
        'headers': {'Content-Type': 'application/octet-stream'}
    }
from typing import IO
import io
import subprocess
def decompress_raw_data_by_zcat(raw_data_fd: IO, add_bracket: bool = True):
    """Experimental: Decompresses raw data from file like object with zcat. Otherwise same as decompress_raw_data.

    Args:
        raw_data_fd: File descriptor object.
        add_bracket: Whether, or not to add brackets around the output. (Default value = True)
    Returns:
        A byte array of the decompressed file.
    """
    proc = subprocess.Popen(["zcat"],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    decompressed = proc.communicate(input=raw_data_fd.read())[0]
    if add_bracket:
        return b'[' + decompressed + b']'
    return decompressed
import jinja2
def render_without_request(template_name, **template_vars):
    """
    Render a template outside of a Flask request context.

    Usage is the same as flask.render_template:
        render_without_request('my_template.html', var1='foo', var2='bar')
    """
    loader = jinja2.PackageLoader('server', 'templates')
    environment = jinja2.Environment(loader=loader)
    template = environment.get_template(template_name)
    return template.render(**template_vars)
def tsne_kwargs(n_dims, initial_dims, perplexity=25.0, n_epochs=1000):
    """Argument options for t-SNE.

    Args:
        n_dims (int): Number of dimensions to reduce the data down to.
        initial_dims (int): Initial number of dimensions of the input data.
        perplexity (float): Related to number of nearest neighbors parameter used in other manifold learning algorithms. Larger datasets usually require a larger perplexity. Consider selecting a value between 5 and 50.
        n_epochs (int): Number of epochs to fit for.
    Returns:
        dict: Dictionary for kwargs for t-SNE.
    """
    return dict(
        no_dims=n_dims,
        initial_dims=initial_dims,
        perplexity=perplexity,
        n_epochs=n_epochs,
    )
def get_dic(f, dataset="spectrum"):
    """
    Get a dictionary from a dataset in an HDF5 file.

    Attribute keys of the form '<axis>_<subkey>' are grouped into a nested
    dict under the integer axis number; all other keys are copied flat.
    """
    attrs = f[dataset].attrs
    dic = {}
    for key, value in attrs.items():
        if "_" not in key:
            dic[key] = value
            continue
        # Axis-scoped attribute: e.g. '0_label' -> dic[0]['label'].
        axis_str, subkey = key.split("_", 1)
        axis = int(axis_str)
        dic.setdefault(axis, {})[subkey] = value
    return dic
import os
def check_already_extracted(feature_dir):
    """Check whether any extracted .txt frame files exist in *feature_dir*.

    Bug fix: os.path.exists() treats '*.txt' as a literal filename and never
    expands the wildcard, so the original always returned False (unless a
    file literally named '*.txt' existed). glob.glob performs the intended
    pattern match.
    """
    import glob
    return bool(glob.glob(os.path.join(feature_dir, '*.txt')))
def restriction(d, keys):
    """Return the dictionary that is the subdictionary of d over the specified
    keys.

    Keys absent from *d* are included with value None (d.get semantics).
    """
    result = {}
    for key in keys:
        result[key] = d.get(key)
    return result
import re
def is_valid_phone(phone_number: str):
    """Return whether *phone_number* looks like a valid phone number.

    Falsy input (None / empty string) is immediately rejected; spaces and
    parentheses are stripped before matching.
    """
    if not phone_number:
        return False
    cleaned = phone_number.replace(' ', '').replace('(', '').replace(')', '')
    # Pattern kept exactly as the original: optional '+'/digit prefix then
    # one of three digit groupings ending in 4 digits.
    pattern = r'[\+\d]?(\d{2,3}[-\.\s]??\d{2,3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})'
    return re.match(pattern, cleaned) is not None
def e_m(val):
    """Energy multiplier: 10 when val == 5, 100 when val >= 6, else None."""
    if val >= 6:
        return 100
    if val == 5:
        return 10
    return None
import torch
def get_qkv(inp_list):
    """Build transformer query/key/value tensors from a list of feature dicts.

    The first element(s) of inp_list describe the template(s); the last
    element describes the search region. The query attends from the search
    region; keys/values span the concatenation of all inputs.
    """
    search = inp_list[-1]
    # Concatenate features/positions along dim 0 and masks along dim 1.
    feat_all = torch.cat([d["feat"] for d in inp_list], dim=0)
    mask_all = torch.cat([d["mask"] for d in inp_list], dim=1)
    pos_all = torch.cat([d["pos"] for d in inp_list], dim=0)
    q = search["feat"] + search["pos"]
    k = feat_all + pos_all
    v = feat_all
    return q, k, v, mask_all
def binary_to_int(value):
    """Parse a base-2 string and return the corresponding integer."""
    return int(value, base=2)
from pathlib import Path
import re
def read_input():
    """Read 'input.txt' next to this module, keeping only <, >, ^ and v characters."""
    raw = Path(__file__).with_name('input.txt').read_text()
    # Strip everything except the four movement characters.
    return re.sub(r'[^<>^v]', '', raw)
import subprocess
def is_elf(filepath):
    """Returns True if |filepath| is an ELF file (as reported by `file`)."""
    proc = subprocess.run(['file', filepath],
                          stdout=subprocess.PIPE,
                          check=False)
    return b'ELF' in proc.stdout
def box_overlap(box1, box2, resolution, padding=0):
    """Return whether two boxes overlap, with optional border-aware padding.

    Incoming format is y1, x1, y2, x2.
    Padding is optional, but can improve post-processing: coordinates that
    lie exactly on an image border are nudged inward by ``padding`` before
    the overlap test, so boxes clipped at the border can still be merged.

    NOTE(review): resolution[1] is compared against y coordinates and
    resolution[0] against x coordinates, which suggests resolution is
    (width, height) — confirm against callers.
    """
    box1_ymin, box1_xmin, box1_ymax, box1_xmax = box1
    box2_ymin, box2_xmin, box2_ymax, box2_xmax = box2
    # NOTE(review): each elif chain below adjusts only the FIRST coordinate
    # found on the far border — if several coordinates touch it at once, the
    # later ones are left unadjusted. Looks deliberate but verify.
    if box1_ymin == resolution[1]:
        box1_ymin -= padding
    elif box2_ymin == resolution[1]:
        box2_ymin -= padding
    elif box1_ymax == resolution[1]:
        box1_ymax -= padding
    elif box2_ymax == resolution[1]:
        box2_ymax -= padding
    elif box1_xmin == resolution[0]:
        box1_xmin -= padding
    elif box2_xmin == resolution[0]:
        box2_xmin -= padding
    elif box1_xmax == resolution[0]:
        box1_xmax -= padding
    elif box2_xmax == resolution[0]:
        box2_xmax -= padding
    # Same single-adjustment behavior for coordinates on the zero border.
    if box1_ymin == 0:
        box1_ymin += padding
    elif box2_ymin == 0:
        box2_ymin += padding
    elif box1_ymax == 0:
        box1_ymax += padding
    elif box2_ymax == 0:
        box2_ymax += padding
    elif box1_xmin == 0:
        box1_xmin += padding
    elif box2_xmin == 0:
        box2_xmin += padding
    elif box1_xmax == 0:
        box1_xmax += padding
    elif box2_xmax == 0:
        box2_xmax += padding
    # Standard axis-aligned separation test, expanded by padding on all sides.
    return not (
        box1_xmax + padding < box2_xmin - padding or
        box1_xmin - padding > box2_xmax + padding or
        box1_ymax + padding < box2_ymin - padding or
        box1_ymin - padding > box2_ymax + padding
    ) | 2d191d89e486bdfbd1f0b91d5da002910ea38086 | 693,987 |
def mul_with_none(v1, v2):
    """Multiply v1 * v2, propagating None: if either operand is None, return None.

    (The original docstring claimed None was "treated with zero", but the
    code has always propagated None; the documentation is corrected here and
    the two separate None branches are merged.)
    """
    if v1 is None or v2 is None:
        return None
    return v1 * v2
def root_node(tree, level):
    """
    Walk parent links upward from *tree* until reaching a node at *level*.

    :param tree: a tree node (must have .level and .parent attributes)
    :param level: level of the subtree (0 for the tree root)
    :return: subtree root at *level*
    """
    node = tree
    while node.level > level:
        node = node.parent
    return node
def get_patch_position(tile, patch):
    """
    Convert a patch's absolute position into coordinates relative to its tile.

    tile: tuple -> (tile x position, tile y position, tile size)
    patch: tuple -> (patch x position, patch y position, patch size)
    """
    tile_x, tile_y, _tile_size = tile
    patch_x, patch_y, patch_size = patch
    return (patch_x - tile_x, patch_y - tile_y, patch_size)
def StringsContainSubstrings(strings, substrings):
    """Returns true if every substring is contained in at least one string.

    Each substring only needs to be found once, anywhere among the strings.
    With an empty `substrings` list the result is vacuously True.

    Bug fix: the original removed at most ONE satisfied substring per string
    (remove-then-break to avoid mutating the set during iteration), so a
    single string containing several substrings did not satisfy all of them
    — e.g. StringsContainSubstrings(['ab'], ['a', 'b']) wrongly returned
    False. It also returned False for empty `substrings` with empty
    `strings`. Both now follow the documented contract.

    Args:
        strings: List of strings to search for substrings in.
        substrings: List of strings to find in `strings`.
    Returns:
        True if every substring is contained in at least one string.
    """
    remaining = set(substrings)
    for s in strings:
        if not remaining:
            break
        # Rebuild the set instead of mutating it mid-iteration; this lets a
        # single string satisfy any number of substrings at once.
        remaining = {sub for sub in remaining if sub not in s}
    return not remaining
def U234():
    """MF8/MT457 from JEFF-3.3 U-234. It was selected because it contains
    both continuous and discrete spectra.

    Returns the raw ENDF-6 formatted text verbatim as a test fixture.
    The records are fixed-width (80-column) and must not be reformatted.
    """
    text = """ 9.223400+4 2.320300+2          0          0          0          63541 8457    1
 7.75370+12 9.467280+9          0          0          6          03541 8457    2
 1.414380+4 1.194650+3 1.450190+3 1.343010+2 4.841980+6 1.484590+33541 8457    3
 0.000000+0 1.000000+0          0          0         12          23541 8457    4
 4.000000+0 0.000000+0 4.857900+6 8.000000+2 1.000000+0 0.000000+03541 8457    5
 6.000000+0 0.000000+0 1.761000+8 3.310000+6 1.70000-11 1.00000-123541 8457    6
 0.000000+0 0.000000+0          2          0          6          63541 8457    7
 1.230000-3 0.000000+0 1.072340+2 1.226460+0 1.37428-10 1.59383-113541 8457    8
 5.320000+4 2.000000+1          0          0         12          03541 8457    9
 4.000000+0 0.000000+0 1.000000+0 1.626020-2 0.000000+0 0.000000+03541 8457   10
 2.330000+2 4.893000+0 0.000000+0 0.000000+0 1.700000+2 3.570000+03541 8457   11
 1.209000+5 2.000000+1          0          0         12          03541 8457   12
 4.000000+0 0.000000+0 2.780490-1 4.065040-3 0.000000+0 0.000000+03541 8457   13
 5.043000+0 1.008600-1 2.530000-1 5.060000-3 3.490000+0 6.980000-23541 8457   14
 4.549500+5 5.000000+1          0          0         12          03541 8457   15
 4.000000+0 0.000000+0 2.032520-4 4.065040-5 0.000000+0 0.000000+03541 8457   16
 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+03541 8457   17
 5.082000+5 1.000000+2          0          0         12          03541 8457   18
 4.000000+0 0.000000+0 1.219510-4 4.065040-5 0.000000+0 0.000000+03541 8457   19
 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+03541 8457   20
 5.817000+5 2.000000+2          0          0         12          03541 8457   21
 4.000000+0 0.000000+0 9.756100-5 4.065040-5 0.000000+0 0.000000+03541 8457   22
 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+03541 8457   23
 6.349000+5 2.000000+2          0          0         12          03541 8457   24
 4.000000+0 0.000000+0 2.439020-4 8.130080-5 0.000000+0 0.000000+03541 8457   25
 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+0 0.000000+03541 8457   26
 6.000000+0 0.000000+0          0          0          1         153541 8457   27
         15          2                                            3541 8457   28
 0.000000+0 0.000000+0 1.400000+5 9.165600-7 3.000000+5 8.862800-73541 8457   29
 5.000000+5 1.104630-6 7.000000+5 6.165900-7 1.000000+6 3.530300-73541 8457   30
 1.500000+6 1.528700-7 2.000000+6 8.133000-8 2.500000+6 5.967000-83541 8457   31
 3.000000+6 2.922000-8 4.000000+6 1.182000-8 5.000000+6 3.640000-93541 8457   32
 6.000000+6 2.120000-9 7.000000+6 1.40000-10 1.000000+7 0.000000+03541 8457   33
 0.000000+0 4.000000+0          0          0          6          53541 8457   34
 1.000000+0 0.000000+0 4.841980+6 1.484590+3 0.000000+0 0.000000+03541 8457   35
 4.150800+6 9.000000+2          0          0          6          03541 8457   36
 4.000000+0 0.000000+0 3.000000-7 1.000000-7 0.000000+0 0.000000+03541 8457   37
 4.275400+6 9.000000+2          0          0          6          03541 8457   38
 4.000000+0 0.000000+0 4.000000-7 1.000000-7 0.000000+0 0.000000+03541 8457   39
 4.603800+6 9.000000+2          0          0          6          03541 8457   40
 4.000000+0 0.000000+0 1.990000-3 2.000000-5 0.000000+0 0.000000+03541 8457   41
 4.722600+6 9.000000+2          0          0          6          03541 8457   42
 4.000000+0 0.000000+0 2.842000-1 2.000000-4 0.000000+0 0.000000+03541 8457   43
 4.774900+6 8.000000+2          0          0          6          03541 8457   44
 4.000000+0 0.000000+0 7.137000-1 2.000000-4 0.000000+0 0.000000+03541 8457   45
 0.000000+0 5.000000+0          1          0          6          03541 8457   46
 0.000000+0 0.000000+0 5.608300-5 6.684760-6 3.06000-11 3.55016-123541 8457   47
 6.000000+0 0.000000+0          0          0          1         383541 8457   48
         38          4                                            3541 8457   49
 1.221780-2 9.23550-11 1.221780-1 2.92052-10 1.221780+0 9.23550-103541 8457   50
 1.221780+1 2.920490-9 1.221780+2 9.234580-9 1.221780+3 2.917600-83541 8457   51
 2.443570+3 4.121990-8 3.665350+3 5.043340-8 4.887130+3 5.817730-83541 8457   52
 6.108910+3 6.497910-8 9.163370+3 7.938420-8 1.221780+4 9.143610-83541 8457   53
 1.832680+4 1.114270-7 3.665350+4 1.552360-7 6.108920+4 1.964410-73541 8457   54
 1.221780+5 2.642600-7 2.443570+5 3.381560-7 3.665350+5 3.747420-73541 8457   55
 4.887130+5 3.915370-7 6.108920+5 3.960940-7 7.330700+5 3.926080-73541 8457   56
 8.552490+5 3.837100-7 9.774270+5 3.711680-7 1.099610+6 3.562190-73541 8457   57
 1.221780+6 3.397550-7 1.527230+6 2.958340-7 1.832680+6 2.523860-73541 8457   58
 2.138120+6 2.123070-7 2.443570+6 1.767610-7 3.054460+6 1.198650-73541 8457   59
 3.665350+6 7.964110-8 4.887130+6 3.383080-8 6.108920+6 1.391470-83541 8457   60
 7.330700+6 5.607490-9 8.552490+6 2.228170-9 1.221780+7 1.32591-103541 8457   61
 1.832680+7 1.09418-12 2.443570+7 8.51306-15                      3541 8457   62
 0.000000+0 6.000000+0          0          0          6          03541 8457   63
 0.000000+0 0.000000+0 2.818600-3 1.755860-4 1.70000-11 1.00000-123541 8457   64
 0.000000+0 8.000000+0          0          0          6          83541 8457   65
 1.000000+0 0.000000+0 1.414380+4 1.414380+3 0.000000+0 0.000000+03541 8457   66
 4.176000+3 2.088000+2          0          0          6          03541 8457   67
 4.000000+0 0.000000+0 3.224940-1 3.224940-2 0.000000+0 0.000000+03541 8457   68
 9.478000+3 4.739000+2          0          0          6          03541 8457   69
 4.000000+0 0.000000+0 1.165410-1 1.165410-2 0.000000+0 0.000000+03541 8457   70
 1.124910+4 5.624550+2          0          0          6          03541 8457   71
 4.000000+0 0.000000+0 8.652600-5 8.652600-6 0.000000+0 0.000000+03541 8457   72
 3.689970+4 1.844980+3          0          0          6          03541 8457   73
 4.000000+0 0.000000+0 2.091000-1 2.091000-2 0.000000+0 0.000000+03541 8457   74
 4.902400+4 2.451200+3          0          0          6          03541 8457   75
 4.000000+0 0.000000+0 7.749000-2 7.749000-3 0.000000+0 0.000000+03541 8457   76
 7.912000+4 3.956000+3          0          0          6          03541 8457   77
 4.000000+0 0.000000+0 2.163150-6 2.163150-7 0.000000+0 0.000000+03541 8457   78
 1.046000+5 5.229990+3          0          0          6          03541 8457   79
 4.000000+0 0.000000+0 1.193580-3 1.193580-4 0.000000+0 0.000000+03541 8457   80
 1.167240+5 5.836200+3          0          0          6          03541 8457   81
 4.000000+0 0.000000+0 4.446000-4 4.446000-5 0.000000+0 0.000000+03541 8457   82
 0.000000+0 9.000000+0          0          0          6          73541 8457   83
 1.000000+0 0.000000+0 1.342960+3 1.342960+2 0.000000+0 0.000000+03541 8457   84
 4.176000+3 8.352000+1          0          0          6          03541 8457   85
 4.000000+0 0.000000+0 1.840870-2 1.840870-3 0.000000+0 0.000000+03541 8457   86
 1.340900+4 2.681800+2          0          0          6          03541 8457   87
 4.000000+0 0.000000+0 9.382160-2 9.382160-3 0.000000+0 0.000000+03541 8457   88
 8.995300+4 1.799060+3          0          0          6          03541 8457   89
 4.000000+0 0.000000+0 2.498280-5 2.498280-6 0.000000+0 0.000000+03541 8457   90
 9.335000+4 1.867000+3          0          0          6          03541 8457   91
 4.000000+0 0.000000+0 4.075500-5 4.075500-6 0.000000+0 0.000000+03541 8457   92
 1.048310+5 2.096620+3          0          0          6          03541 8457   93
 4.000000+0 0.000000+0 4.727580-6 4.727580-7 0.000000+0 0.000000+03541 8457   94
 1.056100+5 2.112200+3          0          0          6          03541 8457   95
 4.000000+0 0.000000+0 9.006860-6 9.006860-7 0.000000+0 0.000000+03541 8457   96
 1.086000+5 2.172000+3          0          0          6          03541 8457   97
 4.000000+0 0.000000+0 4.890600-6 4.890600-7 0.000000+0 0.000000+03541 8457   98"""
    return text | 446efcce73bfebe404f442d7657d2d3ae6674e80 | 693,994 |
def normalize_url(url):
    """If passed url doesn't include schema return it with default one - http."""
    lowered = url.lower()
    if lowered.startswith('http'):
        return url
    return 'http://%s' % url
def search_matrix_lintcode(matrix, target):
    """
    Count occurrences of target in a matrix whose rows and columns are sorted
    ascending, using the staircase walk from the top-right corner.

    Bug fix: the original guard used `or` (`len(matrix) != 0 or
    len(matrix[-1]) != 0`), so an empty matrix evaluated `matrix[-1]` and
    raised IndexError. Both dimensions must be non-empty before walking.

    :param matrix: given matrix
    :type matrix: list[list[int]]
    :param target: target number
    :type target: int
    :return: number of occurrences
    :rtype: int
    """
    occur = 0
    if not matrix or not matrix[-1]:
        return occur
    row, col = 0, len(matrix[-1]) - 1
    while row < len(matrix) and col >= 0:
        value = matrix[row][col]
        if value == target:
            occur += 1
            col -= 1
        elif value < target:
            row += 1
        else:
            col -= 1
    return occur
def singleGridIndexToGridIndex(i, nx, ny, nz):
    """
    Convert a single (flat) grid index into a 3-D grid index.

    :param i: (int) single grid index; may also be an ndarray, in which case
        ix, iy, iz are ndarrays of the same shape
    :param nx, ny, nz: (int) number of grid cells in each direction
        (nz is unused by the computation but kept for a symmetric interface)
    :return: ix, iy, iz:
        (3-tuple) grid index in x-, y-, z-axis direction

    Simplified with divmod, which works for both scalars and ndarrays.
    """
    iz, remainder = divmod(i, nx * ny)
    iy, ix = divmod(remainder, nx)
    return ix, iy, iz
import inspect
def _get_not_annotated(func, annotations=None):
"""Return non-optional parameters that are not annotated."""
argspec = inspect.getfullargspec(func)
args = argspec.args
if argspec.defaults is not None:
args = args[:-len(argspec.defaults)]
if inspect.isclass(func) or inspect.ismethod(func):
args = args[1:] # Strip off ``cls`` or ``self``.
kwonlyargs = argspec.kwonlyargs
if argspec.kwonlydefaults is not None:
kwonlyargs = kwonlyargs[:-len(argspec.kwonlydefaults)]
annotations = annotations or argspec.annotations
return [arg for arg in args + kwonlyargs if arg not in annotations] | b16e9a2f1e7b71a1b9e4df303a01e67b7eea5482 | 693,998 |
def keysplit(strng):
    """Split an activity key joined into a single string using the magic sequence `⊡|⊡`."""
    parts = strng.split("⊡|⊡")
    return tuple(parts)
import re
def ensure_peak_format(ann, sep=None):
    """
    Ensure that the AnnData object has a consistent peak-name format
    (e.g. 'chr1:100-200') for later comparisons and operations.

    Bug fix: the original used a mutable default argument (``sep=[':', '-']``).
    It was never mutated, but the idiom is replaced with a None sentinel.

    :param ann: AnnData-like object whose ``var.index`` holds the peak names
        — assumed, confirm against callers.
    :param sep: the two separators expected between chrom/start/end;
        defaults to [':', '-'].
    :return: *ann*, with ``var`` reindexed when reformatting was needed.
    """
    if sep is None:
        sep = [':', '-']
    peaks = ann.var.index.tolist()
    non_alphanum = "[^0-9a-zA-Z.]+"
    # For each peak name, the list of non-alphanumeric separator runs found.
    regex = [re.findall(non_alphanum, p) for p in peaks]
    if all(r == sep for r in regex):
        print('Peak format is consistent')
        return ann
    print('Peak format is inconsistent. Reformatting.')
    # Replace the first two separator runs with the canonical separators.
    peaks = [p.replace(r[0], sep[0], 1).replace(r[1], sep[1], 1)
             for r, p in zip(regex, peaks)]
    ann.var = ann.var.reindex(index=peaks)
    return ann
def flatten_list(list_):
    """
    Turn a list of lists into a single list of the sublists' elements.

    Args:
        list_ (list): List to flatten (one level only).
    Returns:
        Flattened list.
    """
    flattened = []
    for sublist in list_:
        flattened.extend(sublist)
    return flattened
import re
def argo_gdac_float_meta(profile_index,float_wmoid):
""" Accessor for metadata on a specific Argo float from GDAC.
Returns dict 'this_float_meta' relevant to the given float's profiles, with following keys:
'num_profs': number of profiles (integer)
'prof_nums': array of profile numbers (integer form, so '_000D' and '_000' would both be integer 0)
'prof_nums_full': array of profile numbers (full string [alphanumeric] form, preserving, e.g. '_000D')
'prof_datetimes': array of profile datetimes (18-digit integer format)
'prof_statuses': array of profile statuses (e.g. 'D' for delayed mode)
'prof_lats': array of profile latitudes
'prof_lons': array of profile longitudes
'prof_position_flags': array of profile position QC flags (1 = likely good, 2 = interpolated, assumed under ice, 9 = bad)
'prof_filenames': array of profile filenames
"""
this_float_mask = profile_index[:,0] == float_wmoid
prof_filenames = profile_index[this_float_mask, 1]
this_float_num_profs = len(prof_filenames)
filename_regexp_full = re.compile('[A-Z][0-9]*_([0-9]*[A-Z]*).nc')
filename_regexp_int = re.compile('[A-Z][0-9]*_([0-9]*)[A-Z]*.nc')
this_float_meta = {}
this_float_meta['num_profs'] = this_float_num_profs
this_float_meta['prof_nums'] = [int(filename_regexp_int.findall(prof_filenames[n])[0]) for n in range(this_float_num_profs)]
this_float_meta['prof_nums_full'] = [filename_regexp_full.findall(prof_filenames[n])[0] for n in range(this_float_num_profs)]
this_float_meta['prof_datetimes'] = profile_index[this_float_mask, 5]
this_float_meta['prof_statuses'] = profile_index[this_float_mask, 2]
this_float_meta['prof_lats'] = profile_index[this_float_mask, 6]
this_float_meta['prof_lons'] = profile_index[this_float_mask, 7]
this_float_meta['prof_position_flags'] = profile_index[this_float_mask, 3]
this_float_meta['prof_filenames'] = profile_index[this_float_mask, 1]
return this_float_meta | bdcf6da4d02ef1961ac79a8af2790ad2f0418df7 | 694,002 |
def get_close_descriptions(get_close, initial_state, current_state):
    """
    Get all 'close' descriptions from the current state (if any).

    Parameters
    ----------
    get_close: function
        Function that gets the drawer or the door which is closed.
    initial_state: nd.array
        Initial state of the environment.
    current_state: nd.array
        Current state of the environment.

    Returns
    -------
    descr: list of str
        List of 'close' descriptions satisfied by the current state.

    Cleanup: the trailing `.copy()` of a freshly-built local list was
    redundant and has been removed; the result is built by comprehension.
    """
    closed_things = get_close(initial_state, current_state)
    return ['Close the {}'.format(c) for c in closed_things]
def tmpfile(tmpdir):
    """A temporary file that can be read.

    NOTE(review): *tmpdir* is presumably pytest's ``tmpdir`` fixture
    (a ``py.path.local``) — confirm; this looks like a fixture helper.
    Writes a known string and returns the path object so tests can read
    it back.
    """
    file_path = tmpdir.join("test")
    file_path.write("Temporary test file")
    return file_path | 46feeb0c7ef4a3e9771376c977adefba4e64de08 | 694,004 |
def doc_to_text(doc):
    """
    Convert a document object back to its original text representation.

    Assumes parser offsets map to original document offsets: whenever the
    accumulated text is shorter than the next sentence's absolute offset,
    the gap is filled with spaces plus a single newline so BRAT displays
    minimal newlines while character offsets are preserved.
    """
    text = u""
    for sent in doc.sentences:
        start = sent.abs_char_offsets[0]
        if len(text) != start:
            gap = start - len(text)
            text += ' ' * (gap - 1) + u"\n"
        text += sent.text.rstrip(u' \t\n\r')
    return text
def get_bumped_prerelease_version(version):
    """Return the pre-release version of the specified version.

    Bumps the patch number and marks the result as a 'beta' pre-release.

    :param version: version object exposing next_patch() and a prerelease attribute
    """
    bumped = version.next_patch()
    bumped.prerelease = ('beta', )
    return bumped
import torch
def l1_penalty(model, l1_coef):
"""Compute L1 penalty. For implementation details, see:
https://discuss.pytorch.org/t/simple-l2-regularization/139
"""
reg_loss = 0
for param in model.pcca.parameters_('y2'):
reg_loss += torch.norm(param, 1)
return l1_coef * reg_loss | 3a2ddc8bd1eeb64e9ba94ce009e2e621677242e2 | 694,007 |
def list_parameters_from_groups(parameter_groups, groups):
    """Return a flat list of all the parameters in the named groups."""
    params = []
    for group in groups:
        params.extend(parameter_groups[group].values())
    return params
def color(marks, index):
    """Compute an SVG fill color for the given mark index.

    Produces a green→yellow→red gradient: green at index 0, yellow at the
    middle index, red at the end.
    """
    step = int(256 / len(marks) * 2) - 1
    yellow_index = int(len(marks) / 2)
    if index < yellow_index:
        red = abs(0 - index) * step
        return 'fill:rgb({} {} {});'.format(red, 255, 0)
    if index > yellow_index:
        green = 255 - abs(yellow_index - index) * step
        return 'fill:rgb({} {} {});'.format(255, green, 0)
    return 'fill:rgb({} {} {});'.format(255, 255, 0)
def ec2config_delete(ec2config):
    """Delete the given EC2 configuration.

    NOTE(review): this is a placeholder stub — it currently performs no
    deletion and simply returns its argument unchanged. The real logic is
    still to be implemented.
    """
    return ec2config | 7476aa5f7c82353d29d49d07a8e23a9cfee1002d | 694,010 |
import string
def is_hexdigit(s):
"""Check if all characters are hexadecimal digits.
:param str s: the string
:return: ``True`` if all characters in the string are hexadecimal digits
and there is at least one character
:rtype: bool
.. versionadded:: 0.5.0
"""
return bool(s) and all(x in string.hexdigits for x in s) | e556080419cefa9ec1668a0fc0a19010c49e261f | 694,011 |
def calc_bpe_charge_transfer_resistance(j_0_bpe, T):
    """
    The area-specific charge transfer resistance through the BPE.

    units: Ohm*m2
    R_ct = kT / (j_0 * e), with k the Boltzmann constant and e the
    (signed, negative) electron charge — matching the original formula.
    """
    BOLTZMANN = 1.3806e-23   # (J/K) Boltzmann constant
    ELECTRON_CHARGE = -1.602e-19  # (C) charge of an electron
    return BOLTZMANN * T / j_0_bpe / ELECTRON_CHARGE
def unhex(s):
    """unhex(s) -> bytes

    Hex-decodes a string.

    Bug fix: ``s.decode('hex')`` is Python-2-only (str.decode no longer
    exists on Python 3 strings). bytes.fromhex is the modern equivalent;
    note the result is therefore ``bytes`` rather than Python 2's ``str``.

    Example:
        >>> unhex("74657374")
        b'test'
    """
    return bytes.fromhex(s)
def run(cmd):
    """Runs the given command, returning the exit code (nonzero on failure)"""
    print('\nRunning: ' + shlex.join(cmd) + '\n')
    exit_code = subprocess.call(cmd)
    return exit_code
import json
def get_features(geojson_file, nl=False):
"""
Get a list of features from something resembling geojson.
Note that if the `nl` option is True, this will return a generator
that yields a single feature from each line in the source file.
"""
if nl:
return (json.loads(line) for line in geojson_file if line.strip())
# if not nl, load the whole file
geojson = json.load(geojson_file)
if not isinstance(geojson, dict):
raise TypeError("GeoJSON root must be an object")
if geojson.get("type") not in ("Feature", "FeatureCollection"):
raise ValueError("GeoJSON must be a Feature or a FeatureCollection")
if geojson["type"] == "Feature":
return [geojson]
return geojson.get("features", []) | 4c3b5bb6011968d3361b44b358a846b51d4fe404 | 694,016 |
def _find_test_plugins(paths_file):
"""
One plugin path per line in a file
Lines starting with a # are considered comments and ignored
:type paths_file: str
:return: List[str]
"""
with open(paths_file) as f:
path = f.read().strip()
lines = path.split('\n')
lines = [x.strip() for x in lines]
lines = [x for x in lines if not x.startswith('#')]
return lines | 85b62e7ab41ea02f8d123379c7b0ed2b96c46566 | 694,019 |
def sort_population(population):
    """
    Sort (individual, fitness) tuples by numeric fitness, best first.

    :param population: population to be sorted
    :return: sorted population (descending fitness)
    """
    def fitness(member):
        return float(member[1])
    return sorted(population, key=fitness, reverse=True)
import hashlib
def block_compute_raw_hash(header):
"""
Compute the raw SHA256 double hash of a block header.
Arguments:
header (bytes): block header
Returns:
bytes: block hash
"""
return hashlib.sha256(hashlib.sha256(header).digest()).digest()[::-1] | 4d6a13316aeda0ec42ca1904ba91170ca0220aae | 694,021 |
import csv
def parseGeneColor(by_gene_color_file_name):
"""
Returns
gene2label
"""
gene_file = open(by_gene_color_file_name)
gene2label = {}
csv_reader = csv.DictReader(gene_file, delimiter="\t")
for row in csv_reader:
gene2label[row["gene"]] = row["label"]
return gene2label | 80340d02bacfec4e273b87aea12e9c4122c0e784 | 694,022 |
import operator
def map_method(*args, **kwargs):
    """
    Given a method name and a sequence of models, return a sequence of values
    of applying the extra positional and keyword arguments in each method.

    Attr:
        attr:
            Method name. A dotted name such as ``"a.b"`` first fetches
            attribute ``a`` from each model, then calls method ``b`` on it.
        models:
            A sequence of models.
        *args, **kwargs:
            Arguments forwarded to each m.<attr>(*args, **kwargs)
    """
    name, models, *extra = args
    head, _, tail = name.partition(".")
    if not tail:
        # Plain name: invoke the method directly on every model.
        return map(operator.methodcaller(head, *extra, **kwargs), models)
    # Dotted name: pull the attribute first, then call the method on it.
    fetched = map(operator.attrgetter(head), models)
    return map(operator.methodcaller(tail, *extra, **kwargs), fetched)
def compare_version_part(a_part, b_part):
    """Compare two parts of a version number and return the difference (taking into account
    versions like 7.0.0-M1).

    Returns a negative number when a_part sorts before b_part, a positive
    number when it sorts after, and 0 when they are equal or cannot be
    compared (mismatched pre-release letters or unparseable input).
    """
    try:
        a_bits = a_part.split('-')
        b_bits = b_part.split('-')
        # Compare the plain numeric portion first (e.g. '7' of '7-M1').
        version_difference = int(a_bits[0]) - int(b_bits[0])
        if version_difference != 0 or (len(a_bits) == 1 and len(b_bits) == 1):
            return version_difference
        if len(a_bits) != len(b_bits):
            # Fewer parts indicates a later version (e.g. '7.0.0' is later than '7.0.0-M1')
            return len(b_bits) - len(a_bits)
        # If letter doesn't match then we can't compare the versions.
        a_letter = a_bits[1][0]
        b_letter = b_bits[1][0]
        if a_letter != b_letter:
            return 0
        # Try to get number from after M, A or RC and compare this.
        a_number_start = [char.isdigit() for char in a_bits[1]].index(True)
        b_number_start = [char.isdigit() for char in b_bits[1]].index(True)
        return int(a_bits[1][a_number_start:]) - int(b_bits[1][b_number_start:])
    except ValueError:
        # If the strings aren't in the format we're expecting then we can't compare them.
        return 0
import torch
def _create_metafeature_tensor(metafeatures, seq, metafeature_spec):
    """Convert a metafeature vector into a tensor.

    :param metafeatures: sequence of raw metafeature values; entry i must
        match the type described by metafeature_spec[i].
    :param seq: sequence whose length sets the first tensor dimension (the
        encoded value is repeated at every sequence position).
    :param metafeature_spec: sequence of (name, type, levels) triples; type
        is ``int`` for continuous features or ``str`` for categorical ones,
        where ``levels`` lists the categories used for one-hot encoding.
    :returns Tensor: dim <string_length x 1 x metafeature_dim>, where
        metafeature_dim is a continuous feature.
    """
    m = []
    for i, feature in enumerate(metafeatures):
        fname, ftype, flevels = metafeature_spec[i]
        if ftype is int:
            # Continuous feature: a single dimension holding the raw value.
            metafeature_dim = 1
            feature_val = ftype(feature)
            feature_index = 0
        elif ftype is str:
            # Categorical feature: one-hot over the declared levels.
            metafeature_dim = len(flevels)
            feature_val = 1
            feature_index = flevels.index(feature)
        else:
            raise ValueError(
                "metafeature type %s not recognized" % ftype)
        # Broadcast the encoded value across every sequence position.
        t = torch.zeros(len(seq), 1, metafeature_dim)
        for j, _ in enumerate(seq):
            t[j][0][feature_index] = feature_val
        m.append(t)
    # Concatenate per-feature tensors along the feature dimension.
    m = torch.cat(m, 2)
    return m
import os
def get_template(name, path):
    """Retrieves the path of the template.

    Searches the given directory for the named template. If it exists,
    return its full path, otherwise return False.

    Parameters:
        name (str): The name of the template.
        path (str): Directory in which to look.

    Returns:
        str: the path of the template if it exists.
        bool: False if the template doesn't exist.
    """
    candidate = os.path.join(path, name)
    # Only report the resolved location when something actually lives there.
    return candidate if os.path.exists(candidate) else False
def get_cell_genes(cur,cell):
    """
    Return the genes expressed in cell

    Parameters
    ----------
    cur : MySQLdb cursor
    cell : str
        Name of cell

    Returns
    -------
    list of str
        Gene names associated with the cell
    """
    # Parameterized query: the original interpolated ``cell`` directly into
    # the SQL string, which allowed SQL injection via the cell name.
    sql = ("select genename from WB.association "
           "join WB.cells on "
           "WB.association.idcell = WB.cells.idcells "
           "where WB.cells.name like %s")
    cur.execute(sql, (cell,))
    return [a[0] for a in cur.fetchall()]
import difflib
def diff(string1, string2):
    """
    Returns an array where array[0] is the content of s2 that have been added
    in regards to s1 and array[1] is the content of s2 that has been removed
    from s1
    """
    additions = []
    removals = []
    # Differ marks each character with '+', '-' or ' '.
    for entry in difflib.Differ().compare(string1, string2):
        marker, char = entry[0], entry[2]
        if marker == "+":
            additions.append(char)
        elif marker == "-":
            removals.append(char)
    return ["".join(additions), "".join(removals)]
def unflatten_dict(d, separator='.'):
    """Unflatten nested dictionary.

    Transforms {'a.b.c': 5, 'a.d': 6} into {'a': {'b': {'c': 5}, 'd': 6}}

    Args:
        d (dict): flattened dictionary to expand.
        separator (str): the separator used between key parts.
    Returns:
        dict: an expanded dictionary with keys uncompressed.
    """
    ret = dict()
    for key, value in d.items():
        parts = key.split(separator)
        # Walk/create the intermediate dicts. Use a distinct name rather
        # than rebinding the parameter ``d`` (the original shadowed it,
        # which worked only because the loop iterator was already bound).
        node = ret
        for part in parts[:-1]:
            if part not in node:
                node[part] = dict()
            node = node[part]
        node[parts[-1]] = value
    return ret
import random
def RAND():
    """
    Returns a pseudo-random float in the half-open interval [0, 1).
    """
    value = random.random()
    return value
def add_values_to_config(defaults, values, source):
    """Given a defaults dictionary (structured like configurable_defaults above)
    and a possibly nested config dict, combine the two and return a
    new dict, structured like cfg_defaults. Every node will have at least
    'type', 'source' and 'value' keys.

    :param defaults: annotated defaults tree; each node is a dict with at
        least 'type' ('map' or 'any'), 'source' and 'value' keys.
    :param values: plain (possibly nested) dict of overriding values.
    :param source: label recorded as the 'source' of every value taken
        from ``values``.
    """
    result = {}
    # Union of both key sets; keys present in both are simply processed
    # twice with the same outcome.
    for key in list(defaults.keys()) + list(values.keys()):
        value = values.get(key)
        default = defaults.get(key)
        if key not in defaults:
            # Undeclared key: accept it as-is, recursing into nested dicts.
            if isinstance(values[key], dict):
                result[key] = {
                    "type": "map",
                    "source": None,
                    "value": add_values_to_config({}, value, source),
                }
            else:
                result[key] = {"type": "any", "source": source, "value": value}
        elif key not in values:
            # Not overridden: keep the annotated default node unchanged.
            result[key] = default
        else:
            if default["source"] == "database":
                # Database-sourced settings win over file/config values; the
                # attempted override is stashed as 'configured_value'
                # (presumably so callers can surface the conflict — verify).
                assert source != "default"
                result[key] = dict(default, configured_value=value)
            elif default["type"] == "map":
                if not isinstance(value, dict):
                    raise TypeError(
                        f"Value found where dict expected at {key}: {value} in {source}"
                    )
                # Recurse, merging the override into the nested defaults.
                result[key] = {
                    "type": "map",
                    "source": None,
                    "value": add_values_to_config(default["value"], value, source),
                }
            else:
                result[key] = {"type": "any", "source": source, "value": value}
    return result
import torch
def test(model, test_loader, device=torch.device("cpu")):
    """Checks the validation accuracy of the model.
    Cuts off at 512 samples for simplicity.

    :param model: torch module; switched to eval mode and called as model(data).
    :param test_loader: iterable of (data, target) batches.
    :param device: device to run inference on (default CPU).
    :return: fraction of correctly classified samples (correct / total).
    """
    model.eval()
    correct = 0
    total = 0
    # Inference only: skip autograd bookkeeping.
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            # Stop once roughly 512 samples have been seen. The check runs
            # before the batch is processed, so slightly more than 512
            # samples may actually be evaluated.
            if batch_idx * len(data) > 512:
                break
            data, target = data.to(device), target.to(device)
            outputs = model(data)
            # Predicted class = argmax over the class dimension.
            _, predicted = torch.max(outputs.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return correct / total
import os
import shutil
def remove_if_exist(path):
    """Delete a file or a directory recursively if it exists, else no exception is raised.

    Returns True when something was removed, False otherwise (missing path,
    or an existing path that is neither a regular file nor a directory).
    """
    if not os.path.exists(path):
        return False
    if os.path.isdir(path):
        shutil.rmtree(path)
        return True
    if os.path.isfile(path):
        os.remove(path)
        return True
    return False
def plotmatrix(X):
    """
    Creates a matrix of marginal plots by delegating to ``X.plotmatrix()``.

    On diagonal, are marginal plots of each variable. Off-diagonal plot
    (i,j) shows the joint marginal density of x_i and x_j.
    """
    make_plots = X.plotmatrix
    return make_plots()
def isGlideinHeldNTimes(jobInfo, factoryConfig=None, n=20):
    """This function looks at the glidein job's information and returns if the
    CondorG job is held for more than N(defaults to 20) iterations
    This is useful to remove Unrecoverable glidein (CondorG job) with forcex option.

    Args:
        jobInfo (dict): Dictionary containing glidein job's classad information
        factoryConfig: factory configuration; falls back to the module-global
            ``factoryConfig`` when None
        n (int): hold-count threshold (default 20)

    Returns:
        bool: True if job is held more than N(defaults to 20) iterations,
        False otherwise (including when the classad lacks NumSystemHolds).
    """
    if factoryConfig is None:
        factoryConfig = globals()["factoryConfig"]
    # Default to 0: the original used .get() with no default, so a classad
    # without NumSystemHolds made the comparison raise TypeError (None > n).
    nsysholds = jobInfo.get("NumSystemHolds", 0)
    return nsysholds > n
def SliceCoordsConstant(minVal, maxVal, divisions):
    """
    Generate one dimension of annulus slice coordinates based upon the supplied
    geometry information, with constant node spacing: ``divisions + 1``
    evenly spaced values from minVal to maxVal inclusive.
    """
    span = maxVal - minVal
    coords = []
    for node in range(divisions + 1):
        coords.append(minVal + span * (float(node) / divisions))
    return coords
def get_z_from_orient(mat_orient):
    """To get the angle in degree from an orientation matrix.

    Reads entry (row 1, column 2); expects 2-D ``[row, col]`` indexing
    (e.g. a numpy array).
    """
    angle_deg = mat_orient[1, 2]
    return angle_deg
import re
def prepare_whitelist(patterns):
    """Join and compile the re patterns into a single alternation.

    Args:
        patterns: regex patterns.
    Return:
        A compiled re object matching any of the input patterns.
    """
    combined = '|'.join(patterns)
    return re.compile(combined)
def _GetVersionIndex(version_str):
    """Returns the version index from ycsb version string.

    Args:
        version_str: ycsb version string with format '0.<version index>.0'.
    Returns:
        (int) version index.
    """
    parts = version_str.split('.')
    return int(parts[1])
import re
def parse_ping_output(ping_output):
    """Parse ping output.

    Returns a tuple of strings (packets transmitted, packet-loss percentage)
    extracted from a ping summary line, or None when no summary is found.
    """
    # Raw string literal: '\d' inside a normal string is an invalid escape
    # sequence (SyntaxWarning on modern Python); '%' needs no escaping.
    match = re.search(
        r'(\d*) packets transmitted, .* ([\d\.]*)% packet loss',
        ping_output)
    return match.groups() if match is not None else None
def concat(liste):
    """
    Concatenate lines_list to rebuild the treated file content

    =============== ============ ==============================================================
    **Parameters**   **Type**     **Description**
    *liste*          string list  a string list representing the content of a .extension file
    =============== ============ ==============================================================

    Returns
    -------
    string
        The rebuilt file contents from given list

    Example
    -------
    >>> concat(["#include<stdio.h>\\n", "void myfunction(int arg){\\n"])
    '#include<stdio.h>\\nvoid myfunction(int arg){\\n'
    """
    # str.join is linear in total length; the original += loop was quadratic.
    return "".join(liste)
def get_fake_ap_data():
    """
    Grabs the selected fake AP top-of-ticket data file.

    Reads ``www/timemachine/US.txt`` in binary mode and returns its lines
    as a list of bytes objects.
    """
    with open('www/timemachine/US.txt', 'rb') as data_file:
        return data_file.readlines()
def is_transform(str_in):
    """Determines if str_in is the name of a transformation file,
    i.e. whether it contains the '.ima' or '.trm' substring.
    """
    return any(marker in str_in for marker in ('.ima', '.trm'))
def process_resize_value(resize_spec):
    """Helper method to process input resize spec.

    Args:
        resize_spec: Either None, a python scalar, or a sequence with length <=2.
            Each value in the sequence should be a python integer.
    Returns:
        None if input size is not valid, or 2-tuple of (height, width), derived
        from input resize_spec.
    """
    if not resize_spec:
        return None
    # A bare integer is shorthand for a square resize (kept for conveniences
    # and also backward compatibility).
    spec = (resize_spec,) if isinstance(resize_spec, int) else tuple(resize_spec)
    if len(spec) == 1:
        spec = (spec[0], spec[0])
    if len(spec) != 2:
        raise ValueError('Unable to process input resize_spec: %s' % spec)
    height, width = spec
    if height <= 0 or width <= 0:
        return None
    return spec
import requests
import json
def get_elements_from_elasticsearch(sysmlId, index="mms", elasticHost="localhost"):
    """
    Method will return an array of elements based on the sysmlid provided. It will be the entire history of the element.

    :param sysmlId: string id of the element to look up
    :param index: ElasticSearch Index
    :param elasticHost: Hostname of ElasticSearch (port 9200 is assumed)
    :return: list of raw ElasticSearch hit dicts for the element
    """
    # Exact-match term query on the element id.
    query = {
        "query":{
            "term":{
                "id":sysmlId
            }
        }
    }
    # NOTE(review): only the first page of hits is returned (ElasticSearch's
    # default size is 10), so "entire history" may be truncated — confirm.
    res = requests.post("http://{}:9200/{}/_search".format(elasticHost,index), data=json.dumps(query))
    return res.json()["hits"]["hits"]
def Tokenize_Context(context, question, tokenizer):
    """ Tokenize single sample.
    Params:
        context: passage text (encoded as the second segment)
        question: question text (encoded as the first segment)
        tokenizer: tokenizer of Transformer-based model from huggingface
    Return:
        input_id: tensor of token ids, shape (1, 256)
        attention_mask: tensor, 1 for real tokens / 0 for padding
        segment_id: token-type ids distinguishing question from context
    """
    # NOTE(review): pad_to_max_length is deprecated in newer transformers
    # releases (use padding='max_length') — confirm the pinned version.
    # NOTE(review): return_token_type_ids is passed the string 'True', which
    # is merely truthy; a bool was probably intended.
    encoded_dict = tokenizer.encode_plus(
        question,
        context,
        add_special_tokens = True, # Add '[CLS]' and '[SEP]'
        max_length = 256, # Pad & truncate all sentences.
        pad_to_max_length = True,
        truncation = True,
        return_attention_mask = True, # Construct attention masks.
        return_tensors = 'pt', # Return pytorch tensors.
        return_token_type_ids = 'True'
    )
    # =============================
    # Store Encoded Sample
    # =============================
    input_id = encoded_dict['input_ids']
    attention_mask = encoded_dict['attention_mask']
    segment_id = encoded_dict['token_type_ids']
    return input_id, attention_mask, segment_id
def split_wrap(sql):
    """Split with \\n, and strip the ' '.

    Splits ``sql`` on newlines, drops a single leading and/or trailing
    empty line, and strips surrounding whitespace from each remaining line.
    """
    sql_list = sql.split('\n')
    # Guard the deletions: the original indexed sql_list[-1] after possibly
    # emptying the list, raising IndexError for sql == "".
    if sql_list and sql_list[0] == '':
        del sql_list[0]
    if sql_list and sql_list[-1] == '':
        del sql_list[-1]
    return [line.strip() for line in sql_list]
def fastpath_dup_rid_input(app):
    """
    Access DB directly. Fetch > 1 measurements from fastpath that share the same
    report_id and input

    Returns (rid, input) for the first duplicated pair found, or None when
    there are no duplicates.
    """
    # The original query had a trailing comma after "input," in the SELECT
    # list, which is invalid SQL.
    sql = """
    SELECT report_id, input
    from fastpath
    group by report_id, input
    HAVING count(*) > 1
    LIMIT 1
    """
    with app.app_context():
        for row in app.db_session.execute(sql):
            return (row[0], row[1])
    return None
def get_BM_data(filename):
    """Read the contents of the given file. Assumes the file
    in a comma-separated format, with 6 elements in each entry:
    0. Name (string), 1. Gender (string), 2. Age (int)
    3. Division (int), 4. Country (string), 5. Overall time (float)
    Returns: dict containing a list for each of the 6 variables."""
    data = {'name': [], 'gender': [], 'age': [],
            'division': [], 'country': [], 'time': []}
    # "with" closes the file even when a row fails to parse (the original
    # leaked the handle on error).
    with open(filename) as f:
        for line in f:
            split = line.split(',')
            data['name'].append(split[0])
            data['gender'].append(split[1])
            data['age'].append(int(split[2]))
            data['division'].append(int(split[3]))
            data['country'].append(split[4])
            # rstrip('\n') instead of [:-1]: the original chopped the last
            # digit when the final line had no trailing newline.
            data['time'].append(float(split[5].rstrip('\n')))
    return data
def get_xml_line(xml_list, index):
    """get xml specified line valid string data

    Pops blank (space-only) entries at ``index`` out of ``xml_list``
    (mutating the caller's list) until a non-blank entry sits at that
    position, then returns it. Returns None when ``index`` falls outside
    the (shrinking) list or the list becomes empty.
    """
    ele = None
    while xml_list and not ele:
        # Bounds checks for both positive and negative indices against the
        # list's current (possibly reduced) length.
        if index >= 0 and index >= len(xml_list):
            return None
        if index < 0 and abs(index) > len(xml_list):
            return None
        ele = xml_list[index]
        # NOTE(review): only spaces are treated as blank here; entries made
        # of tabs/newlines count as content — confirm that is intended.
        if not ele.replace(" ", ""):
            # Blank entry: drop it and retry the same index.
            xml_list.pop(index)
            ele = None
    return ele
def build_B_spline_higher_degree_basis_fns(
        breaks, prev_degree_coefs, degree, x):
    """Build the higher order B spline basis coefficients.

    Applies one step of the Cox-de Boor recursion, producing degree-``degree``
    basis values from the degree-``degree - 1`` values:

    N_{i,p}(x) = ((x-u_i)/(u_{i+p}-u_i))N_{i,p-1}(x) \
        + ((u_{i+p+1}-x)/(u_{i+p+1}-u_{i+1}))N_{i+1,p-1}(x)

    :param breaks: knot vector u; must be indexable up to
        len(prev_degree_coefs) + degree
    :param prev_degree_coefs: basis values N_{i,p-1}(x) of the previous degree
    :param degree: target degree p (must be > 0)
    :param x: evaluation point(s); arithmetic must combine with the coefs
    :return: list of len(prev_degree_coefs) - 1 coefficients N_{i,p}(x)
    """
    assert degree > 0
    coefs = []
    for i in range(len(prev_degree_coefs)-1):
        # The 1e-12 terms guard against division by zero on zero-width
        # knot intervals (repeated knots).
        alpha1 = (x-breaks[i])/(breaks[i+degree]-breaks[i]+1e-12)
        alpha2 = (breaks[i+degree+1]-x)/(breaks[i+degree+1]-breaks[i+1]+1e-12)
        coef = alpha1*prev_degree_coefs[i] + alpha2*prev_degree_coefs[i+1]
        coefs.append(coef)
    return coefs
import subprocess
def get_operation(migration):
    """Gets the current operation being executed by the migration.

    Queries the migration custom resource via kubectl and returns the value
    of ``.status.currentOperation``; raises CalledProcessError when the
    kubectl invocation fails.
    """
    command = ['kubectl', 'get', 'migrations.anthos-migrate.cloud.google.com', '-n', 'v2k-system', migration, '-o',
               'jsonpath={.status.currentOperation}']
    result = subprocess.run(command, capture_output=True, encoding='utf-8')
    result.check_returncode()
    operation = result.stdout.strip()
    print("Current Operation: ", operation)
    return operation
def circular_distance(a: int, b: int, C: int) -> int:
    """
    Finds the shortest distance between two points along the perimeter of a circle.

    arguments:
        a: a point on a circle's circumference.
        b: another point on the circle.
        C: the total circumference of the circle.
    return:
        The shortest distance along the circumference of the circle between the two points

    >>> circular_distance(2,5,10)
    3
    >>> circular_distance(12,3,15)
    6
    >>> # It even works with numbers >C or <0
    ... circular_distance(-20, 37, 10)
    3
    """
    # Distance going one way around; the other way is the remainder of C.
    one_way = abs(a - b) % C
    other_way = C - one_way
    return one_way if one_way <= other_way else other_way
from pathlib import Path
def cache_dir(warn_scraper_dir):
    """Return the cache directory path inside the scraper directory."""
    return str(Path(warn_scraper_dir) / "cache")
def get_proxy_list_size(proxy_list):
    """ Return the current size of the Queue holding a list of proxy ip:ports """
    size = proxy_list.qsize()
    return size
def find_toolchain(ctx):
    """Finds the first rust toolchain that is configured.

    Args:
        ctx (ctx): The ctx object for the current target.

    Returns:
        rust_toolchain: A Rust toolchain context.
    """
    toolchain_label = "@io_bazel_rules_rust//rust:toolchain"
    return ctx.toolchains[toolchain_label]
def apply_function(funct, value, iterations):
    """
    Apply ``funct`` to ``value`` repeatedly, ``iterations`` times, feeding
    each result back in (an example of a function taking another function
    as an argument).
    """
    result = value
    for _ in range(iterations):
        result = funct(result)
    return result
def dectodms(decdegs):
    """Convert Declination in decimal degrees format to degrees, minutes,
    seconds format.

    Keyword arguments:
    decdegs -- Dec. in degrees format
    Return value:
    dec -- tuple of 3 values, (degrees, minutes, seconds); for negative
    input the sign is carried by the most significant non-zero component
    """
    # Work on the magnitude; the sign is re-applied at the end.
    sign = -1 if decdegs < 0 else 1
    decdegs = abs(decdegs)
    if decdegs > 90:
        raise ValueError("coordinate out of range")
    decd = int(decdegs)
    decm = int((decdegs - decd) * 60)
    decs = (((decdegs - decd) * 60) - decm) * 60
    # Necessary because of potential roundoff errors
    if decs - 60 > -1e-7:
        decm += 1
        decs = 0
    if decm == 60:
        decd += 1
        decm = 0
    # Rollover above may push the value past the valid range.
    if decd > 90:
        raise ValueError("coordinate out of range")
    if sign == -1:
        # Attach the minus sign to the first non-zero component, so e.g.
        # -0.5 deg yields (0, -30, 0.0) rather than (-0, 30, 0.0).
        if decd == 0:
            if decm == 0:
                decs = -decs
            else:
                decm = -decm
        else:
            decd = -decd
    return (decd, decm, decs)
import re
def sub(pattern, repl, names, count=0, flags=0):
    """Replace characters that match regular expression in every name."""
    replace = re.compile(pattern, flags=flags).sub
    return [replace(repl, name, count) for name in names]
def entrarNota(text):
    """Prompt repeatedly until a valid grade between 0 and 100 is entered.

    :param text: label shown in the input prompt
    :return: the grade as a float in [0, 100]
    """
    while True:
        try:
            nota = float(input("{}(0-100): ".format(text)))
            if 0 <= nota <= 100:
                return nota
            else:
                print("la nota tiene que estar entre 0 y 100")
        except ValueError:
            # Only catch the conversion failure: the original bare "except:"
            # also swallowed KeyboardInterrupt/SystemExit, making the loop
            # impossible to leave with Ctrl-C.
            print("la nota tiene que ser un valor numerico")
import os
def executable_name( f ):
    """Base name (file name without its directory) of the executable path ``f``."""
    return os.path.basename( f )
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.