content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def sum_offset(data: list[int]) -> int:
    """Count the adjacent pairs (0,1), (1,2), (2,3) etc. where the second
    number is larger than the first, and return the total.

    Each pair increases:
    >>> sum_offset([1, 2, 3, 4, 5])
    4
    No pair increases:
    >>> sum_offset([5, 4, 3, 2, 1])
    0
    Mix of both:
    >>> sum_offset([5, 2, 6, 1, 83])
    2
    """
    # Summing booleans directly avoids the intermediate list and the
    # redundant `1 if ... else 0` expression.
    return sum(b > a for a, b in zip(data, data[1:]))
import requests
def act(url, observation, configuration):
    """
    POST the observation/configuration to one of the two agents and return
    the action it selects.
    """
    payload = {"observation": observation, "configuration": configuration}
    reply = requests.post(url=url, json=payload).json()
    return reply["best_action"]
def remove_exception_module(msg):
    """Strip the module path from an exception message.

    Keeps everything after the last '.' that precedes the first ':'
    (multi-line messages are preserved intact).
    """
    colon = msg.find(':', 0, len(msg))
    dot = msg.rfind('.', 0, colon)
    return msg[dot + 1:] if dot >= 0 else msg
import re
def zero_digits(s):
    """
    Replace every digit in a string by a zero.
    """
    # Raw string: a bare '\d' in a normal literal is an invalid escape and
    # raises SyntaxWarning/DeprecationWarning on modern Python.
    return re.sub(r'\d', '0', s)
def school_year(date, as_tuple=False):
    """
    Return the school year containing `date`. A school year runs from
    August through July, so months before August belong to the year that
    started the previous calendar year. Example:
    * as_tuple = False: "2013 — 2014"
    * as_tuple = True: (2013, 2014)
    """
    start = date.year if date.month >= 8 else date.year - 1
    if as_tuple:
        return (start, start + 1)
    return "%d — %d" % (start, start + 1)
def id_finder(root):
    """Return (as a string) one more than the largest 'id' attribute
    found among root's children, so the result is a free ID with no
    used IDs above it."""
    highest = 0
    for child in root:
        if 'id' in child.attrib:
            candidate = int(child.attrib['id'])
            if candidate > highest:
                highest = candidate
    return str(highest + 1)
import contextlib
import wave
def wav_duration(filename):
    """
    Return the duration of a wav file in seconds.
    """
    with contextlib.closing(wave.open(filename, 'r')) as wav_file:
        n_frames = wav_file.getnframes()
        frame_rate = wav_file.getframerate()
    return n_frames / float(frame_rate)
import random
def random_captcha_str(random_length=16):
    """
    Generate a random captcha string.

    Characters that are easy to confuse (i/1, o/0) are excluded from the
    alphabet on purpose.
    :param random_length: number of characters to generate
    :return: the random string
    """
    alphabet = 'abcdefghjklmnpqrstuvwxyz123456789'
    return ''.join(random.choice(alphabet) for _ in range(random_length))
def normalizeMessage(message):
    """
    Un-tab and strip an informational message (given as UTF-8 bytes).

    Per-line stripping and tab-to-space replacement keep doctest output
    repeatable.
    """
    lines = message.decode('utf-8').split('\n')
    return '\n'.join(line.strip().replace('\t', ' ') for line in lines)
import argparse
def init_argparse():
    """Build the command-line argument parser for the Wapi command."""
    parser = argparse.ArgumentParser(description='Wapi command',)
    for flag, longname, desc in (
        ('-c', '--config', 'Config dir name'),
        ('-m', '--module', 'Module name'),
        ('-n', '--name', 'Request name'),
        ('-s', '--space', 'Space name'),
    ):
        parser.add_argument(flag, longname, help=desc)
    return parser
def eq_strict(a, b):
    """Return True only when both values share the exact same type and
    compare equal; differing types short-circuit to False."""
    return type(a) is type(b) and a == b
def same_text(s1, s2):
    """True if both strings are equal when case is ignored."""
    # casefold() (rather than lower()) handles non-ASCII case mappings;
    # here it is only used to compare against our simple directive names.
    return s1.casefold() == s2.casefold()
def scheduler(epoch):
    """
    Learning-rate schedule: a flat 0.001 for the first 10 epochs, then a
    linear decay of 5e-5 per epoch.

    :param epoch: number of epochs
    :type epoch: int
    """
    base_lr = 0.001
    if epoch >= 10:
        return base_lr - .00005 * epoch
    return base_lr
def coord_each(geojson, callback, excludeWrapCoord=None):
    """
    Iterate over coordinates in any GeoJSON object, similar to Array.forEach().

    The callback is invoked as
    ``callback(coord, coord_index, feature_index, multi_feature_index,
    geometry_index)``.

    :param geojson: any GeoJSON object (FeatureCollection, Feature or geometry)
    :param callback: function called once per coordinate
    :param excludeWrapCoord: if truthy, skip the closing coordinate that
        wraps each (Multi)Polygon ring
    :return: True when iteration completed; None for falsy input
    """
    if not geojson:
        return
    coord_index = 0
    geojson_type = geojson["type"]
    is_feature_collection = geojson_type == "FeatureCollection"
    is_feature = geojson_type == "Feature"
    stop = len(geojson["features"]) if is_feature_collection else 1
    for feature_index in range(0, stop):
        if is_feature_collection:
            geometry_maybe_collection = geojson["features"][feature_index]["geometry"]
        elif is_feature:
            geometry_maybe_collection = geojson["geometry"]
        else:
            geometry_maybe_collection = geojson
        if geometry_maybe_collection:
            is_geometry_collection = (
                geometry_maybe_collection["type"] == "GeometryCollection"
            )
        else:
            is_geometry_collection = False
        stopg = (
            len(geometry_maybe_collection["geometries"]) if is_geometry_collection else 1
        )
        for geom_index in range(0, stopg):
            multi_feature_index = 0
            geometry_index = 0
            geometry = (
                geometry_maybe_collection["geometries"][geom_index]
                if is_geometry_collection
                else geometry_maybe_collection
            )
            if not geometry:
                continue
            coords = geometry["coordinates"]
            geom_type = geometry["type"]
            # Skip the duplicated closing coordinate of polygon rings on request.
            wrap_shrink = (
                1
                if excludeWrapCoord
                and (geom_type == "Polygon" or geom_type == "MultiPolygon")
                else 0
            )
            if geom_type:
                if geom_type == "Point":
                    callback(
                        coords,
                        coord_index,
                        feature_index,
                        multi_feature_index,
                        geometry_index,
                    )
                    # BUGFIX: the indices were advanced with `x += x + 1`
                    # (geometric growth); they are counters and must advance
                    # by one, as in turf.js coordEach.
                    coord_index += 1
                    multi_feature_index += 1
                elif geom_type == "LineString" or geom_type == "MultiPoint":
                    for j in range(0, len(coords)):
                        callback(
                            coords[j],
                            coord_index,
                            feature_index,
                            multi_feature_index,
                            geometry_index,
                        )
                        coord_index += 1
                        if geom_type == "MultiPoint":
                            multi_feature_index += 1
                    if geom_type == "LineString":
                        multi_feature_index += 1
                elif geom_type == "Polygon" or geom_type == "MultiLineString":
                    for j in range(0, len(coords)):
                        for k in range(0, len(coords[j]) - wrap_shrink):
                            callback(
                                coords[j][k],
                                coord_index,
                                feature_index,
                                multi_feature_index,
                                geometry_index,
                            )
                            coord_index += 1
                        if geom_type == "MultiLineString":
                            multi_feature_index += 1
                        if geom_type == "Polygon":
                            geometry_index += 1
                    if geom_type == "Polygon":
                        multi_feature_index += 1
                elif geom_type == "MultiPolygon":
                    for j in range(0, len(coords)):
                        geometry_index = 0
                        for k in range(0, len(coords[j])):
                            for le in range(0, len(coords[j][k]) - wrap_shrink):
                                callback(
                                    coords[j][k][le],
                                    coord_index,
                                    feature_index,
                                    multi_feature_index,
                                    geometry_index,
                                )
                                coord_index += 1
                            geometry_index += 1
                        multi_feature_index += 1
                elif geom_type == "GeometryCollection":
                    # Recurse into nested geometries; propagate early abort.
                    for j in range(0, len(geometry["geometries"])):
                        if not coord_each(
                            geometry["geometries"][j],
                            callback,
                            excludeWrapCoord,
                        ):
                            return False
                else:
                    raise Exception("Unknown Geometry Type")
    return True
import pathlib
def read_html(filename: str) -> str:
    """Return the contents of an html file that sits next to this module."""
    html_path = pathlib.Path(__file__).resolve().parent / filename
    return html_path.read_text()
import sys
import os
import ctypes
def _load_mkl_rt(lib_name):
"""
Load certain MKL library
"""
if sys.platform.startswith("win"):
lib_path = os.path.join(sys.prefix, "Library", "bin", lib_name + ".dll")
elif sys.platform == "darwin":
lib_path = os.path.join(sys.prefix, "lib", "lib" + lib_name + ".dylib")
else:
lib_path = os.path.join(sys.prefix, "lib", "lib" + lib_name + ".so")
if not os.path.exists(lib_path):
lib_path = None
if lib_path:
return ctypes.cdll.LoadLibrary(lib_path) | 7ab6fabb8da4ba1b726d6d09364ab400567273f8 | 693,408 |
def formatString(oldStr):
    """
    Reflow a string into lines of roughly 32 characters.

    *NOTE*
    The returned string already contains the newline characters; use the
    returned line count to align other strings against it.

    Parameters
    ----------
    oldStr: string
        String to be formatted
    Returns
    ----------
    numLines: int
        Number of lines the new string will take up
    newStr: string
        The reflowed string (each word is followed by a space)
    """
    max_len = 32
    line_count = 1
    reflowed = ""
    cur_len = 0
    for word in oldStr.split(" "):
        if (len(word) + cur_len) > max_len:
            # Word does not fit on the current line: start a new one.
            line_count += 1
            cur_len = len(word) + 1
            reflowed += ("\n" + word + " ")
        else:
            cur_len += (len(word) + 1)
            reflowed += (word + " ")
    return line_count, reflowed
def get_configs_info() -> dict:
    """Get rule config validations and descriptions.

    No rule configs are currently defined, so an empty mapping is returned.
    """
    return dict()
import asyncio
def event_loop(request):
    """Fixture: return the default asyncio event loop for the test session."""
    return asyncio.get_event_loop()
def sum_num(start, stop, step=1):
    """Returns the sum of an arithmetic sequence, from ```start``` up to and
    including the last term that does not exceed ```stop```, stepping by
    ```step```.

    Equivalent to ```sum(range(start, stop + 1, step))``` for integers
    (but runs in O(1) on large inputs).
    """
    num_terms = (stop - start) // step + 1
    # BUGFIX: the closed form must use the actual last term, not `stop`;
    # when step does not land exactly on stop the old formula overcounted
    # (e.g. sum_num(1, 10, 2) returned 27.5 instead of 25).
    last = start + (num_terms - 1) * step
    # (start + last) * num_terms is always even for integers, so // is exact.
    return (start + last) * num_terms // 2
def get_column_index(column):
    """Get column index from name, e.g. A -> 1, D -> 4, AC -> 29.
    Reverse of `get_column_letter()`
    """
    total = 0
    # Rightmost letter is the least-significant base-26 "digit".
    for position, letter in enumerate(reversed(str(column).lower())):
        total += (ord(letter) - 96) * 26 ** position
    return total
def get_scalers(height, width, x_max, y_min):
    """
    Compute the scale factors that map data extents onto the image.

    :param height: image height
    :param width: image width
    :param x_max: maximum x value of the data
    :param y_min: minimum y value of the data
    :return: (xscaler, yscaler)
    """
    shrunk_width = width * (width / (width + 1))
    shrunk_height = height * (height / (height + 1))
    return shrunk_width / x_max, shrunk_height / y_min
def get_candidates_high(sublist, idx_pos):
    """Keep only the candidates whose bit at `idx_pos` matches the most
    common bit in that position within `sublist`.

    Ties are resolved in favour of the '1' group, mirroring the original
    puzzle rules.
    """
    ones = [entry for entry in sublist if entry[idx_pos] == '1']
    zeroes = [entry for entry in sublist if entry[idx_pos] == '0']
    return ones if len(ones) >= len(zeroes) else zeroes
from pathlib import Path
def completely_pruned(wkspace):
    """Determine which systematics are completely pruned.
    Parameters
    ----------
    wkspace : str or os.PathLike
        Path of TRExFitter workspace.
    Returns
    -------
    list(str)
        Names of all completely pruned systematics.
    """
    # Collect (systematic, status) pairs out of the TRExFitter pruning report.
    pairs = []
    with (Path(wkspace) / "PruningText.txt").open("r") as f:
        for line in f:
            # Only report lines of the form " --->> <name> <status>" matter.
            if not line.startswith(" --->>"):
                continue
            # NOTE(review): the fixed [7:] offset and the single-space split
            # assume one exact TRExFitter line layout; a status containing a
            # space would fail to unpack into two values here — confirm
            # against a real PruningText.txt. ('sys' also shadows the stdlib
            # module name.)
            sys, status = line.strip()[7:].split(" ")
            if status == "is not present":
                continue
            pairs.append((sys, status))
    # One counter per systematic name (sorted for deterministic output order).
    unique = sorted(set([p[0] for p in pairs]))
    tests = {u: 0 for u in unique}
    for sys, status in pairs:
        # Count every occurrence where the systematic survives in some form.
        k = 0
        if status == "is kept":
            k = 1
        elif status == "is shape only":
            k = 1
        elif status == "is norm only":
            k = 1
        tests[sys] += k
    # A systematic that never survived anywhere is completely pruned.
    return [k for k, v in tests.items() if v == 0]
import random
import time
def unique_string(desp='', **kwargs):
    """
    Generate a random temporary name (for temp tables and similar objects).

    It has a SQL interface so both SQL and Python functions can call it.
    """
    rand_part = random.randint(1, 100000000)
    time_part = int(time.time())
    mixed_part = int(time.time()) % random.randint(1, 100000000)
    return ("__madlib_temp_" + desp + str(rand_part) + "_"
            + str(time_part) + "_" + str(mixed_part) + "__")
def simple_decompression(compressed_string):
    """Decompression for `simple_compression(string)`.

    A digit run N after a character means that character occurs N times in
    total (so N-1 extra copies are appended). Returns "" for empty input.
    """
    # BUGFIX: the original entered its while-True loop unconditionally and
    # raised IndexError on an empty string.
    if not compressed_string:
        return ""
    decompressed = ""
    i = 0
    length = len(compressed_string)
    while i < length:
        if compressed_string[i].isdigit():
            # Consume the full (possibly multi-digit) repeat count.
            start = i
            while i < length and compressed_string[i].isdigit():
                i += 1
            count = int(compressed_string[start:i])
            decompressed += decompressed[-1] * (count - 1)
        else:
            decompressed += compressed_string[i]
            i += 1
    return decompressed
def _format_volume_bids(BIDPEROFFER_D, service_name_mapping):
"""Re-formats the AEMO MSS table BIDDAYOFFER_D to be compatible with the Spot market class.
Examples
--------
>>> BIDPEROFFER_D = pd.DataFrame({
... 'DUID': ['A', 'B'],
... 'BIDTYPE': ['ENERGY', 'RAISEREG'],
... 'BANDAVAIL1': [100.0, 50.0],
... 'BANDAVAIL2': [10.0, 10.0],
... 'BANDAVAIL3': [0.0, 0.0],
... 'BANDAVAIL4': [10.0, 10.0],
... 'BANDAVAIL5': [10.0, 10.0],
... 'BANDAVAIL6': [10.0, 10.0],
... 'BANDAVAIL7': [10.0, 10.0],
... 'BANDAVAIL8': [0.0, 0.0],
... 'BANDAVAIL9': [0.0, 0.0],
... 'BANDAVAIL10': [0.0, 0.0]})
>>> service_name_mapping = {'ENERGY': 'energy', 'RAISEREG': 'raise_reg'}
>>> volume_bids = _format_volume_bids(BIDPEROFFER_D, service_name_mapping)
>>> print(volume_bids)
unit service 1 2 3 4 5 6 7 8 9 10
0 A energy 100.0 10.0 0.0 10.0 10.0 10.0 10.0 0.0 0.0 0.0
1 B raise_reg 50.0 10.0 0.0 10.0 10.0 10.0 10.0 0.0 0.0 0.0
Parameters
----------
BIDPEROFFER_D : pd.DataFrame
=========== ====================================================
Columns: Description:
DUID unique identifier of a unit (as `str`)
BIDTYPE the service being provided (as `str`)
PRICEBAND1 bid volume in the 1st band, in MW (as `np.float64`)
PRICEBAND2 bid volume in the 2nd band, in MW (as `np.float64`)
PRICEBAND10 bid volume in the 10th band, in MW (as `np.float64`)
MAXAVAIL the offered cap on dispatch, in MW (as `np.float64`)
=========== ====================================================
Returns
----------
demand_coefficients : pd.DataFrame
================ =====================================================================================
Columns: Description:
unit unique identifier of a dispatch unit (as `str`)
service the service being provided, optional, if missing energy assumed (as `str`)
1 bid volume in the 1st band, in MW (as `np.float64`)
2 bid volume in the 2nd band, in MW (as `np.float64`)
:
10 bid volume in the nth band, in MW (as `np.float64`)
max_availability the offered cap on dispatch, only used directly for fcas bids, in MW (as `np.float64`)
================ ======================================================================================
"""
volume_bids = BIDPEROFFER_D.loc[:, ['DUID', 'BIDTYPE', 'BANDAVAIL1', 'BANDAVAIL2', 'BANDAVAIL3', 'BANDAVAIL4',
'BANDAVAIL5', 'BANDAVAIL6', 'BANDAVAIL7', 'BANDAVAIL8', 'BANDAVAIL9',
'BANDAVAIL10']]
volume_bids.columns = ['unit', 'service', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']
volume_bids['service'] = volume_bids['service'].apply(lambda x: service_name_mapping[x])
return volume_bids | 1c04f186f35cb800aaa4900be6b4d48e2414953e | 693,421 |
async def get_finnhub_quote(ticker, client):
    """Fetch the quote for a ticker, falling back through crypto exchanges.

    The quote includes the open (o), high (h), low (l), current (c) and
    previous close (pc) prices plus a timestamp (t); t == 0 signals "no
    quote found on that exchange".

    Parameters
    ----------
    ticker : string
        stock ticker
    Returns
    -------
    quote : dictionary
        quote for the ticker (last one attempted if nothing matched)
    dec : string
        number-format string, or None when no exchange produced a quote
    """
    quote = client.quote(symbol=ticker)
    if quote['t'] != 0:
        return quote, '{:,.2f}'
    # Try the two main crypto exchanges first.
    for prefix in ('BINANCE:', 'COINBASE:'):
        quote = client.quote(symbol=prefix + ticker)
        if quote['t'] != 0:
            return quote, '{:,.5f}'
    # Iterate through remaining exchanges.
    remaining = [e for e in client.crypto_exchange() if e not in ['Binance', 'COINBASE']]
    for exchange in remaining:
        quote = client.quote(symbol=exchange + ':' + ticker)
        if quote['t'] != 0:
            return quote, '{:,.5f}'
    return quote, None
def to_bags(data):
    """Split a minibatch into bags grouped by relation label. Corresponding to Eq (9) in paper.

    Args:
        data: one minibatch, a tuple of (word, pos1, pos2, pos, y) batches.
    Return:
        The five batches regrouped into one bag per distinct relation,
        plus the number of bags.
    """
    word_batch, pos1_batch, pos2_batch, pos_batch, y_batch = data
    relations = list(set(y_batch))
    num_bags = len(relations)
    word_bags = [[] for _ in range(num_bags)]
    pos1_bags = [[] for _ in range(num_bags)]
    pos2_bags = [[] for _ in range(num_bags)]
    pos_bags = [[] for _ in range(num_bags)]
    y_bags = [[] for _ in range(num_bags)]
    for bag_idx, relation in enumerate(relations):
        for sample_idx, label in enumerate(y_batch):
            if relation == label:
                word_bags[bag_idx].append(word_batch[sample_idx])
                pos1_bags[bag_idx].append(pos1_batch[sample_idx])
                pos2_bags[bag_idx].append(pos2_batch[sample_idx])
                pos_bags[bag_idx].append(pos_batch[sample_idx])
                y_bags[bag_idx].append(y_batch[sample_idx])
    print("This batch contains {} bags.".format(num_bags))
    return word_bags, pos1_bags, pos2_bags, pos_bags, y_bags, num_bags
def is_word(word):
    """Check whether every character is a lowercase English letter."""
    return all(ch in 'qwertyuiopasdfghjklzxcvbnm' for ch in word)
def unconvolve_sequences(window):
    """
    :param window: a numpy array of sequences of ids that was windowed
    :return: the middle column, or `window` itself when it is already 1-D
    """
    if len(window.shape) == 1:
        # Already a vector: nothing to extract.
        return window
    return window[:, window.shape[1] // 2]
def convert_string_to_bool(string):
    """Converts string to bool.

    Only the (case-insensitive) literal "false" maps to False; every other
    string — including "" — maps to True.

    Args:
        string: str, string to convert.
    Returns:
        Boolean conversion of string.
    """
    return string.lower() != "false"
def get_description(arg):
    """Generates a human-readable description for the given argument."""
    parts = []
    needs_otherwise = False
    if arg.can_be_inferred:
        parts.append('If left unspecified, it will be inferred automatically.')
        needs_otherwise = True
    elif arg.is_flag:
        parts.append('This argument can be omitted.')
        needs_otherwise = True
    if arg.type in {'InputPeer', 'InputUser', 'InputChannel'}:
        parts.append(
            'Anything entity-like will work if the library can find its '
            '<code>Input</code> version (e.g., usernames, <code>Peer</code>, '
            '<code>User</code> or <code>Channel</code> objects, etc.).'
        )
    if arg.is_vector:
        if arg.is_generic:
            parts.append('A list of other Requests must be supplied.')
        else:
            parts.append('A list must be supplied.')
    elif arg.is_generic:
        parts.append('A different Request must be supplied for this argument.')
    else:
        # Always reset to false if no other text is added.
        needs_otherwise = False
    if needs_otherwise:
        parts.insert(1, 'Otherwise,')
        # Lowercase the sentence that now follows "Otherwise,".
        parts[-1] = parts[-1][:1].lower() + parts[-1][1:]
    return ' '.join(parts).replace(
        'list',
        '<span class="tooltip" title="Any iterable that supports len() '
        'will work too">list</span>'
    )
def createHeaderBinsNumber(number_of_bins: int):
    """
    Create the dataset header for the given number of bins.

    :param number_of_bins: number of bins
    :type number_of_bins: int
    :return: list of labels: interaction_ID, bins_0..bins_{n-1}, label
    :rtype: list
    """
    header = ['interaction_ID']
    header.extend('bins_' + str(i) for i in range(number_of_bins))
    header.append('label')
    return header
import multiprocessing
def parallelize(function, iterable, nThreads = multiprocessing.cpu_count()):
    """
    Parallelizes a function. Copied from pycortex so as to not have that import
    @param function: function to parallelize
    @param iterable: iterable object for each instance of the function
    @param nThreads: number of threads to use
    @type function: function with the signature Function(arg) -> value
    @type iterable: list<T>
    @type nThreads: int
    @return: results in a list for each instance
    @rtype: list<T>
    """
    # NOTE(review): the nThreads default is evaluated once at import time,
    # and despite the name these are processes, not threads.
    inputQueue = multiprocessing.Queue()
    outputQueue = multiprocessing.Queue()
    # Shared counter so the filler can report the input length for
    # iterables that do not support len().
    length = multiprocessing.Value('i', 0)
    def _fill(iterable, nThreads, inputQueue, outputQueue):
        # Feed (index, item) pairs, then one (-1, -1) sentinel per worker
        # slot (x2 for slack) so every worker eventually stops.
        for data in enumerate(iterable):
            inputQueue.put(data)
            length.value += 1
        for _ in range(nThreads * 2):
            inputQueue.put((-1, -1))
    def _func(proc, inputQueue, outputQueue):
        # Worker loop: consume until the -1 sentinel index appears.
        # NOTE(review): `function` is captured by closure — presumably this
        # relies on fork-based process start; confirm it is not used with
        # the 'spawn' start method (closures do not pickle).
        index, data = inputQueue.get()
        while index != -1:
            outputQueue.put((index, function(data)))
            index, data = inputQueue.get()
    filler = multiprocessing.Process(target = _fill, args = (iterable, nThreads, inputQueue, outputQueue))
    filler.daemon = True
    filler.start()
    for i in range(nThreads):
        proc = multiprocessing.Process(target = _func, args = (i, inputQueue, outputQueue))
        proc.daemon = True
        proc.start()
    try:
        iterlen = len(iterable)
    # NOTE(review): bare except — intended to catch TypeError for
    # unsized iterables; it waits for the filler so length.value is final.
    except:
        filler.join()
        iterlen = length.value
    # Pre-size the result list; every slot is overwritten below, so the
    # shared-[] aliasing from the * operator is harmless here.
    data = [[]] * iterlen
    for _ in range(iterlen):
        index, result = outputQueue.get()
        data[index] = result
    return data
def one_in(setA, setB):
    """Return True if at least one element of setA is present in setB."""
    return any(element in setB for element in setA)
import torch
def cells_to_preds(predictions, S, z_unit="micro"):
    """
    Converts the model output to predictions of particles relative to the entire image.

    :param predictions: torch tensor with model output of shape (batch_size, anchors, S, S, 6)
    :param S: grid size of model output
    :param z_unit: if 'micro' the z predictions will be converted to micrometres according to simulation settings
    used in our experiments. Do not use if your images differ.
    :returns converted_bboxes: torch tensor of shape as predictions with coordinates scaled relative to entire image
    """
    # NOTE(review): the ellipsis slices below are views, so `predictions`
    # is modified in place — callers should not reuse the raw tensor.
    box_predictions = predictions[..., 1:6]
    # Cell-relative x/y offsets in (0, 1).
    box_predictions[..., 0:2] = torch.sigmoid(box_predictions[..., 0:2])
    if z_unit == "micro":
        # 0.114 is the simulation-specific z scale — TODO confirm for other setups.
        box_predictions[..., 2:3] = -box_predictions[..., 2:3] * 0.114
    box_predictions[..., 3:4] = (
        box_predictions[..., 3:4] * 1e-7
    )  # convert r predictions
    # Objectness scores.
    scores = torch.sigmoid(predictions[..., 0:1])
    # Per-cell column indices; the hard-coded 3 presumably matches the
    # anchor count in dim 1 of `predictions` — verify against the model.
    cell_indices = (
        torch.arange(S)
        .repeat(predictions.shape[0], 3, S, 1)
        .unsqueeze(-1)
        .to(predictions.device)
    )
    # convert predictions relative to image
    x = 1 / S * (box_predictions[..., 0:1] + cell_indices)
    y = 1 / S * (box_predictions[..., 1:2] + cell_indices.permute(0, 1, 3, 2, 4))
    converted_bboxes = torch.cat((scores, x, y, box_predictions[..., 2:5]), dim=-1)
    return converted_bboxes
import time
def time2int(t):
    """Convert a datetime to a unix timestamp in milliseconds (local time)."""
    seconds = time.mktime(t.timetuple())
    return int(seconds * 1000)
def isPrime(n):
    """
    Checks if a natural number is prime.

    :param n: the number (a positive integer) being checked
    :return: boolean
    """
    # BUGFIX: the old version reported 2 as composite (even-number shortcut),
    # 1 and numbers like 9/25 as prime (the trial-division range stopped
    # before sqrt(n) itself).
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Trial division by odd numbers up to and including sqrt(n); avoids
    # float sqrt entirely.
    i = 3
    while i * i <= n:
        if n % i == 0:
            return False
        i += 2
    return True
def create_vista(df):
    """
    Add a 'Vista' column to df: the element-wise sum of the two
    view-related columns.

    df = Dataframe (mutated in place and returned)
    """
    df['Vista'] = df['Vista para as águas'].add(df['Vista para o mar'])
    return df
from typing import Tuple
from typing import Dict
from typing import Any
def postprocess_fbound(
    smfb_0: float, smfb_1: float
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Postprocess sinusoidal-model frequency bounds.

    Bounds given in the wrong order are swapped; equal bounds fall back
    to the full 20 Hz - 16 kHz range.

    Args:
        smfb_0 (float): Lower bound
        smfb_1 (float): Upper bound
    Returns:
        dict, dict: Postprocessed settings and parameters as dictionaries"""
    if smfb_0 > smfb_1:
        smfb_0, smfb_1 = smfb_1, smfb_0
    elif smfb_0 == smfb_1:
        smfb_0, smfb_1 = 20, 16000
    settings = {"smfb_0": smfb_0, "smfb_1": smfb_1}
    params = {"sinusoidal_model__frequency_bounds": (smfb_0, smfb_1)}
    return settings, params
import numpy
def initiate():
    """Load the initial U and V fields from ./uvinitial.npz."""
    with numpy.load('./uvinitial.npz') as uvinitial:
        return uvinitial['U'], uvinitial['V']
def str_to_tuple(astr):
    """Helper: build a vector (tuple) of 0-based letter indices from a string."""
    offset = ord("a")
    return tuple(ord(ch) - offset for ch in astr)
import json
def json_pretty_print(json_dict):
    """Return `json_dict` serialized as indented, key-sorted JSON text."""
    return json.dumps(json_dict, indent=2, sort_keys=True)
def to_str(s, integer=True):
    """Convert a column value to a string.

    usage: new_var = data.apply(lambda f : to_str(f['COLNAME']) , axis = 1)
    integer: boolean for whether to truncate to int first.
    Values that cannot be converted are returned unchanged.
    """
    try:
        return str(int(s)) if integer else str(s)
    except ValueError:
        return s
def pre_compute(leaf, data, scope=None, **kwargs):
    """Transform data prior to calling ``compute`` (identity by default)."""
    return data
def get_number_aliens_x(ai_settings, alien_width):
    """Determine the number of aliens that fit in a row, leaving an
    alien-width margin on each side and a gap between aliens."""
    usable_width = ai_settings.screen_width - 2 * alien_width
    return int(usable_width / (2 * alien_width))
def myDecorator(func):
    """Decorator that prints the wrapped function's name before each call."""
    from functools import wraps

    # functools.wraps preserves __name__, __doc__ etc. of the wrapped
    # function, which the original decorator silently discarded.
    @wraps(func)
    def wrapper(*args, **kwargs):
        print(func.__name__)
        return func(*args, **kwargs)
    return wrapper
def getCenter(box):
    """
    Return the integer (x, y) center of a bounding box (x1, y1, x2, y2).
    """
    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    # Midpoint along each axis, truncated to int.
    return (int((x1 + x2) / 2), int((y1 + y2) / 2))
def media_final_aprovado_reprovado(prova, exercicio, projeto):
    """Given the grades of 1 exam, 1 exercise and 1 project, return whether
    the student passed. The exam has weight 4, the exercise weight 1 and
    the project weight 3; passing requires a weighted average >= 7."""
    weighted_sum = prova * 4 + exercicio * 1 + projeto * 3
    return weighted_sum / 8 >= 7
import json
def hardware_monitor():
    """Endpoint for frontend with information about CPU, RAM and disk usage."""
    with open('./logs/hardware/hardware.json', 'r') as stats_file:
        statistics = json.load(stats_file)
    # BUGFIX: the old code built the JSON by str()/replace("'", '"'),
    # which produced invalid JSON whenever the data contained quotes or
    # apostrophes; json.dumps is always well-formed.
    return json.dumps({"STATISTICS": statistics})
import json
def dict_to_bytes(data : dict) -> bytes:
    """
    Serialize a dict to UTF-8 encoded JSON bytes
    ============================================
    Parameters
    ----------
    data : dict
        Data to convert.
    Returns
    -------
    bytes
        The converted data.
    """
    encoded = json.dumps(data)
    return encoded.encode('utf-8')
import json
import hashlib
def json_based_stable_hash(obj):
    """Computes a cross-kernel stable hash value for the given object.
    The supported data structures are the built-in list, tuple and dict types.
    Any included tuple or list, whether outer or nested, may only contain
    values of the following built-in types: bool, int, float, complex, str,
    list, tuple and dict.
    Any included dict, whether outer or nested, may only contain keys of a
    single type, which can be one of the following built-in types: bool, int,
    float, str, and may only contain values of only the following built-in
    types: bool, int, float, complex, str, list, tuple, dict.
    Parameters
    ---------
    obj : bool/int/float/complex/str/dict/list/tuple
        The object for which to compute a hash value.
    Returns
    -------
    str
        The SHA-256 hex digest of the canonical JSON encoding.
        (Fixed: the docstring previously claimed an int was returned.)
    """
    # Canonical encoding: sorted keys and fixed separators make the digest
    # independent of insertion order and interpreter state.
    encoded_str = json.dumps(
        obj=obj,
        skipkeys=False,
        ensure_ascii=False,
        check_circular=True,
        allow_nan=True,
        cls=None,
        indent=0,
        separators=(',', ':'),
        default=None,
        sort_keys=True,
    ).encode('utf-8')
    return hashlib.sha256(encoded_str).hexdigest()
from pathlib import Path
from typing import Tuple
from typing import List
def python_paths_find(python_package_path: Path, tracing: str = "") -> Tuple[Path, ...]:
    """Find all of the python files inside of a Python package.

    __init__.py is excluded; results are sorted by path."""
    if tracing:
        print(f"{tracing}=>python_paths_find({python_package_path})")
    python_files: List[Path] = sorted(
        candidate
        for candidate in python_package_path.glob("*.py")
        if candidate.name != "__init__.py"
    )
    if tracing:
        print(f"{tracing}<=python_paths_find({python_package_path})=>{python_files}")
    return tuple(python_files)
def reformat(formula):
    """Add spaces around each paren and negation, then split into tokens."""
    spaced = ''.join(f' {ch} ' if ch in '~()' else ch for ch in formula)
    return spaced.split()
from dateutil import tz
def to_local_time_zone(date_input):
    """Return a datetime converted to the local time zone (naive result).

    Args:
        date_input (datetime): value to be transformed; naive datetimes
            are returned unchanged.
    """
    if date_input.tzinfo is None:
        return date_input
    return date_input.astimezone(tz.tzlocal()).replace(tzinfo=None)
def get_inputs(files, config):
    """
    Finds the inputs for the imob experiment (one IMI and one OBS file,
    respectively) for each epitome stage separately.

    Raises Exception (carrying the candidate list) when a stage does not
    have exactly two tagged files.
    """
    inputs = {}
    for exported in config.study_config['fmri']['imob']['glm']:
        # BUGFIX: materialize as a list — the previous filter() iterator was
        # exhausted by the first tag, so the second tag never matched.
        candidates = [x for x in files if '{}.nii.gz'.format(exported) in x]
        tagged_candidates = []
        for tag in config.study_config['fmri']['imob']['tags']:
            tagged_candidates.extend(x for x in candidates if '_{}_'.format(tag) in x)
        if len(tagged_candidates) == 2:
            inputs[exported] = tagged_candidates
        else:
            raise Exception(candidates)
    return inputs
def get_int_argument(args, argname, default=0):
    """
    Helper function to extract an integer argument.
    Raises ValueError if the argument exists but is not an integer;
    returns the default when the argument is not found.
    """
    raw = args.get(argname)
    if raw is None:
        return default
    try:
        return int(raw)
    except ValueError:
        raise ValueError('Error while parsing argument %s' % argname)
def update_array(array, position, value):
    """
    Insert `value` at index `position` (cast to int) of `array` and return it.

    Exists because assignment statements cannot be used inside lambdas.
    :param array: sequence to mutate
    :param position: index (anything int() accepts)
    :param value: value to store
    """
    idx = int(position)
    array[idx] = value
    return array
import math
def _GetPercentile(sortedlist, percent):
"""Returns a desired percentile value of a sorted list of numbers.
E.g., if a list of request latencies is
[1, 4, 7, 14, 34, 89, 100, 123, 149, 345], and percent is 0.9, the result
is 149. If percent is 0.5 (median), result is 34.
Args:
sortedlist: A sorted list of integers, longs or floats.
percent: A fraction between 0 and 1 that indicates desired
percentile value. E.g., 0.9 means 90th percentile is desired.
Returns:
None if list is empty. Else, the desired percentile value.
"""
if not sortedlist:
return None
k = int(math.ceil(len(sortedlist) * percent)) - 1
if k < 0:
k = 0
return sortedlist[k] | c3d726861feb8a28493e9616fad071394eb14e34 | 693,455 |
def path_vars_in(d):
    """
    Extract all (and only) the path vars in a dictionary.
    :param d: a .paths.json data structure
    :return: all path var definitions as (name, value) pairs, with special
        entries like '__ENV' filtered out
    """
    return [(key, value) for key, value in d.items() if key != '__ENV']
import math
def josephus_problem_2(n: int) -> int:
    """
    Closed-form Josephus solution for step size 2:
    writing n = 2^m + l (2^m the largest power of two <= n),
    the survivor position is J(n, 2) = 2*l + 1.
    """
    m = int(math.log2(n))
    largest_pow2 = 2 ** m
    remainder = n - largest_pow2
    return 2 * remainder + 1
def valid_endpoint_address(request):
    """
    Fixture that yields valid USB endpoint addresses.
    """
    # Parametrized pytest fixture body: each value configured in the
    # fixture's params list is exposed via ``request.param``.
    # NOTE(review): the params list itself is not visible in this chunk --
    # presumably declared in the @pytest.fixture decorator; confirm there.
    return request.param | 22529a83926affe44365b81bcfb0b27b13c0b4c7 | 693,458
def set_sink_to_use(sinkName: str) -> dict:
    """Sets a sink to be used when the web page requests the browser to choose a
    sink via Presentation API, Remote Playback API, or Cast SDK.
    Parameters
    ----------
    sinkName: str
    """
    params = {"sinkName": sinkName}
    return {"method": "Cast.setSinkToUse", "params": params}
def topological_sort(end_node):
    """
    Performs a topological sort
    beginning with end_node and
    working backwards via parents
    until the start of the computation
    is reached.
    """
    # Iterative DFS from the output node, following each node's ``.parents``.
    stack = [end_node]
    visited = {}      # nodes that have already been emitted at least once
    topo_sorted = []  # result list, built front-to-back
    while stack:
        node = stack.pop()
        if node in visited:
            # Re-visited node: drop its earlier occurrence so it settles at
            # its latest (deepest) position in the ordering.
            del topo_sorted[topo_sorted.index(node)]
        visited[node] = True
        parents = node.parents
        # NOTE(review): parents are re-pushed on every revisit, which can do
        # redundant work on diamond-shaped graphs; presumably acceptable for
        # the graph sizes used here -- confirm. A cyclic graph would loop
        # forever.
        stack.extend(parents)
        topo_sorted.append(node)
    return topo_sorted | c0c3335a7d48e487592ea0aff8d187f42242d8c6 | 693,461
def even_fibonacci_numbers(n):
    """Return the sum of all even Fibonacci numbers below `n`.
    Parameters
    ----------
    n : int
        The number up to which the sum of Fibonacci numbers is computed.
    Returns
    -------
    int
        The sum of all even Fibonacci numbers strictly below `n`.
    """
    # begin solution
    total = 0
    previous, current = 0, 1
    while current < n:
        if current % 2 == 0:
            total += current
        previous, current = current, previous + current
    return total
    # end solution
def orbitals(partition_set):
    """Prepares the feature vector. Returns a dictionary whose keys are the
    basis vectors (= orbitals, zero-padded to length 8) and whose values are
    their coordinates (= probability of each orbital), initialized to 0."""
    zero_vector = (0,) * 8
    features = {zero_vector: 0}  # the all-zero basis vector is always present
    for partition in partition_set:
        padded = partition + (0,) * (8 - len(partition))
        features[padded] = 0
    return features
import os
def cleanup(self, args):
    """
    Remove .disasm and .svg files created by graphbuilder
    No args
    :Example:
    reversing\@icsref:$ cleanup
    """
    # A program must have been loaded/analyzed first; self.prg is set then.
    try:
        prg = self.prg
    except AttributeError:
        print('Error: You need to first load or analyze a program')
        return 0
    # Intermediate artefacts to delete; str.endswith accepts a tuple, so a
    # single test replaces the four separate suffix checks.
    junk_suffixes = ('.disasm', '.svg', '.PRG', '.CHK')
    results_dir = os.path.join(os.getcwd(), 'results', prg.name)
    for dirname, dirnames, filenames in os.walk(results_dir):
        for filename in filenames:
            if filename.endswith(junk_suffixes) or filename == 'analytics.txt':
                os.remove(os.path.join(dirname, filename))
    print('Cleanup complete')
    return 0
import string
import secrets
def random_string(
    n: int = 8,
    *, _ALPHABET=''.join([string.ascii_lowercase, string.digits])
) -> str:
    """Generate a cryptographically random string of lowercase ascii letters
    and digits.
    Parameters
    ----------
    n: int, optional
        The number of characters which the output string will have. Default=8
    """
    return ''.join(secrets.choice(_ALPHABET) for _ in range(n))
def node_para(args):
    """
    Node parameters.
    :param args: "ip,name,passwd" triples, multiple nodes joined by "//"
    :return: node config or test node config list
    """
    # Splitting a string that does not contain "//" yields a one-element
    # list, so the single-node and multi-node cases share one code path.
    node_list = []
    for node in args.split("//"):
        ip, name, passwd = node.split(",")
        node_list.append([ip, name, passwd])
    return node_list
def format_production_set(productions):
    """Renders a set of productions in a human-readable format."""
    lines = [str(production) for production in sorted(productions)]
    return "\n".join(lines)
def generateJobName(key):
    """
    Transcribe job names cannot contains spaces. This takes in an S3
    object key, drops everything up to and including the FIRST "/",
    replaces spaces with "-" characters and returns that as the job-name
    to use.
    """
    # str.partition splits on the first "/" only, matching the original
    # ``key[1 + key.find('/'):]`` behaviour.
    head, sep, tail = key.partition('/')
    name = tail if sep else key
    return name.replace(" ", "-")
def exceeds_max_lines(filepath: str, max_lines: int) -> bool:
    """
    Return True when the file at ``filepath`` contains more than
    ``max_lines`` lines.

    The file is read lazily, line by line, so a huge file is never loaded
    into memory and reading stops as soon as the limit is crossed.
    """
    with open(filepath) as stream:
        seen = 0
        for _line in stream:
            seen += 1
            if seen > max_lines:
                return True
    return False
import json
def get_missing_params_msg(param_name):
    """Util function to return error response when a parameter is missing
    :param param_name: the missing parameter
    :type param_name: str
    """
    payload = {'success': False, 'message': 'Missing parameter: {}'.format(param_name)}
    return json.dumps(payload).encode('utf-8')
import functools
import asyncio
def async_test(wrapped):
    """
    Run a test case via asyncio.
    Example:
    >>> @async_test
    ... async def lemon_wins():
    ...     assert True
    """
    @functools.wraps(wrapped)
    def runner(*args, **kwargs):
        coroutine = wrapped(*args, **kwargs)
        return asyncio.run(coroutine)
    return runner
def dobro(num):
    """
    -> Compute the double of a number.
    :param num: number whose double will be computed
    :return: twice the value of 'num'
    """
    return 2 * num
def conta_buracos_no_texto(texto):
    """
    Count the "holes" in a text. Viewing a letter as a drawing on the plane,
    letters such as A, D, O, P, Q and R enclose one region ("hole"), B
    encloses two, and letters such as C and E enclose none. The hole count
    of a text is the sum over all its characters, case-insensitively.
    """
    holes_per_char = {'A': 1, 'D': 1, 'O': 1, 'P': 1, 'Q': 1, 'R': 1, 'B': 2}
    return sum(holes_per_char.get(char, 0) for char in texto.upper())
import functools
import time
def compute_time_ms(func):
    """
    Decorator for computing the amount of time taken to do something
    """
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        time_elapsed = time.time() - started
        # Report elapsed wall-clock time in milliseconds.
        print(f"Time taken: {time_elapsed * 1000}")
        return result
    return timed
def generate_specificity_at_threshold(threshold, weighted=False):
    """
    Returns a function that computes the specificity at a provided threshold.
    If weighted=True, the returned function takes a third argument for the
    sample weights. When there are no negative labels, 0.0 is returned.
    """
    if not weighted:
        def specificity(labels, pred_probs):
            negatives = labels == 0
            if negatives.sum() <= 0:
                return 0.0
            correct = negatives & (labels == (pred_probs >= threshold))
            return correct.sum() / negatives.sum()
        return specificity

    def weighted_specificity(labels, pred_probs, sample_weight):
        negatives = labels == 0
        if negatives.sum() <= 0:
            return 0.0
        correct = negatives & (labels == (pred_probs >= threshold))
        return (correct * sample_weight).sum() / (negatives * sample_weight).sum()
    return weighted_specificity
import string
import random
def create_strings_randomly(length, allow_variable, count, let, num, sym, lang):
    """
    Create all strings by randomly sampling from a pool of characters.
    """
    print("Generating strings by randomly sampling from a pool of characters ...")
    # If none specified, use all three
    if True not in (let, num, sym):
        let, num, sym = True, True, True
    # Build the sampling pool from the enabled character classes.
    pool = ''
    if let:
        if lang == 'cn':
            # Unicode range of CJK characters
            pool += ''.join([chr(i) for i in range(19968, 40908)])
        else:
            pool += string.ascii_letters
    if num:
        pool += "0123456789"
    if sym:
        pool += "!\"#$%&'()*+,-./:;?@[\\]^_`{|}~"
    # Per-"word" length bounds; CJK words are much shorter than Latin ones.
    if lang == 'cn':
        min_seq_len = 1
        max_seq_len = 2
    else:
        min_seq_len = 2
        max_seq_len = 10
    strings = []
    for _ in range(0, count):
        current_string = ""
        # NOTE: ``length`` is the number of space-separated words per string,
        # not the character count; ``allow_variable`` randomizes it in [1, length].
        for _ in range(0, random.randint(1, length) if allow_variable else length):
            seq_len = random.randint(min_seq_len, max_seq_len)
            current_string += ''.join([random.choice(pool)
                                       for _ in range(seq_len)])
            current_string += ' '
        # Drop the trailing space appended after the last word.
        strings.append(current_string[:-1])
    return strings | 16bb9d641306d012c0726d75d87f0072fde8c874 | 693,476
def spatial_average(in_tens, keepdim=True):
    """ https://github.com/richzhang/PerceptualSimilarity/blob/master/lpips/lpips.py ."""
    # Average over the last two (spatial) dimensions of an NCHW tensor.
    spatial_dims = [2, 3]
    return in_tens.mean(dim=spatial_dims, keepdim=keepdim)
def isGameOver(t1, t2, iRound, print_result=False):
    """Test whether a penalty shootout should end after round ``iRound``.

    :param t1: first team; must expose ``score``, ``nTake`` and ``scoreBook``
    :param t2: second team; same attributes as ``t1``
    :param iRound: index of the round just taken
    :param print_result: when True, print the final score and score books
    :return: True when the shootout is decided, False otherwise
    """

    def _report():
        # Fixed: the original printed the non-existent attribute ``t2.core``
        # (AttributeError) and joined ``t1.scoreBook`` twice instead of
        # showing both teams' score books.
        print(t1.score, '\t:\t', t2.score)
        print(''.join(t1.scoreBook), '\t:\t', ''.join(t2.scoreBook))

    if iRound < 3:
        # Both teams always take at least three kicks.
        return False
    if 5 > iRound >= 3:
        if t1.score == t2.score:
            return False
        if t1.score > t2.score:
            # Best total t2 can still reach with its remaining kicks.
            t2Max = t2.score + 5 - t2.nTake
            if t1.score > t2Max:
                if print_result:
                    _report()
                return True
            return False
        # t1.score < t2.score: symmetric check for t1's remaining kicks.
        t1Max = t1.score + 5 - t1.nTake
        if t1Max < t2.score:
            if print_result:
                _report()
            return True
        return False
    # iRound >= 5: sudden death; decided only once both took equal kicks
    # and the scores differ.
    if t1.nTake > t2.nTake:
        return False
    if t1.score == t2.score:
        return False
    if print_result:
        _report()
    return True
import numpy as np
def fabric_orientation(fabric_points):
    """
    Calculate the edges of the fabric (2D) as vectors to find the orientation
    of the fabric compared to the human hand.

    :param fabric_points: (4, 3) numpy array of fabric corner points, ordered
        so that consecutive points (wrapping around) share an edge
    :return: tuple of
        - (4, 2) array: angle in degrees of each edge (bottom, left, top,
          right) against the x and y axes
        - (3,) array: center point of the corners' axis-aligned bounding box
    """
    x_axis = np.array([1.0, 0.0])
    y_axis = np.array([0.0, 1.0])

    def _angle_deg(vec, axis):
        # Angle between the (xy) edge vector and an axis, via arccos of the
        # normalized dot product; identical to the formula previously
        # repeated eight times inline.
        cos_theta = np.dot(vec, axis) / (np.linalg.norm(vec) * np.linalg.norm(axis))
        return np.arccos(cos_theta) * (180 / np.pi)

    # Edge vectors between consecutive corners (xy components only):
    # bottom (p0-p1), left (p1-p2), top (p2-p3), right (p3-p0).
    edges = [
        fabric_points[i][:2] - fabric_points[(i + 1) % 4][:2]
        for i in range(4)
    ]
    fabric_edges = np.array(
        [[_angle_deg(edge, x_axis), _angle_deg(edge, y_axis)] for edge in edges]
    )
    # Calculate fabric center point (bounding-box midpoint in x, y and z).
    mins = fabric_points.min(axis=0)
    maxs = fabric_points.max(axis=0)
    fabric_center = (mins + maxs) / 2
    return fabric_edges, fabric_center
def is_builtin_entity(oid):
    """Return if the OID hex number is for a built-in entity."""
    # Built-in entities have both of the two highest bits (mask 0xC0) set.
    oid_num = int(oid, 16)
    return (oid_num & 0xC0) == 0xC0
def padStringZero(versionstr, length, padding):
    """Pad *versionstr* with *padding* so all version strings reach the same
    minimum *length*.

    :param versionstr: list of version components
    :param length: desired minimum length
    :param padding: element appended as filler
    :return: a new, padded list; the input list is not modified
    """
    # Fixed: the previous ``abs(len(versionstr) - length)`` meant a list
    # already LONGER than *length* was extended even further instead of
    # being returned unchanged.
    return versionstr + [padding] * max(0, length - len(versionstr))
def get_AZN(nco_id):
    """Returns mass number :math:`A`, charge :math:`Z` and neutron
    number :math:`N` of ``nco_id``.
    Args:
        nco_id (int): corsika id of nucleus/mass group
    Returns:
        (int, int, int): (A, Z, N) tuple
    """
    # Fixed docs: the function returns (A, Z, N), not "(Z, A)". The dead
    # ``Z, A = 1, 1`` initializer was removed: both branches assign anyway.
    if nco_id >= 100:
        # Corsika encodes a nucleus as A * 100 + Z.
        Z = nco_id % 100
        A = (nco_id - Z) // 100
    else:
        # Non-nucleus ids carry no mass/charge in this scheme.
        Z, A = 0, 0
    return A, Z, A - Z
import os
def csv_with_content(fpath):
    """Return true if file exists AND file has more than 1 row, false otherwise"""
    if fpath and os.path.exists(fpath):
        # Fixed: the file object was previously opened inline inside a
        # generator expression and never closed (handle leak); a context
        # manager guarantees it is closed.
        with open(fpath) as csv_file:
            row_count = sum(1 for _line in csv_file)
        return row_count > 1
    return False
def stopwatch_format(ticks):
    """
    Convert tenths of seconds to formatted time ("M:SS.T").
    """
    minutes, remainder = divmod(ticks, 600)   # 600 tenths per minute
    seconds, tenths = divmod(remainder, 10)
    return '{}:{}{}.{}'.format(minutes, seconds // 10, seconds % 10, tenths)
import numpy
def calculate_octave_and_tile_sizes(source_size, nn_image_size, max_octaves=4, octave_scale=1.4, overlap_percentage=0.25):
    """
    :type source_size: Array of 2 integers
    :param source_size: [height, width] of image to have the dream
    :type nn_image_size: integer
    :param nn_image_size: side length of the (square) network input tile
    :param max_octaves: maximum number of progressively smaller sizes
    :param octave_scale: downscale factor between consecutive octaves
    :param overlap_percentage: fraction of the tile that overlaps a neighbor
    :returns: (octave_sizes, octave_tile_corners) where octave_sizes is a
        list of [h, w] pairs and octave_tile_corners[i] lists the [top, left]
        corner of every tile for octave i
    """
    # find octave sizes
    # array of [h,w] arrays
    octave_sizes = [list(source_size)]
    while len(octave_sizes) < max_octaves and min(octave_sizes[-1]) > nn_image_size:
        min_dim = min(octave_sizes[-1])
        # Never scale below the network input size: cap the factor so the
        # smallest dimension lands exactly on nn_image_size at the limit.
        scale = min(octave_scale, float(min_dim) / nn_image_size)
        new_dims = [int(dim / scale) for dim in octave_sizes[-1]]
        octave_sizes.append(new_dims)
    assert(numpy.array(octave_sizes).min() >= nn_image_size)
    # calculate tile limits per octave (and normalizing coefs)
    octave_tile_corners = []
    for size in octave_sizes:
        h,w = size
        # Largest valid top/left so a tile of nn_image_size still fits.
        max_h = (h-nn_image_size); max_w = (w-nn_image_size);
        stride = int(nn_image_size - overlap_percentage*nn_image_size)
        tops = [0]
        while tops[-1] < max_h:
            tops.append(tops[-1]+stride)
        # Snap the last tile flush to the bottom edge.
        tops[-1] = max_h
        lefts = [0]
        while lefts[-1] < max_w:
            lefts.append(lefts[-1]+stride)
        # Snap the last tile flush to the right edge.
        lefts[-1] = max_w
        tile_corners = []
        for top in tops:
            for left in lefts:
                # Deduplicate: snapping can make two corners coincide.
                if not [top,left] in tile_corners:
                    tile_corners.append([top,left])
        octave_tile_corners.append(tile_corners)
    return(octave_sizes,octave_tile_corners) | 9eae1928e1d9be7cebea1d359a184b0a1ebc2e4b | 693,488
import os
def test_folder_contents_have_common_prefix(recipe):
    """Determine whether folder contents have a common prefix of NAME.
    Args:
        recipe: Recipe object.
    Returns:
        Tuple of Bool: Failure or success, and a string describing the
        test and result.
    """
    result = None
    # NAME comes from the recipe's Input dictionary.
    # NOTE(review): ``recipe`` is a project-defined type exposing dict-style
    # access plus a ``filename`` attribute -- confirm its contract elsewhere.
    name = recipe["Input"].get("NAME")
    description = "All files have prefix of product (NAME: '%s')." % name
    # Check every sibling file of the recipe on disk for the NAME prefix.
    files = os.listdir(os.path.dirname(recipe.filename))
    # all() on the empty directory vacuously yields True.
    result = all((filename.startswith(name) for filename in files))
    return (result, description) | 6b31f9928cb01308557b05128f738507d83d43d7 | 693,489
def _approximate_bias(b, name):
"""
Find a reasonable match for the given name when we have existing biases in gazetteer entry.
Otherwise if the name is just long enough it should be rather unique and return a high bias ~ 0.3-0.5
If a name varies in length by a third, we'll approximate the name bias to be similar.
:param b: dict of name:bias
:param name: normalize name
:return:
"""
if name in b:
return b.get(name)
nmlen = len(name)
diff = int(nmlen / 3)
for n in b:
nlen = len(n)
if abs(nmlen - nlen) < diff:
return b.get(n)
if nmlen >= 20:
return 0.40
return 0.05 | 00bea5b36b1050f74a4763795dae6626d2db8f44 | 693,490 |
def get_e_rtd_default(hw_type):
    """Default rated efficiency for the given water heater type.
    Args:
        hw_type(str): type of the water heater / hot-water heating unit
    Returns:
        float: rated efficiency of the unit
    Raises:
        ValueError: for unknown heater types
    """
    efficiencies = {
        'ガス潜熱回収型給湯機': 0.836,
        'ガス潜熱回収型給湯温水暖房機': 0.836,
        'ガス従来型給湯機': 0.704,
        'ガス従来型給湯温水暖房機': 0.704,
    }
    if hw_type not in efficiencies:
        raise ValueError(hw_type)
    return efficiencies[hw_type]
def get_dict_value(dict_, key):
    """
    Return the value for key in the (JSON) dictionary, or None when the key
    is absent.
    """
    # dict.get already returns None for a missing key, replacing the
    # double-lookup ``key in dict_`` followed by ``dict_[key]``.
    return dict_.get(key)
import importlib
def import_subclass(subclass_path, package=None, required_base_class=None):
    """
    Import and return a subclass.
    *subclass_path* must be in the form of 'module.subclass'. When *package*
    is given, the module part is treated as relative to it. If
    *required_base_class* is given, the imported class must subclass it.
    """
    module_path, _, class_name = subclass_path.rpartition('.')
    if package and not module_path.startswith('.'):
        # Relative imports need a leading dot for import_module().
        module_path = '.' + module_path
    imported_module = importlib.import_module(module_path, package)
    subclass = getattr(imported_module, class_name)
    if required_base_class and not issubclass(subclass, required_base_class):
        raise ValueError("Provided class is not a subclass of *required_base_class*")
    return subclass
def implied_r(f):
    """ Dimension for search implied by a skater name """
    # Generally this isn't encouraged, as skaters might be created by
    # functools.partial and folks may forget to apply functools.update_wrapper
    # to preserve the __name__
    name = f if isinstance(f, str) else f.__name__
    # Order matters: '_r2' is checked before '_r3' before '_r1'.
    for marker, dimension in (('_r2', 2), ('_r3', 3), ('_r1', 1)):
        if marker in name:
            return dimension
    return 0
def paid_group_client(client, paid_user):
    """Get a client in the Paid group."""
    # Pytest fixture body: authenticate the test client as ``paid_user`` so
    # subsequent requests run with a paid-group session.
    # NOTE(review): ``client`` and ``paid_user`` are fixtures defined
    # elsewhere (presumably a Django test client and a user in the Paid
    # group) -- confirm in the conftest.
    client.force_login(paid_user)
    return client | 69f0b1f7d58b50bdddfde4ad4eef1e4d2df9378e | 693,496
def nearest_power_of_two(x):
    """
    Return a number which is nearest to `x` and is the integral power of two.
    Ties round upward (e.g. 6 -> 8, 12 -> 16), as before.
    Parameters
    ----------
    x : int, float
    Returns
    -------
    x_nearest : int
        Number closest to `x` that is an integral power of two.
    """
    # Fixed: the old ``2 ** (x - 2).bit_length()`` "lower" bound could skip
    # the true floor power of two (e.g. f(11) returned 16 instead of 8,
    # f(1) returned 2). bit_length() gives the exact floor/ceil powers.
    x = int(x)
    if x <= 1:
        # 0 and 1 (and any negative input) map to 2**0 == 1.
        return 1
    x_lower = 1 << (x.bit_length() - 1)   # largest power of two <= x
    x_upper = 1 << (x - 1).bit_length()   # smallest power of two >= x
    return x_lower if (x - x_lower) < (x_upper - x) else x_upper
def dummy_ngettext(singular, plural, n):
    """
    Mimic ``ngettext()`` function. This is a passthrough function with the
    same signature as ``ngettext()``. It can be used to simulate translation
    for applications that are untranslated, without the overhead of calling
    the real ``ngettext()``.
    Returns the verbatim singular message if ``n`` is 1, otherwise the
    verbatim plural message.
    """
    return singular if n == 1 else plural
import numpy
def nearest_point_pair(pointset, coord):
    """
    Return the two nearest points in pointset, leftmost first.
    """
    # XXX: Extremely naive -- compute distances to all points and sort.
    # A stable argsort keeps the original tie order, matching the old
    # sorted()-based implementation.
    deltas = pointset - numpy.asarray(coord)
    distances = numpy.sqrt((deltas ** 2).sum(axis=1))
    order = numpy.argsort(distances, kind='stable')
    i, j = int(order[0]), int(order[1])
    return (i, j) if i < j else (j, i)
def _is_same_language(locale1, locale2):
    """Compares whether two locales are the same language."""
    # Plain equality check: assumes both locales are already normalized
    # strings; no language-subtag extraction is attempted here.
    return locale1 == locale2 | ae8ee9456c82c8a3fe683ae5bf5c02dfd524f759 | 693,500
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.