content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def split_repr(data, cols):
    """Split the binary representation of data to fit the given number of cols

    Returns (repr_of_prefix, rest): the repr of the longest prefix of data
    whose repr fits in cols, plus the unformatted remainder; rest is None
    when the whole repr already fits.
    """
    rep = repr(data)
    if len(rep) <= cols:
        return rep, None
    # Do a dichotomy to find an index where to cut the data
    # Invariant: repr(data[:min_idx]) fits in cols; repr(data[:max_idx+1]) does not.
    min_idx = 1
    max_idx = min(cols, len(data) - 1)
    while min_idx != max_idx:
        # Sanity checks
        assert min_idx < max_idx
        assert len(repr(data[:min_idx])) <= cols
        assert len(repr(data[:max_idx + 1])) > cols
        # Bias the midpoint up so progress is guaranteed when max = min + 1.
        cur = (min_idx + max_idx + 1) // 2
        rep = repr(data[:cur])
        if len(rep) <= cols:
            min_idx = cur
        elif len(rep) > cols:
            max_idx = cur - 1
    return repr(data[:min_idx]), data[min_idx:]
def equal(a, b):
    """Return True when two objects have identical shallow instance attributes."""
    return vars(a) == vars(b)
def hex_colour(color: int) -> str:
    """Convert an integer colour to its RGB hex string.

    Pure black is remapped to white, since a Discord dark-theme analogue
    would render black text invisibly.
    """
    rgb_hex = "#{:0>6X}".format(color)
    if rgb_hex == "#000000":
        return "#FFFFFF"
    return rgb_hex
def dict_to_patch_name(patch_image_name_dict):
    """ Usage: patch_name = dict_to_patch_name(patch_image_name_dict)
        convert the dictionary into a file name string

    Args:
        patch_image_name_dict: {'case_id': 'd83cc7d1c94',
                                'location_x': 100,
                                'location_y': 340,
                                'class_label': 'dermis',
                                'file_ext': '.jpg' }
            Note: the extension key is 'file_ext' (the old docstring wrongly
            said 'file_type'); a missing leading dot is added, mutating the
            input dict as before.

    Returns:
        patch_name: file name (without directory path)
    """
    file_ext = patch_image_name_dict['file_ext']
    # Normalize the extension to start with a dot; keep the historical
    # in-place mutation of the caller's dict.
    if len(file_ext) > 1 and file_ext[0] != '.':
        file_ext = '.' + file_ext
        patch_image_name_dict['file_ext'] = file_ext
    return '%s_%i_%i_%s%s' % (
        patch_image_name_dict['case_id'],
        patch_image_name_dict['location_x'],
        patch_image_name_dict['location_y'],
        patch_image_name_dict['class_label'],
        file_ext,
    )
def ieee_1789_2015(frequency: float, percent_flicker: float) -> str:
    """Tests for compliance with IEEE 1789-2015

    Refer to 8.1.1 Simple recommended practices in IEEE 1789-2015 for rule
    definitions.

    Parameters
    ----------
    frequency : float
        The flicker frequency in Hertz
    percent_flicker : float
        The flicker percentage

    Returns
    -------
    str
        Either of: "No Risk", "Low Risk", "High Risk"
    """
    if frequency > 3000:
        return "No Risk"
    if frequency < 90:
        if percent_flicker < 0.01 * frequency:
            return "No Risk"
        if percent_flicker < 0.025 * frequency:
            return "Low Risk"
        # BUG FIX: below 90 Hz, flicker beyond the low-risk limit is high
        # risk; previously control fell through to the >= 90 Hz rules and
        # could wrongly report "No Risk"/"Low Risk".
        return "High Risk"
    # Other flicker <= 3 kHz
    if percent_flicker < 0.0333 * frequency:
        return "No Risk"
    if frequency <= 1250 and percent_flicker < 0.08 * frequency:
        return "Low Risk"
    return "High Risk"
def speed_index(speed):
    """Decorator factory attaching a ``speed_index`` attribute to a source
    method, so the best source can be selected according to user input."""
    def decorator(func):
        func.speed_index = speed
        return func
    return decorator
import os
def get_filenames(path, contains, does_not_contain=('~', '.pyc')):
    """
    Create list of files found in given path that contain or do not contain certain strings.

    Uses os.listdir (sorted) instead of shelling out to ``ls`` via os.popen,
    which was non-portable and unsafe for paths containing shell
    metacharacters. Note hidden (dot) files are now included if they match.

    :param path: path in which to look for files
    :type path: string (directory path)
    :param contains: string that filenames must contain
    :type contains: string
    :param does_not_contain: list of strings that filenames must not contain, optional
    :type does_not_contain: list of strings
    :return: list of full paths of matching filenames, alphabetically sorted
    :rtype: list of strings
    """
    filelist = []
    for filename in sorted(os.listdir(path)):
        if contains not in filename:
            continue
        if any(bad in filename for bad in does_not_contain):
            continue
        filelist.append(os.path.join(path, filename))
    return filelist
def nb_changeset_by_uid(data):
    """count the number of changesets by user id.

    data: dask.dataframe with changeset 'id' and user 'uid' columns
    return a pandas.DataFrame (per-uid changeset counts)
    """
    # Deduplicate on changeset id first so each changeset counts once per user.
    grp = (data.drop_duplicates(subset=['id'])
           .groupby('uid')['uid']
           .count())
    # .compute() materializes the lazy dask graph into a pandas object.
    return grp.compute()
def mult(x, y):
    """
    Take two integers x and y and return their product via repeated addition.

    BUG FIX: the original looped over range(x), silently returning 0 for
    any negative x; negative multiplicands are now handled correctly.
    """
    product = 0
    for _ in range(abs(x)):
        product += y
    return product if x >= 0 else -product
def check(expected, computed, label):
    """Compare a test result against its expected value.

    Prints a failure message mentioning label on mismatch; returns True on
    success, False otherwise.
    """
    if expected == computed:
        return True
    print("FAILED TEST AT: ", label)
    return False
def mock_grpc_response(response, proto_method):
    """Fake the gRPC response channel for the given proto_method.

    Args:
        response (dict): Expected response fields.
        proto_method: Message constructor to instantiate from the fields.

    Returns:
        The proto_method instance built from the response dict.
    """
    kwargs = dict(response)
    return proto_method(**kwargs)
def hdr3Dto2D(hdr3D, verbose=True):
    """
    Removing the wavelength component of a header, i.e., converting
    the hdr from 3D (lambda, dec, ra) to 2D (dec, ra)

    --- INPUT ---
    hdr3D     The header object to convert; modified in place and returned
    verbose   Toggle verbosity (currently unused)
    """
    # BUG FIX: snapshot the keys first — deleting entries while iterating
    # the live keys() view raises RuntimeError (dict) or skips keys.
    for key in list(hdr3D.keys()):
        if any(tag in key for tag in ('S3', '_3')):
            del hdr3D[key]
    return hdr3D
def match_by_split(split: str) -> dict:
    """Build the $match query for a split, one of ['train', 'valid', 'test']."""
    conditions = [
        {"is_AF": {"$exists": True}},
        {"split": split},
    ]
    return {"$and": conditions}
from typing import Iterable
def all_alnum(value: Iterable) -> bool:
    """Return True when every string item in value is alphanumeric.

    Non-string items are ignored, so they never cause a False result.
    """
    return all(item.isalnum() for item in value if isinstance(item, str))
import re
def rewrite_url(text):
    """
    Rewrite bare pic.twitter.com references in text into HTML anchors.
    """
    pattern = r"(pic.twitter.com/[^ ]+)"
    replacement = r"<a href='https://\1'>\1</a>"
    return re.sub(pattern, replacement, text)
import sys
import inspect
def linkcode_resolve(domain, info):
    """function for linkcode sphinx extension

    Maps a documented Python object to its GitHub source URL, with exact
    line anchors when the object can be located through inspect.

    domain: sphinx domain string; only 'py' is handled
    info: dict with 'module' and 'fullname' keys
    returns: URL string, or None when the object cannot be linked
    """
    def find_func():
        # find the installed module in sys module
        sys_mod = sys.modules[info["module"]]
        # use inspect to find the source code and starting line number
        names = info["fullname"].split(".")
        func = sys_mod
        for name in names:
            func = getattr(func, name)
        source_code, line_num = inspect.getsourcelines(func)
        # get the file name from the module
        # NOTE(review): assumes a flat legwork/<name>.py layout — confirm
        file = info["module"].split(".")[-1]
        return file, line_num, line_num + len(source_code) - 1
    # ensure it has the proper domain and has a module
    if domain != 'py' or not info['module']:
        return None
    # attempt to cleverly locate the function in the file
    try:
        file, start, end = find_func()
        # stitch together a github link with specific lines
        filename = "legwork/{}.py#L{}-L{}".format(file, start, end)
    # if you can't find it in the file then just link to the correct file
    except Exception:
        filename = info['module'].replace('.', '/') + '.py'
    return "https://github.com/TeamLEGWORK/LEGWORK/blob/main/{}".format(filename)
def valid_num(val, rule):
    """Validate val against rule.

    A callable rule is applied to val; a truthy rule is compared for
    equality; any falsy rule accepts everything (default True).
    """
    if callable(rule):
        return rule(val)
    if rule:
        return val == rule
    return True
def prepare_embeddings(embedding_dict):
    """
    Build entity<->id mappings plus the embedding matrix.

    :param embedding_dict: mapping of entity string (e.g. '$') to its
        embedding vector
    :return: (entity2idx, idx2entity, embedding_matrix) where entity2idx
        maps the stripped entity to its index, idx2entity is the reverse
        mapping, and embedding_matrix lists the vectors in index order
    """
    entity2idx = {}
    idx2entity = {}
    embedding_matrix = []
    for idx, (key, vector) in enumerate(embedding_dict.items()):
        name = key.strip()
        entity2idx[name] = idx
        idx2entity[idx] = name
        embedding_matrix.append(vector)
    return entity2idx, idx2entity, embedding_matrix
import pickle
def pickle_loader(fileobj):
    """Pickle data file loader. To be used with FileSource.

    Arguments:
        fileobj {bytes,str,file} -- pickled bytes, a file name, or a
            readable binary file object

    Returns:
        object -- Unpickled object

    Raises:
        ValueError -- when fileobj is none of the supported kinds
    """
    # The py2 `six.string_types` shim is gone: on Python 3 it is just (str,).
    if isinstance(fileobj, bytes):
        return pickle.loads(fileobj, encoding="latin1")
    if isinstance(fileobj, str):
        # Treat the string as a path to a pickle file.
        with open(fileobj, 'rb') as f:
            return pickle.load(f, encoding="latin1")
    if hasattr(fileobj, 'read'):
        return pickle.load(fileobj, encoding="latin1")
    raise ValueError('fileobj is not a filename or a file object')
import sys
def is_64_bit():
    """:return: True if the system is 64 bit. Otherwise it can be assumed to be 32 bit"""
    # maxsize > 2**32 - 1 is equivalent to maxsize >= 2**32.
    return sys.maxsize >= 1 << 32
def sum_no_duplicates(numbers):
    """Sum the values occurring exactly once in numbers.

    Duplicated values are skipped and printed, matching the original
    behaviour.
    """
    total = 0
    for n in numbers:
        if numbers.count(n) > 1:
            print(n)
        else:
            total += n
    return total
def prepare_plot_dict(df, col, main_count):
    """
    Preparing dictionary with data for plotting.
    I want to show how much higher/lower are the rates of Adoption speed for the current column comparing to base values (as described higher),
    At first I calculate base rates, then for each category in the column I calculate rates of Adoption speed and find difference with the base rates.
    """
    main_count = dict(main_count)
    plot_dict = {}
    for i in df[col].unique():
        # AdoptionSpeed distribution restricted to rows where df[col] == i.
        val_count = dict(df.loc[df[col] == i, 'AdoptionSpeed'].value_counts().sort_index())
        for k, v in main_count.items():
            if k in val_count:
                # Relative difference (%) between this category's rate and the base rate.
                # NOTE(review): the dict is keyed by the raw count val_count[k],
                # so equal counts overwrite each other — looks suspicious; verify.
                plot_dict[val_count[k]] = ((val_count[k] / sum(val_count.values())) / main_count[k]) * 100 - 100
            else:
                plot_dict[0] = 0
    return plot_dict
def splitDataSet(dataSet, axis, value):
    """
    Split a data set on a given feature.

    :param dataSet: list of samples (each a list of feature values)
    :param axis: index (dimension) of the splitting feature
    :param value: feature value to match
    :return: all samples whose feature equals value, with that feature
        column removed
    """
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] != value:
            continue
        # Keep the sample, dropping the axis-th feature column.
        retDataSet.append(featVec[:axis] + featVec[axis + 1:])
    return retDataSet
def recursive_fibonacci(n: int) -> int:
    """
    Returns n-th Fibonacci number
    n must be more than 0, otherwise it raise a ValueError.
    >>> recursive_fibonacci(0)
    0
    >>> recursive_fibonacci(1)
    1
    >>> recursive_fibonacci(2)
    1
    >>> recursive_fibonacci(10)
    55
    >>> recursive_fibonacci(-2)
    Traceback (most recent call last):
        ...
    ValueError: n must be more or equal than 0
    """
    if n < 0:
        raise ValueError('n must be more or equal than 0')
    # Base cases: F(0) = 0, F(1) = 1.
    if n < 2:
        return n
    return recursive_fibonacci(n - 2) + recursive_fibonacci(n - 1)
def bigrams(text):
    """Return a list of adjacent pairs in text (a sequence of letters or words).

    >>> bigrams('this')
    ['th', 'hi', 'is']
    >>> bigrams(['this', 'is', 'a', 'test'])
    [['this', 'is'], ['is', 'a'], ['a', 'test']]
    """
    pairs = []
    for start in range(len(text) - 1):
        pairs.append(text[start:start + 2])
    return pairs
def channel_shuffle(inputs, num_groups):
    """
    Channel shuffle (as used in ShuffleNet-style architectures).

    Args:
        inputs: tensor of shape (N, C, H, W); C must be divisible by num_groups
        num_groups: number of channel groups

    Returns:
        tensor of shape (N, C, H, W) with channels interleaved across groups
    """
    batch, channels, height, width = inputs.size()
    # Split channels into groups, swap the group and within-group axes,
    # then flatten back to (N, C, H, W).
    grouped = inputs.view(batch, num_groups, channels // num_groups, height, width)
    shuffled = grouped.permute(0, 2, 1, 3, 4).contiguous()
    return shuffled.view(batch, channels, height, width)
def get_costs(data, costs):
    """
    Attach per-user and total costs to each regional data record.

    data: pandas.DataFrame of regions (GID ids, population, area, decile).
    costs: pandas.DataFrame with Scenario/Strategy/Confidence/Decile rows
        and a 'Cost Per User ($)' column.
    Returns a list of dicts, one per (scenario, strategy, confidence, region)
        combination, each carrying cost_per_user and total_cost.
    """
    costs = costs.to_dict('records')
    # Index cost-per-user by "scenario_strategy_ci_decile" for O(1) lookup.
    lookup = {}
    scenarios = set()
    strategies = set()
    cis = set()
    for item in costs:
        scenario = item['Scenario']
        # Strategy names are keyed without spaces.
        strategy = item['Strategy'].replace(' ', '')
        ci = item['Confidence']
        decile = item['Decile']
        handle = '{}_{}_{}_{}'.format(scenario, strategy, ci, decile)
        lookup[handle] = item['Cost Per User ($)']
        scenarios.add(scenario)
        strategies.add(strategy)
        cis.add(ci)
    output = []
    data = data.to_dict('records')
    for scenario in list(scenarios):
        for strategy in list(strategies):
            for ci in list(cis):
                for item in data:
                    handle = '{}_{}_{}_{}'.format(scenario, strategy, ci, item['decile'])
                    # KeyError here means a combination present in `data`
                    # is missing from the costs table.
                    cost_per_user = lookup[handle]
                    output.append({
                        'GID_id': item['GID_id'],
                        'GID_level': item['GID_level'],
                        'GID_0': item['GID_0'],
                        'scenario': scenario,
                        'strategy': strategy,
                        'confidence': ci,
                        'population': item['population'],
                        'population_km2': item['population_km2'],
                        'area_km2': item['area_km2'],
                        'decile': item['decile'],
                        'cost_per_user': cost_per_user,
                        'total_cost': item['population'] * cost_per_user,
                    })
    return output
def health(self):
    """
    The health of the app in the container.

    Returns the healthcheck status string, or 'none' when the container
    state carries no health information.
    """
    state = self.attrs['State']
    if state.get('Health') is None:
        return 'none'
    return state['Health']['Status']
import copy
def make_hash(o):
    """
    Recursively hash a dictionary, list, tuple or set (to any depth) whose
    leaves are hashable, yielding a stable value for equal structures.
    Sequences/sets come back as a tuple of member hashes; everything else
    as a plain hash.
    """
    if isinstance(o, (set, tuple, list)):
        return tuple(make_hash(item) for item in o)
    if not isinstance(o, dict):
        return hash(o)
    # Deep-copy so the caller's nested dict values are never touched.
    hashed = {k: make_hash(v) for k, v in copy.deepcopy(o).items()}
    return hash(tuple(frozenset(sorted(hashed.items()))))
from bs4 import BeautifulSoup
import re
import os
def html_to_text(html):
    """Strip HTML tags from a string.
    Parameters
    ----------
    html : |str|
        Text containing HTML tags.
    Returns
    -------
    |str|
        Text without HTML tags. The string is indented according to
        where tags were nested, but blank lines are removed.
    Example
    -------
    >>> html = ('<html><head></head><body><p>Hi!<br>Here is the '
    >>>         '<a href="https://www.python.org">link</a> you wanted.</p></html>')
    >>> html_to_text(html)
    Hi!
    Here is the
     link
    you wanted.
    """
    html_parser = 'html5lib'
    # First pass: prettify so nesting becomes per-line indentation.
    soup = BeautifulSoup(html, html_parser)
    pretty_html = soup.prettify()
    # Second pass: extract plain text from the indented markup.
    pretty_soup = BeautifulSoup(pretty_html, html_parser)
    text = pretty_soup.get_text()
    # Drop blank/whitespace-only lines but preserve indentation.
    lines = [s for s in text.splitlines() if not re.search(r'^\s*$', s)]
    return os.linesep.join(lines)
import uuid
def generate_uuid():
    """Generate a unique identifier string for a database row.

    Lifted from:
    https://stackoverflow.com/questions/183042/how-can-i-use-uuids-in-sqlalchemy
    """
    unique = uuid.uuid4()
    return str(unique)
def single_pulse_SCPI(
    pulsewidth, updown, high_voltage, low_voltage, channel="1", *args, **kwargs
):
    """
    Return the SCPI string that puts the pulse generator in the state
    needed to apply a single pulse.

    args:
        pulsewidth (str): Pulsewidth, e.g. '10ns'; allowed units {ns, us, ms, s}
        updown (str): Polarity, 'up' or 'down'.
        high_voltage (str): High voltage of pulse, e.g. '1000mv'; units {V, mv}
        low_voltage (str): Low voltage of pulse, e.g. '-1000mv'; units {V, mv}
        channel (str): Output channel, '1' or '2'.
    """
    if pulsewidth[-2:] not in {"ns", "us", "ms"} and pulsewidth[-1] != "s":
        raise ValueError("pulsewidth " + str(pulsewidth) + " not supported")
    if updown not in {"up", "down"}:
        raise ValueError("updown " + str(updown) + " not supported")
    if high_voltage[-2:].lower() != "mv" and high_voltage[-1].lower() != "v":
        raise ValueError("high_voltage " + str(high_voltage) + " not supported")
    if low_voltage[-2:].lower() != "mv" and low_voltage[-1].lower() != "v":
        raise ValueError("low_voltage " + str(low_voltage) + " not supported")
    if channel not in {"1", "2"}:
        raise ValueError("channel " + str(channel) + " not supported")

    commands = ["outp" + channel + ":puls:mode sin;"]
    if updown == "up":
        commands.append(":sour" + channel + ":inv off;")
        # Positive pulse: program the high level before the low level.
        commands.append(":sour" + channel + ":volt:lev:imm:high " + high_voltage + ";")
        commands.append(":sour" + channel + ":volt:lev:imm:low " + low_voltage + ";")
    else:
        commands.append(":sour" + channel + ":inv on;")
        # Inverted pulse: program the low level before the high level.
        commands.append(":sour" + channel + ":volt:lev:imm:low " + low_voltage + ";")
        commands.append(":sour" + channel + ":volt:lev:imm:high " + high_voltage + ";")
    # puls1 means the first pulse because the generator is in single mode.
    commands.append(":sour" + channel + ":puls1:wid " + pulsewidth + ";")
    return "".join(commands)
def NamesOfDefinedFlags():
    """Returns: List of names of the flags declared in this module."""
    return ['tmod_bar_' + suffix for suffix in ('x', 'y', 'z', 't', 'u', 'v')]
import logging
import json
import requests
def news_api_request() -> dict:
    """Makes an news API request and returns info

    Reads the NewsAPI key from a cwd-relative config.json and fetches the
    GB top headlines, returning the decoded JSON payload as a dict.
    Raises on missing config.json / keys or on request failure.
    """
    logging.info('pending news API request')
    with open('config.json', 'r') as f: #acceses config.json file
        json_file = json.load(f)
    keys = json_file["API-keys"] #API key is extracted from config.json
    api_key_news = keys["news"]
    base_url = "https://newsapi.org/v2/top-headlines?"
    country = "gb"
    complete_url = base_url + "country=" + country + "&apiKey=" + api_key_news
    response = requests.get(complete_url) #requests API information from url
    news_json = response.json()
    return news_json
from typing import Optional
import json
def _try_parse_json(json_string: str, ref_val=None) -> Optional[dict]:
"""
Return whether the string can be interpreted as json.
:param json_string: str, string to check for json
:param ref_val: any, not used, interface design requirement
:return None if not parseable, otherwise the parsed json object
"""
parsed = None
try:
parsed = json.loads(json_string)
except (ValueError, TypeError):
pass
return parsed | a609eeefb32d88970ecf039578e8eb8a65ad8108 | 692,545 |
def collapse_words(doc):
    """Flatten a doc (parts -> sentences -> words) into a flat word list."""
    words = []
    for part in doc:
        for sent in part:
            words.extend(sent)
    return words
import sys
import os
def hilight(textToColor, color, bold):
    """
    Used to add highlights to various text for displaying in a terminal
    @param textToColor: string, the text to be colored
    @param color: string, 'red' or 'green'; anything else resets to default
    @param bold: boolean, used to bold the textToColor (ANSI path only)
    @return: the (possibly ANSI-wrapped) string
    """
    # BUG FIX: sys.platform.__contains__("win") also matched 'darwin'
    # (macOS), so ANSI-capable Macs ran the Windows 'color' commands.
    if sys.platform.startswith("win"):
        if color == "red":
            os.system('color 04')
        elif color == "green":
            os.system('color 02')
        else:
            os.system('color')  # reset to default
        return textToColor
    attr = []
    if color == "red":
        attr.append('31')
    elif color == "green":
        attr.append('32')
    else:
        attr.append('0')
    attr.append('1' if bold else '0')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), textToColor)
def get_project_palettes(window):
    """Get project palettes (empty list when the project has none)."""
    data = window.project_data() or {}
    return data.get('color_helper_palettes', [])
def remove_spaces_from_sentences(sents):
    """
    Rewrite every word line so its final column says SpaceAfter=No.

    Comment lines (starting with '#') pass through unchanged; a word whose
    final column is neither '_' nor already contains SpaceAfter=No raises
    ValueError. Returns a new list of sentences.
    """
    result = []
    for sentence in sents:
        rewritten = []
        for word in sentence:
            if word.startswith("#"):
                rewritten.append(word)
                continue
            fields = word.split("\t")
            last = fields[-1]
            if last == "_":
                fields[-1] = "SpaceAfter=No"
            elif "SpaceAfter=No" not in last:
                raise ValueError("oops")
            rewritten.append("\t".join(fields))
        result.append(rewritten)
    return result
def parse_point(s_point):
    """
    Parse a point from the configuration file.

    :param s_point: string of the form "x,y"
    :return: dict with integer 'x' and 'y' entries
    """
    parts = s_point.split(",")
    return {"x": int(parts[0]), "y": int(parts[1])}
def get_available_similarity_metrics():
    """List the metrics supported for pairwise similarity calculation."""
    metrics = ("pearson", "kendall", "spearman")
    return list(metrics)
def basename(path):
    """Return the base name of pathname *path*. This is the second half of the pair
    returned by ``split(path)``. Note that the result of this function is different
    from the Unix :program:`basename` program; where :program:`basename` for
    ``'/foo/bar/'`` returns ``'bar'``, the :func:`basename` function returns an
    empty string (``''``)."""
    # BUG FIX: this was a stub that always returned ''. Implemented per the
    # docstring, mirroring posixpath.basename ('/' separator).
    index = path.rfind('/') + 1
    return path[index:]
import numpy
def cos2degree(cos) -> float:
    """
    Convert cosine value(s) to degrees.

    :param cos: float, List[float], or numpy.ndarray[float]
    :return: the corresponding angle(s) in degrees
    """
    radians = numpy.arccos(cos)
    return (radians / numpy.pi) * 180
def is_prime(number):
    """Check if the number is prime by 6k±1 trial division.
    Source: https://en.wikipedia.org/wiki/Primality_test"""
    if number <= 1:
        return False
    if number <= 3:
        return True
    if number % 2 == 0 or number % 3 == 0:
        return False
    divisor = 5
    # All primes > 3 are of the form 6k ± 1.
    while divisor * divisor <= number:
        if number % divisor == 0 or number % (divisor + 2) == 0:
            return False
        divisor += 6
    return True
def is_alphabet_or_digit(c):
    """
    Return True for ASCII letters, digits, and '.' (the dot is kept so
    decimal numbers pass, as in the original character tables).
    """
    allowed = set(u"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789.")
    return c in allowed
def make_anchor(value: str) -> str:
    """
    Makes a GitHub-compatible anchor for `value`.

    Arguments:
        value: Heading to anchor.

    Returns:
        Anchor: lowercased alphanumerics, with spaces/hyphens collapsed to
        '-' and everything else dropped.
    """
    parts = []
    for ch in value:
        if ch.isalnum():
            parts.append(ch.lower())
        elif ch in (" ", "-"):
            parts.append("-")
    return "".join(parts)
def adjust_contrast(image, contrast_level):
    """Return the image scaled to a contrast level in [0, 1].

    parameters:
    - image: a numpy.ndarray
    - contrast_level: a scalar in [0, 1]; 1 -> full contrast
    """
    assert(contrast_level >= 0.0), "contrast_level too low."
    assert(contrast_level <= 1.0), "contrast_level too high."
    # Blend toward mid-gray 0.5 as contrast decreases.
    offset = (1 - contrast_level) / 2.0
    return offset + image.dot(contrast_level)
import time
def benchmark(func):
    """Benchmark decorator using the time module.

    Prints the wrapped function's runtime and returns its value.
    """
    def run_benchmark(*args, **kwargs):
        # BUG FIX: the end timestamp was previously captured BEFORE calling
        # func, so the reported runtime was always ~0 seconds.
        start = time.time()
        value = func(*args, **kwargs)
        runtime = time.time() - start
        msg = "Runtime for {func} took {time:.3f} seconds."
        print(msg.format(func=func.__name__, time=runtime))
        return value
    return run_benchmark
import copy
def get_meta_for_row(cui, configs, row):
    """
    get the min of each config
    NB: cui is ignored, and returns only the given CUI
      - I can't figure out how to make sense of
        word-level CUIs...which trumps the other?

    cui: current CUI; replaced by a row-level CUI when empty or 'G*'-prefixed
    configs: set-like of configs; shallow-copied before row confs are added
    row: iterable of plain strings or (cui, content, conf) tuples
    returns (cui, row_configs, text) where text lists the row's contents
    """
    text = []
    # Shallow copy so the caller's configs collection is not mutated by .add().
    row_configs = copy.copy(configs)
    for el in row:
        if isinstance(el, (list, tuple)):
            row_cui, content, conf = el
            row_configs.add(conf)
            text.append(content)
            # Adopt the row's CUI only while ours is empty or generic ('G*').
            if not cui or cui.startswith('G'):
                cui = row_cui
        else:
            text.append(el)
    return cui, row_configs, text
def _make_mergable_with_params(dist, category):
"""Change the index and Series name to easily merge it to params.
Args:
dist (pandas.Series): distribution of number of contacts. The
index is the support, the values the probabilities.
category (str): name of the contact model to which the distribution
belongs. This is set as the category index level of the
returned Series.
Returns:
pandas.Series: Series with triple index category, subcategory, name.
the name index level is the support. the value column contains
the probabilities.
"""
dist.name = "value"
dist = dist.to_frame()
dist["category"] = category
dist["subcategory"] = "n_contacts"
dist["name"] = dist.index
dist = dist.set_index(["category", "subcategory", "name"], drop=True)
return dist | d7dc9c902a4695c76a6638e037af6a3495eeeaeb | 692,560 |
from datetime import datetime
def date_to_dir(date: datetime) -> str:
    """Render a datetime in the YYYYMMDD form used for directory names."""
    return f"{date:%Y%m%d}"
def _tess_report_paths(file_name):
""" TESS Report File """
# tess2018206190142-s0001-s0001-0000000349518145-01-00106_dvs.pdf
# sssss eeeee zzzzffffppppllll
# 18-23 24-29 30 34 38 42 46
# sssss = file_name[18:23]
eeeee = file_name[24:29]
zzzz = file_name[30:34]
ffff = file_name[34:38]
pppp = file_name[38:42]
llll = file_name[42:46]
parts = [
"tess",
"public",
"tid",
eeeee,
zzzz,
ffff,
pppp,
llll,
file_name
]
return ["/".join(parts)] | db822909f791857b10515a894d7e4a71dd19b66e | 692,562 |
def strip_levels(df, rows=None, columns=None):
    """
    Strip a MultiIndex DataFrame of the specified row/column index levels.

    Parameters
    ----------
    df: pandas.DataFrame
    rows: int
        Row index level to remove, default None
    columns: int
        Column index level to remove, default None

    Returns
    -------
    pandas.DataFrame
        A copy of df stripped of the requested levels; levels are only
        dropped while more than one remains.
    """
    out = df.copy()
    if rows is not None and out.index.nlevels > 1:
        out.index = out.index.droplevel(rows)
    if columns is not None and out.columns.nlevels > 1:
        out.columns = out.columns.droplevel(columns)
    return out
def first_missing_positive(a):
    """
    Return the smallest positive integer absent from a.

    O(n) time; reuses the input array for state (a is mutated).
    """
    n = len(a)
    i = 0
    while i < n:
        j = a[i]
        # Cycle placement: drop each in-range value into its home slot a[j-1].
        while 0 < j <= n and a[j-1] != j:
            a[j-1], j = j, a[j-1]
        i += 1
    for i, x in enumerate(a):
        if x != i+1:
            return i+1
    # BUG FIX: when every slot holds its own index (1..n all present), the
    # answer is n+1; previously the function fell off the end returning None.
    return n + 1
def add_prices(basket):
    """Return the total price of all groceries in the basket dict,
    rounded to 2 decimal places."""
    return round(sum(basket.values()), 2)
import io
def read_file(filepath, mode):
    """
    Read a file and return its content.

    (The old docstring wrongly described this as writing a file with no
    return value.)

    Args:
        filepath: File absolute path.
        mode: Read mode, e.g. 'r'.
    Usage:
        read_file('/xxxx/temp.txt', 'r')
    Return:
        The file content as a string.
    """
    # io.open is an alias of the builtin open on Python 3.
    with io.open(filepath, mode, encoding='utf-8') as fp:
        return fp.read()
def may_develop_severe_illness(age, sex, rng):
    """
    Draw whether a person would likely require hospitalization given that
    they contracted Covid-19.

    Args:
        age (int): age in years (capped at 90 for the lookup)
        sex (str): only the first letter matters ('f'/'m'/other)
        rng (RandState): random state used for the draw

    Returns:
        Boolean: True when the uniform draw falls under the age/sex likelihood
    """
    # likelihood by decade:  <10   <20   <30   <40   <50   <60   <70   <80   <90   90+
    female = [0.02, 0.002, 0.05, 0.05, 0.13, 0.18, 0.16, 0.24, 0.17, 0.03]
    male = [0.002, 0.02, 0.03, 0.07, 0.13, 0.17, 0.22, 0.22, 0.15, 0.03]
    other = [0.02, 0.02, 0.04, 0.07, 0.13, 0.18, 0.24, 0.24, 0.18, 0.03]
    first_letter = sex.lower()[:1]
    if first_letter == 'f':
        likelihood = female
    elif first_letter == 'm':
        likelihood = male
    else:
        likelihood = other
    decade = min(age, 90) // 10
    return rng.rand() < likelihood[decade]
def get_artists(tracks):
    """
    Group track ids by artist.

    Returns a dict where:
        key: artist_id
        value: list of track ids
    """
    artists = {}
    for _, row in tracks.iterrows():
        artists.setdefault(row['artist_id'], []).append(row['track_id'])
    return artists
def is_prime(number: int) -> bool:
    """
    Checks if `number` is a prime number.

    Parameters
    ----------
    number :
        The number to check for primality.

    Returns
    -------
    is_number_prime : `bool`
        Boolean indicating if `number` is prime or not.
    """
    if number <= 1:
        return False
    # number ** 0.5 is faster than math.sqrt(number)
    limit = int(number ** 0.5) + 1
    return all(number % candidate for candidate in range(2, limit))
import torch
def MNLLLoss(logps, true_counts):
    """Multinomial negative log-likelihood loss.

    Given normalized log probabilities (rows summing to 1 in probability
    space, e.g. from a log softmax) and an equal-sized tensor of observed
    counts, returns the negative log probability of the counts under a
    multinomial distribution. The last axis holds the categories; all
    leading axes are treated as examples.

    Adapted from Alex Tseng.

    Parameters
    ----------
    logps: torch.tensor, shape=(n, ..., L)
        Log probabilities over `L` categories.
    true_counts: torch.tensor, shape=(n, ..., L)
        Observed counts over the same categories.

    Returns
    -------
    loss: torch.tensor
        Per-example negative multinomial log likelihood (last axis reduced).
    """
    total = torch.sum(true_counts, dim=-1)
    # log(N!) for the total count of each example.
    log_fact_sum = torch.lgamma(total + 1)
    # sum over categories of log(n_i!).
    log_prod_fact = torch.lgamma(true_counts + 1).sum(dim=-1)
    # sum over categories of n_i * log(p_i).
    log_prod_exp = (true_counts * logps).sum(dim=-1)
    return log_prod_fact - log_fact_sum - log_prod_exp
from typing import Optional
def pkcs7pad(bs: bytes, blocksize: Optional[int] = None) -> bytes:
    """
    S2C09 - Implement PKCS#7 padding
    https://cryptopals.com/sets/2/challenges/9

    Pad bs to a multiple of blocksize (default 16) by appending bytes
    whose value equals the number of padding bytes added. An already
    block-aligned input receives a full block of padding, per PKCS#7.
    """
    size = 16 if blocksize is None else blocksize
    numpad = size - len(bs) % size
    return bs + bytes([numpad]) * numpad
import sqlite3
def returnValueDB(db_path, table, column, criteria_column, criteria):
    """
    Return a single value from a database table.
    Input:
      `db_path`(str): Full path to database.
      `table`(str): Name of table to retrieve from.
      `column`(str): Column to retrieve from.
      `criteria_column`(str): Name of column to select row by. Example: 'ID'
      `cirteria`: criteria value in the specified column. Example: '2' (where ID=2)
    Returns:
      Retrieved value, or None when the lookup fails (a message is printed).
    WARNING: table/column names are still interpolated into the SQL and must
    come from trusted code; the criteria VALUE is now bound as a parameter
    to avoid SQL injection / quoting bugs.
    """
    conn = sqlite3.connect(db_path)
    try:
        with conn:
            cursor = conn.cursor()
            # Identifiers cannot be bound as parameters; only the value is.
            query = "SELECT {} FROM {} WHERE {}=?".format(column, table,
                                                          criteria_column)
            cursor.execute(query, (criteria,))
            try:
                return cursor.fetchone()[0]
            except Exception as e:
                print('Error, could not select a valid value from sqlite3 db')
                print('Error message: ', e)
    finally:
        # The original leaked the connection; close it explicitly.
        conn.close()
import glob
import os
def find_expected_agent_names(flume_conf_directory):
    """
    Gets the names of the flume agents that Ambari is aware of.
    :param flume_conf_directory: the configuration directory (ie /etc/flume/conf)
    :return: a list of names of expected flume agents
    """
    pattern = flume_conf_directory + os.sep + "*/ambari-meta.json"
    # Each agent is named after the directory holding its ambari-meta.json.
    return [os.path.dirname(meta).split(os.sep)[-1] for meta in glob.glob(pattern)]
def split_host(host):
    """
    Splits host into host and port.
    :param str host:
        Hostname, optionally including a port ("name" or "name:8080").
    :returns:
        ``(str(host), int(port))`` when a port is present, otherwise
        ``(str(host), None)``.
    """
    host, port = (host.split(':') + [None])[:2]
    # The original unconditionally called int(port) and raised TypeError
    # whenever the input contained no port; only convert when one exists.
    return host, int(port) if port else None
def isSubsequence(self, s_string, t_string):
    """
    Return True if ``s_string`` is a subsequence of ``t_string``.
    :type s_string: str
    :type t_string: str
    :rtype: bool
    """
    search_from = 0
    for ch in s_string:
        # Locate the next occurrence of ch at or after search_from.
        found_at = t_string.find(ch, search_from)
        if found_at == -1:
            return False
        search_from = found_at + 1
    return True
def lerp_np(x, y, w):
    """Linear interpolation between x and y with weight w (w=0 -> x, w=1 -> y)."""
    return x + w * (y - x)
def generate_single_type_function_pointer_typedefs(functions):
    """
    Generate typedef lines for function pointers:
    typedef return_type (*tMyFunction)(arguments).
    """
    template = "typedef {} (*t{}) ({});"
    return [
        template.format(fn.return_type,
                        fn.name,
                        ", ".join(str(arg) for arg in fn.arguments))
        for fn in functions
    ]
def GetPw(abs_hum, elevation):
    """
    Compute the water vapour partial pressure [kPa].
    Parameters
    ----------
    abs_hum : float
        Absolute humidity [kg/kg'].
    elevation : float
        Elevation above sea level [m].
    Returns
    ----------
    pw: float
        Water vapour partial pressure [kPa].
    """
    # Atmospheric pressure at this elevation [hPa];
    # (1013.2 hPa corresponds to the standard pressure at 0 m).
    pressure = 1013.2 - 0.12 * elevation + 5.44 * 10**(-6) * elevation ** 2
    pressure = pressure / 10.0  # [hPa] -> [kPa]
    return (abs_hum * pressure) / (abs_hum + 0.62198)
def binary_search(i, l):
    """
    Check whether item ``i`` exists in the sorted list ``l`` using an
    iterative (non-recursive) binary search.
    i -- item to search for
    l -- sorted list to search in
    Returns True when found, False otherwise.  (The original fell off the
    end of the loop and implicitly returned None on a miss, and carried a
    ``found`` flag that was never set.)
    """
    first = 0
    last = len(l) - 1
    while first <= last:
        midpoint = (first + last) // 2
        if l[midpoint] == i:
            return True
        if i < l[midpoint]:
            last = midpoint - 1
        else:
            first = midpoint + 1
    return False
import re
def absolute_links(string, base):
    """Rewrite every relative Markdown link ``[text](link)`` as ``[text](base/link)``.

    A link counts as relative when its target contains no ":" (no scheme).
    """
    prefix = base.rstrip("/")
    relative_link = r"\[(.+?)\]\(([^:]+?)\)"
    replacement = "[\\1]({}/\\2)".format(prefix)
    return re.sub(relative_link, replacement, string)
def message(method):
    """
    Decorator for actor methods that turns a direct call into a message
    pushed onto the actor's mailbox (the call is dropped, returning None,
    when the actor has no mailbox).
    """
    def wrapper(self, *args, **kwargs):
        mailbox = self._mailbox
        if mailbox:
            mailbox.push([method, args, kwargs, None])
    return wrapper
def check_set_number(value, typ, default=None, minimum=None, maximum=None):
    """Coerce ``value`` to ``typ`` and validate an optional [minimum, maximum] range.

    Returns ``default`` when ``value`` is None.
    Raises TypeError when the value cannot be converted to ``typ`` and
    ValueError when it falls outside the permitted range.
    """
    if value is None:
        return default
    if not isinstance(value, typ):
        try:
            value = typ(value)
        except (TypeError, ValueError) as exc:
            # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
            # propagate; chained so the original conversion error is kept.
            raise TypeError("Incompatible type: Expected {0}, got {1}.".format(typ, type(value))) from exc
    if minimum is not None:
        if value < minimum:
            raise ValueError("Value must be larger than {}.".format(minimum))
    if maximum is not None:
        if value > maximum:
            raise ValueError("Value must be smaller than {}.".format(maximum))
    return value
def decode(obj, encoding=None, errors=None):  # real signature unknown; restored from __doc__
    """
    decode(obj, [encoding[,errors]]) -> object
    Decodes obj using the codec registered for encoding. encoding defaults
    to the default encoding. errors may be given to set a different error
    handling scheme. Default is 'strict' meaning that encoding errors raise
    a ValueError. Other possible values are 'ignore' and 'replace'
    as well as any other name registered with codecs.register_error that is
    able to handle ValueErrors.
    """
    # Stub: the real implementation lives in C; return a placeholder object.
    placeholder = object()
    return placeholder
import pathlib
def create_output_file_path(filepath_output):
    """
    Ensure the parent directory of ``filepath_output`` exists and return
    the path as a pathlib.Path instance.

    Raises RuntimeError when the parent exists but is not a directory
    (e.g. it is a regular file).
    """
    # Convert filepath from string to pathlib.Path instance.
    path = pathlib.Path(filepath_output)
    parent = path.parent
    if not parent.exists():
        # parents=True also creates missing intermediate directories; the
        # original mkdir(exist_ok=True) failed when a grandparent was missing.
        parent.mkdir(parents=True, exist_ok=True)
    if not parent.is_dir():
        raise RuntimeError("failed to create output directory")
    return path
def kthToLast(head, index):
    """
    Return the value of the ``index``-th node from the end of a singly
    linked list, using two pointers kept ``index`` nodes apart.
    """
    lead = head
    for _ in range(index):
        lead = lead.next
    trail = head
    # When the lead pointer falls off the end, trail is at the target node.
    while lead:
        lead = lead.next
        trail = trail.next
    return trail.val
import os
def __get_possible_paths(path):
    """
    Expand the first ``*`` wildcard in ``path`` against the filesystem and
    return the set of concrete paths that exist.  A path without any
    wildcard is returned unchanged (as a one-element set).
    """
    if "*" not in path:
        return {path}
    before, _, after = path.partition("*")
    # Drop the separator preceding the "*" and the one following it.
    root = before[:-1]
    remainder = after[1:]
    matches = set()
    for name in os.listdir(root):
        candidate = os.path.join(root, name, remainder)
        if os.path.exists(candidate):
            matches.add(candidate)
    return matches
def opt_err_func(params, x, y, func):
    """
    Residual function for fitting ``func`` by non-linear optimization.
    Parameters
    ----------
    params : tuple
        Parameters of `func`, in the order `func` expects them.
    x : float array
        The independent variable.
    y : float array
        The dependent variable.
    func : function
        A function with signature `(x, *params)`.
    Returns
    -------
    float array
        The residuals ``y - func(x, *params)``.
    """
    predicted = func(x, *params)
    return y - predicted
import os
def normalize_gpus(gpus, setEnviron=False):
    """Map command-line GPU indices through CUDA_VISIBLE_DEVICES.

    When CUDA_VISIBLE_DEVICES is unset or empty the list is returned
    unchanged; otherwise each index is translated into the corresponding
    visible device id (a string).  Raises ValueError when more GPUs are
    requested than are visible, or when an index is out of range.
    """
    env_value = os.environ.get('CUDA_VISIBLE_DEVICES')
    if not env_value:
        # No restriction in effect: accept the list we were given.
        return gpus
    visible = env_value.split(',')
    if len(visible) < len(gpus):
        raise ValueError("Command line specified more GPUs than are available via CUDA_VISIBLE_DEVICES")
    mapped = []
    for idx in gpus:
        if idx >= len(visible):
            raise ValueError("Command line GPU {} is outside visible range".format(idx))
        mapped.append(visible[idx])
    if setEnviron:
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(g) for g in mapped)
    return mapped
def _minutes_to_seconds(time):
    """Convert a duration from minutes to seconds."""
    seconds_per_minute = 60.0
    return time * seconds_per_minute
def _int2str(label, str_vars):
    """Replace integer values with string literals for string variables.
    @param label: mapping from variable names to integers (as strings)
    @type label: C{dict}
    @param str_vars: mapping from each string variable to the list of
        string literals that comprise its range; the integer value indexes
        into that list.
    @type str_vars: C{dict}
    @rtype: C{dict}
    """
    converted = dict(label)
    for var, value in label.items():
        if var in str_vars:
            converted[var] = str_vars[var][int(value)]
    return converted
import re
def _stage_from_version(version):
    """Return "prd", "stg", or "dev" for the given version string. A value is always returned."""
    if not version:
        return "dev"
    match = re.match(r"^(?P<xyz>\d+\.\d+\.\d+)(?P<extra>.*)", version)
    if not match:
        return "dev"
    # Any suffix after x.y.z (rc tags, local builds, ...) marks a staging build.
    return "stg" if match.group("extra") else "prd"
def cd1(rbm_w, visible_data):
    """
    This is an implementation of Contrastive Divergence gradient estimator with 1 full Gibbs update, a.k.a. CD-1.
    <rbm_w> is a matrix of size <number of hidden units> by <number of visible units>
    <visible_data> is a (possibly but not necessarily binary) matrix of size <number of visible units> by <number of data cases>
    The returned value is the gradient approximation produced by CD-1 (Contrastive Divergence 1). It's of the same shape as <rbm_w>.

    Not yet implemented: always raises NotImplementedError.  (The original
    contained an unreachable ``return ret`` referencing an undefined name.)
    """
    raise NotImplementedError
def endx(eta, merger_type):
    """
    Gives the ending value/upper boundary for integration of the
    post-Newtonian parameter, based on Buskirk et al. (2019) equation 23.
    Parameters
    ----------
    eta: float
        Symmetric mass ratio of the binary, can be obtained from
        get_M_and_eta().
    merger_type: string
        'BH' for a BH-BH merger, 'NS' for a BH-NS or NS-NS merger.
    Returns
    -------
    value: float
        The ending value for the post-Newtonian integration.
    """
    # Input type checking.
    assert type(eta) == float, 'eta should be a float.'
    factor = 1 + (7/18)*eta
    if merger_type == 'BH':
        # Buskirk eq. 23, with 1/6 -> 1/3 to ensure overlap with the merger
        # portion for the matching script.
        return (1/3)*factor
    if merger_type == 'NS':
        # 1/6 because 3RSch is the ISCO for BH-NS and approximately the
        # touching point for NS-NS, so the end condition for both.
        return (1/6)*factor
    raise ValueError('merger_type must be either \'BH\' for BH-BH or '
                     '\'NS\' for BH-NS or NS-NS.')
def one_hot(label, all_labels):
    """One-hot encode ``label`` against the ordered list ``all_labels``."""
    return [1 if label == candidate else 0 for candidate in all_labels]
def read_wbo(file):
    """
    Parse a Wiberg bond order (.wbo) output file produced with the XTB
    option.  Returns ``(bonds, wbos)`` where ``bonds`` is a list of
    [atom_i, atom_j] index pairs and ``wbos`` the matching bond orders;
    returns None when the filename does not contain ".wbo".
    """
    if ".wbo" not in file:
        return None
    # ``with`` guarantees the handle is closed even if parsing fails.
    with open(file, "r") as handle:
        lines = handle.readlines()
    bonds, wbos = [], []
    for line in lines:
        fields = line.split()
        bonds.append([int(fields[0]), int(fields[1])])
        wbos.append(float(fields[2]))
    return bonds, wbos
import pickle
def load_cachedfilestring(cache_folder, filename):
    """
    Loads the file string that has been previously cached in the cache folder.
    Args:
    cache_folder: A string representing the path of the cache folder
        (expected to include a trailing separator, as the original did).
    filename: A string representing the name of the file that is being loaded.
    Returns:
    The file string loaded from the cache folder
    (returns an empty string if there is nothing valid to load).
    """
    try:
        # ``with`` closes the handle that the original leaked via
        # ``pickle.load(open(...))``.
        with open(cache_folder + filename, 'rb') as handle:
            return pickle.load(handle)
    except Exception:
        # Best-effort cache read: any failure (missing file, corrupt pickle,
        # ...) falls back to an empty string.  ``Exception`` replaces the
        # original bare ``except`` so KeyboardInterrupt/SystemExit propagate.
        return ""
def calculate_volumes(args, sample_concentration):
    """
    Calculates volumes for dilution and distribution of a sample.

    Returns a 5-tuple:
    (uL of sample to dilute, uL of water for dilution,
     uL of diluted sample per reaction, uL of water per reaction,
     max template volume per reaction),
    or None when no dilution in the tested two-fold series fits.
    @param args:
    @param sample_concentration:
    @return:
    """
    template_in_reaction = float(args.DNA_in_Reaction)
    pcr_volume = float(args.PCR_Volume)
    # Reagents default to half the PCR volume unless explicitly provided
    # (a falsy ReagentVolume attribute counts as "not provided").
    reagent_volume = float(getattr(args, "ReagentVolume", None) or pcr_volume * 0.5)
    max_template_vol = round(pcr_volume - reagent_volume, 1)
    # The undiluted sample is usable when at least 2 uL would be pipetted.
    undiluted_vol = template_in_reaction / sample_concentration
    if undiluted_vol >= 2:
        sample_vol = round(undiluted_vol, 2)
        return sample_vol, 0, 0, max_template_vol - sample_vol, max_template_vol
    # Otherwise walk a series of two-fold dilutions (1:2 up to 1:100),
    # keeping the diluted-sample volume within [2, max_template_vol] uL.
    for dilution in range(2, 101, 2):
        diluted_conc = sample_concentration / dilution
        diluted_vol = template_in_reaction / diluted_conc
        if 2 <= diluted_vol <= max_template_vol:
            diluted_sample_vol = round(diluted_vol, 2)
            reaction_water_vol = max_template_vol - diluted_sample_vol
            return (1, dilution - 1, diluted_sample_vol,
                    reaction_water_vol, max_template_vol)
    return None
import hashlib
def _get_file_id(data, mimetype=None):
    """
    Compute a sha224-based content id for a media file.
    Parameters
    ----------
    data : bytes
        Content of the media file in bytes.  Other types raise TypeError.
    mimetype : str
        Any string; folded into the hash.  None is treated as the empty
        string.  [default: None]
    """
    if mimetype is None:
        mimetype = ""
    # .update() folds the mimetype in without copying the data buffer.
    digest = hashlib.sha224(data)
    digest.update(mimetype.encode("utf-8"))
    return digest.hexdigest()
def split_ids(ids, n=2):
    """Expand every id into ``n`` ``(id, position)`` tuples.

    Yields (id, 0), (id, 1), ..., (id, n-1) for each id, e.g.
    [(id1, 0), (id1, 1), (id2, 0), (id2, 1), ...].  Downstream code uses
    the position as the ``pos`` argument of ``get_square`` in utils.py:
    pos=0 selects the left part of an image, pos=1 the right part.
    """
    return ((identifier, part) for identifier in ids for part in range(n))
def insert_statement(table_name, columns, data=None):
    """
    Generates an INSERT statement template for ``table_name``.
    :param str table_name: table name
    :param tuple columns: tuple of column names
    :param data: dict of column name => value mapping; columns absent from
        it are left out of the statement
    :type data: dict or None
    :return: SQL statement template suitable for sqlalchemy.execute()
    :rtype: str
    """
    values = data or {}
    present = [column for column in columns if column in values]
    placeholders = [":{}".format(column) for column in present]
    return "INSERT INTO {table_name} ({columns_list}) VALUES ({values_list})".format(
        table_name=table_name,
        columns_list=', '.join(present),
        values_list=', '.join(placeholders),
    )
import numbers
def torch_data_sum(x):
    """
    Sum the wrapped data of a ``torch.autograd.Variable`` (like
    ``x.data.sum()``); plain numbers pass through unchanged.
    """
    return x if isinstance(x, numbers.Number) else x.data.sum()
def batchify(array: list, bs: int = 1, generator: bool = True):
    """Split ``array`` into consecutive batches of size ``bs``.

    Returns a generator of batches by default, or a fully materialized
    list of batches when ``generator`` is False.
    """
    def batches():
        for start in range(0, len(array), bs):
            yield array[start:start + bs]
    return batches() if generator else list(batches())
import requests
def get(url: str) -> bytes:
    """Download the SIC 2007 structure workbook hosted by the ONS at ``url``."""
    resp = requests.get(url)
    # Surface HTTP errors instead of returning an error page's body.
    resp.raise_for_status()
    return resp.content
def get_frame_prediction(d):
    """Extract the first prediction dict from a frame result.
    E.g.
    {'index': ..., 'prediction': [{'class': 'sp_sfb', 'position': ..., 'score': ...}], 'label': ...}
    ==> (dict) {'class': 'sp_sfb', 'position': ..., 'score': ...}
    """
    predictions = d['prediction']
    return predictions[0]
def get_vis_params(collection):
    """A function to get the visualization parameters of a GEE ImageCollection feature.
    Args:
        collection ([ImageCollection]): [GEE ImageCollection]
    Returns:
        [dict]: [Visualization parameters: 'min', 'max', 'bands']
    """
    # getInfo() performs a server round-trip; the original called it three
    # times for the same metadata — fetch it once and reuse.
    properties = collection.getInfo()['properties']
    return {
        'min': float(properties['visualization_0_min']),
        'max': float(properties['visualization_0_max']),
        'bands': properties['visualization_0_bands'].split(','),
    }
from typing import Tuple
def get_grid(n: int) -> Tuple[int, int]:
    """Return (rows, cols) needed to lay out ``n`` subplots, at most 3 per row."""
    rows = (n + 2) // 3
    if n <= 3:
        return rows, n
    if n == 4:
        # A 2x2 grid looks better than 3+1 for four plots.
        return rows, 2
    return rows, 3
def df2bytes(dataframe):
    """Serialize a pandas.DataFrame to CSV bytes (header line + data rows, no index).
    :param pandas.DataFrame dataframe: dataframe to convert
    :return: bytes of csv
    :rtype: bytes
    """
    header = ','.join(dataframe)  # iterating a DataFrame yields column names
    rows = [','.join(str(cell) for cell in row) for row in dataframe.values]
    return '\n'.join([header] + rows).encode()
def row_col_indices_from_flattened_indices(indices, num_cols):
    """Convert flattened indices into (row, col) index pairs.
    Args:
      indices: An integer tensor of any shape holding indices into the
        flattened space.
      num_cols: Number of columns in the image (width).
    Returns:
      row_indices: Row index for each input index; same shape as indices.
      col_indices: Column index for each input index; same shape as indices.
    """
    # Deliberately avoids the mod operator for compatibility with
    # environments such as WASM: col = index - row * num_cols is
    # equivalent for non-negative integers.
    rows = indices // num_cols
    cols = indices - rows * num_cols
    return rows, cols
def get_item(obj, key):
    """
    Look up ``key`` on a dictionary-style object.
    :param obj: The object to look up the key on.
    :param key: The key to look up.
    :return: The stored value, or None when the key is missing.
    """
    try:
        value = obj[key]
    except KeyError:
        return None
    return value
import json
def build_texts_from_synopsis(path_to_movie_dat):
    """
    Build a synopsis-text dict for semantic embeddings from a JSON-lines
    movie file (Movie Lens ratings joined with synopsis data).
    :param path_to_movie_dat: path to a file with one JSON object per line,
        each containing 'imdb_id' and 'plot_synopsis'.
    :return: dict of text lists keyed by imdb_id
    """
    texts = {}
    with open(path_to_movie_dat, "r", encoding="ISO-8859-1") as handle:
        for line in handle:
            record = json.loads(line)
            texts[record['imdb_id']] = [record['plot_synopsis']]
    return texts
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.