| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def destr(screenString):
"""
should return a valid screen object
as defined by input string
(think depickling)
"""
#print "making screen from this received string: %s" % screenString
rowList = []
curRow = []
curAsciiStr = ""
curStr = ""
for ch in screenString:
if ch == '\n':
# then we are done with the row and append it
# and start a new row
rowList.append(curRow)
curRow = []
elif ch == '|':
# then we're ready to make our current asciipixel
            curAsciiPixel = AsciiPixel(int(curAsciiStr), int(curStr))
            curAsciiStr = ""
curRow.append(curAsciiPixel)
curStr = ""
elif ch == ',':
# then we're now building the color string
curAsciiStr = curStr[:]
curStr = ""
else:
curStr += ch
ret = Screen(rowList)
return ret
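A quick usage sketch of the wire format destr() expects: each pixel is serialized as "<ascii>,<color>|" and each row ends with "\n" (the literal below is hypothetical).
screen = destr("72,3|105,7|\n")  # one row containing AsciiPixel(72, 3) and AsciiPixel(105, 7)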
|
38f540b3e8f6a16d2dbe7519ea5a43cbf2432b55
| 3,645,420
|
import numpy as np
def analytical_solution_with_penalty(train_X, train_Y, lam, poly_degree):
    """
    Closed-form (analytical) solution with a penalty term (ridge regression).
    :param poly_degree: polynomial degree
    :param train_X: X matrix of the training set
    :param train_Y: Y vector of the training set
    :param lam: penalty coefficient
    :return: solution vector (as a polynomial)
    """
X, Y = normalization(train_X, train_Y, poly_degree)
matrix = np.linalg.inv(X.T.dot(X) + lam * np.eye(X.shape[1])).dot(X.T).dot(Y)
w_result = np.poly1d(matrix[::-1].reshape(poly_degree + 1))
# print("w result analytical")
# print(w_result)
return w_result
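The closed form computed above is ridge regression, w = (X^T X + lam*I)^(-1) X^T Y. A hedged usage sketch, assuming normalization() builds the polynomial design matrix from the raw inputs:
import numpy as np
train_X = np.linspace(0, 1, 20)
train_Y = np.sin(2 * np.pi * train_X) + 0.1 * np.random.randn(20)
w = analytical_solution_with_penalty(train_X, train_Y, lam=1e-3, poly_degree=5)
print(w(0.5))  # w is an np.poly1d, so it can be evaluated directly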
|
30f81cd74622889df64d6e67f023f67b3149504a
| 3,645,421
|
from math import atan2, cos, radians, sin, sqrt
def formule_haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Description:
        Computes the distance between two points using the Haversine formula.
    Parameters:
        lat1: {float} -- Latitude of the first point.
        lon1: {float} -- Longitude of the first point.
        lat2: {float} -- Latitude of the second point.
        lon2: {float} -- Longitude of the second point.
    Returns:
        {float} -- Distance between the two points (in kilometres).
    Example:
        >>> formule_haversine(0, 0, 1, 1)
        157.24938127194397
    """
EARTH_RADIUS = 6371e3
dLat = radians(lat2 - lat1)
dLon = radians(lon2 - lon1)
lat1 = radians(lat1)
lat2 = radians(lat2)
a = sin(dLat/2)**2 + cos(lat1) * cos(lat2) * sin(dLon/2)**2
c = 2 * atan2(sqrt(a), sqrt(1-a))
return (EARTH_RADIUS * c) / 1000
|
03ac0c191aa17b9f20944a7de56febba77db1edc
| 3,645,422
|
def get_word_combinations(word):
"""
'one-two-three'
=>
['one', 'two', 'three', 'onetwo', 'twothree', 'onetwothree']
"""
permutations = []
parts = [part for part in word.split(u'-') if part]
for count in range(1, len(parts) + 1):
for index in range(len(parts) - count + 1):
permutations.append(u''.join(parts[index:index+count]))
return permutations
|
5a4c042cc0f3dedb297e2513bf638eac4278e0a6
| 3,645,423
|
import tempfile
def env_to_file(env_variables, destination_path=None, posix=True):
"""
Write environment variables to a file.
:param env_variables: environment variables
:param destination_path: destination path of a file where the
environment variables will be stored. the
stored variables will be a bash script you can
then source.
:param posix: false if the target of the generated file will be a
windows machine
"""
if not env_variables:
return None
if not destination_path:
destination_path = tempfile.mkstemp(suffix='env')[1]
if posix:
linesep = '\n'
else:
linesep = '\r\n'
with open(destination_path, 'w') as f:
if posix:
f.write('#!/bin/bash')
f.write(linesep)
            f.write('# Environment file generated by Cloudify. Do not delete '
                    'unless you know exactly what you are doing.')
f.write(linesep)
f.write(linesep)
else:
            f.write('rem Environment file generated by Cloudify. Do not '
                    'delete unless you know exactly what you are doing.')
f.write(linesep)
        for key, value in env_variables.items():
if posix:
f.write('export {0}={1}'.format(key, value))
f.write(linesep)
else:
f.write('set {0}={1}'.format(key, value))
f.write(linesep)
f.write(linesep)
return destination_path
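A minimal usage sketch (hypothetical variable names):
env_path = env_to_file({'MY_HOST': 'localhost', 'MY_PORT': '8080'})
# The generated file is a bash script, so `source $env_path` exports
# MY_HOST and MY_PORT into the current shell.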
|
c242ff4d6956922b2ccceecaef5b95640116e75a
| 3,645,424
|
import numpy as np
def _phase_norm(signal, reference_channel=0):
"""Unit normalization.
Args:
signal: STFT signal with shape (..., T, D).
Returns:
Normalized STFT signal with same shape.
"""
angles = np.angle(signal[..., [reference_channel]])
return signal * np.exp(-1j * angles)
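A small self-check sketch: after normalization, the reference channel's phase is zero everywhere.
import numpy as np
rng = np.random.default_rng(0)
sig = rng.standard_normal((100, 4)) + 1j * rng.standard_normal((100, 4))
normed = _phase_norm(sig, reference_channel=0)
assert np.allclose(np.angle(normed[..., 0]), 0.0)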
|
f4e9021f8942bebf97d35e529068792b7f956425
| 3,645,425
|
def maintenance_():
"""Render a maintenance page while on maintenance mode."""
return render_template("maintenance/maintenance.html")
|
61b95cdeb1a16f216a60330d7501e5270e1342ba
| 3,645,426
|
def CanEditHotlist(effective_ids, hotlist):
"""Return True if a user is editor(add/remove issues and change rankings)."""
return any([user_id in (hotlist.owner_ids + hotlist.editor_ids)
for user_id in effective_ids])
|
dc29c74e2628930faffb12b6772046564ffb8218
| 3,645,427
|
from desimodel.io import load_fiberpos, load_target_info
def model_density_of_sky_fibers(margin=1.5):
"""Use desihub products to find required density of sky fibers for DESI.
Parameters
----------
margin : :class:`float`, optional, defaults to 1.5
Factor of extra sky positions to generate. So, for margin=10, 10x as
many sky positions as the default requirements will be generated.
Returns
-------
:class:`float`
The density of sky fibers to generate in per sq. deg.
"""
fracsky = load_target_info()["frac_sky"]
nfibers = len(load_fiberpos())
nskies = margin*fracsky*nfibers
return nskies
|
a50111f51c2ce081c3379e2b5506912326fafb55
| 3,645,428
|
def dice_counts(dice):
"""Make a dictionary of how many of each value are in the dice """
return {x: dice.count(x) for x in range(1, 7)}
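A quick doctest-style check of the mapping:
assert dice_counts([1, 1, 3, 5, 5]) == {1: 2, 2: 0, 3: 1, 4: 0, 5: 2, 6: 0}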
|
427703283b5c0cb621e25f16a1c1f2436642fa9f
| 3,645,429
|
def SynthesizeData(phase, total_gen):
""" Phase ranges from 0 to 24 with increments of 0.2. """
x_list = [phase]
y_list = []
while len(x_list) < total_gen or len(y_list) < total_gen:
x = x_list[-1]
y = sine_function(x=x, amp=amp, per=per, shift_h=shift_h, shift_v=shift_v)
x_list.append(y+x)
y_list.append(y)
x_list = x_list[:-1]
return x_list, y_list
|
e656767f7ebf13575571b5eb0592a0e11cbbfcf7
| 3,645,430
|
from pathlib import Path
import difflib
from datetime import datetime
from flask import current_app, jsonify, request
def compare():
""" Eats two file names, returns a comparison of the two files.
Both files must be csv files containing
<a word>;<doc ID>;<pageNr>;<line ID>;<index of the word>
They may also contain lines with additional HTML code (if the
output format is html):
<h3>Document 1</h3>
"""
if request.method == 'GET':
return "html"
elif request.method == 'POST':
# Get the JSON payload of the request containing the two file names
payload = request.get_json()
if payload['format'] == "html":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"]) # \Dokumente\Synchronisation\Programmieren\Python\tutorial_flask_wsgi\instance\cache
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
html = export_to_html(filtered,
original_document=o[0]['document'],
censored_document=e[0]['document'])
dumping_path = Path(current_app.config["CACHE_PATH"])
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"differences,{o[0]['document']}_vs_{e[0]['document']},{timestamp}.html"
savename = Path(dumping_path, filename)
            try:
                with open(savename, "w", encoding="utf-8") as f:
                    f.write(html)
            except OSError:
                pass
return html
elif payload['format'] == "raw_diff":
# Read input data, i.e. both input files (CSV) from disk:
dumping_path = Path(current_app.config["CACHE_PATH"])
filename1 = Path(dumping_path, payload['files'][0])
filename2 = Path(dumping_path, payload['files'][1])
o = openfile(filename1)
e = openfile(filename2)
balance_tokens(o, e)
data1 = prepare_for_diff(o)
data2 = prepare_for_diff(e)
# Use difflib to find the differences:
print("ANALYZER: searching for differences (with difflib) ...")
d = difflib.Differ()
delta = d.compare(data1, data2)
delta = [*delta] # convert generator to list
pairs = prepare_output(delta, o,e)
filtered = filter_false_positives(pairs)
output = serialize_diff_pairs(filtered)
output["original"]["docTitle"] = o[0]['document']
output["censored"]["docTitle"] = e[0]['document']
output["message"] = "Success! Use the censorship inspector to process the output."
print("ANALYZER: Done! Sending JSON to client.")
return jsonify(output)
elif payload['format'] == "TRACER":
""" The TRACER data is already formatted correctly in the TSV files.
The only thing we have to do here is to replace the "XX" place holders
at the beginning of every line with a two digit number representing
the no. of the document. """
dumping_path = Path(current_app.config["CACHE_PATH"])
output = []
docs = []
docnr = 10
for file in payload['files']:
infile = Path(dumping_path, file)
with open(infile, "r", encoding="utf-8") as f:
lines = f.readlines()
for idx, line in enumerate(lines):
output.append(f"{docnr}{line[2:]}")
if idx == 0: # get the document identifier of the first line
docs.append(line.split("\t")[-1].strip())
docnr += 1
timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S')
filename = f"tracer_{','.join([str(x) for x in docs])}_{timestamp}.txt"
savename = Path(dumping_path, filename)
print(f"ANALYZER: Trying to write {savename}")
        try:
            with open(savename, "w", encoding="utf-8") as f:
                f.writelines(output)
            print("ANALYZER: Success!")
return jsonify(message = f'Success! You can download the exported file under /download/{savename}',
links = [{'href': f'/download/{savename}',
'rel': 'download',
'type': 'GET'}]), 200
        except OSError:
print(f"ERROR: Analyzer: Could not write file {savename}")
return jsonify(message = f"ERROR: Analyzer: Could not write file {savename}",
links = [{'href': "error",
'rel': 'download',
'type': 'GET'}]), 500
|
f6aa0421e84cf9d97a211904e64bd793ff7e989e
| 3,645,431
|
def draw_transform(dim_steps, filetype="png", dpi=150):
"""create image from variable transormation steps
Args:
dim_steps(OrderedDict): dimension -> steps
* each element contains steps for a dimension
* dimensions are all dimensions in source and target domain
* each step is (from_level, to_level, action, (weight_level, weight_var))
filetype(str): "png" or "svg"
dpi(int): resolution for png image
"""
dot_cmd = get_dot_cmd(filetype=filetype, dpi=dpi)
dot_components = get_components(dim_steps)
dot_str = get_dot_digraph_str(dot_components)
image_bytes = get_image_bytes(dot_cmd, dot_str)
return image_bytes
|
4738f9512065a9d0d6e33879954581cbf0940a11
| 3,645,432
|
import statistics
def get_ei_border_ratio_from_exon_id(exon_id, regid2nc_dic,
exid2eibrs_dic=None,
ratio_mode=1,
last_exon_dic=None,
last_exon_ratio=2.5,
min_reg_cov=5,
min_reg_mode=1):
"""
Ratio is average of ratios at both exon ends (if embedded in introns),
or if first / last exon, only one ratio.
Assign -1, if only exon, or if both exon and intron border region read
count below min_reg_cov.
min_reg_cov:
Minimum region read coverage. If both exon and intron border region
have < min_reg_cov, return ratio of -1.
regid2nc_dic:
Contains exon/intron/border region ID -> [norm_cov, coverage, reg_len]
exid2eibrs_dic:
Exon ID to all EIB ratios list mapping.
ratio_mode:
How to calculate the returned EIBR ratio.
        1: Return the exon-intron border ratio with the higher coverage.
2: Average the two exon-intron border ratios of the exon,
if both have more than > min_reg_cov
last_exon_dic:
Last transcript exon ID -> polarity
Used for prioritizing the inner exon intron border for multi-exon
transcript last exons. Only effective for ratio_mode 1.
last_exon_ratio:
        If the outer last exon read count is higher than last_exon_ratio times
        the inner count, prioritize the outer border, i.e. select the outer
        ratio for EIB ratio calculation.
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.2, 4, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(2.5, 'first_exon')
>>> get_ei_border_ratio_from_exon_id("t2_e1", regid2nc_dic)
(-1, 'single_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(3.0, 'inner_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [0.1, 2, 20], "t1_e2_ebi2" : [0.1, 2, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_ds_lc')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.1, 2, 20], "t1_e2_ebi1" : [0.1, 2, 20], "t1_e2_ebe2" : [0.5, 10, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=2)
(2.0, 'inner_exon_us_lc')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.5, 10, 20], "t1_e1_ebi2" : [0.0, 0, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(10, 'first_exon')
>>> regid2nc_dic = {"t1_e1_ebe2" : [0.0, 0, 20], "t1_e1_ebi2" : [0.5, 10, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e1", regid2nc_dic)
(0.0, 'first_exon')
>>> regid2nc_dic = {"t1_e2_ebe1" : [0.5, 10, 20], "t1_e2_ebi1" : [0.25, 5, 20], "t1_e2_ebe2" : [1.0, 20, 20], "t1_e2_ebi2" : [0.25, 5, 20]}
>>> get_ei_border_ratio_from_exon_id("t1_e2", regid2nc_dic, ratio_mode=1)
(4.0, 'inner_exon')
"""
exb_id_e1 = exon_id + "_ebe1"
exb_id_i1 = exon_id + "_ebi1"
exb_id_e2 = exon_id + "_ebe2"
exb_id_i2 = exon_id + "_ebi2"
# For single-exon transcripts.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" % (exon_id)
exid2eibrs_dic[exon_id] = [-1]
return -1, "single_exon"
# Last exon.
if exb_id_e1 in regid2nc_dic and exb_id_e2 not in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
ratio1 = -1
sel_crit = "last_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" % (exon_id)
exid2eibrs_dic[exon_id] = [ratio1]
return ratio1, sel_crit
# First exon.
if exb_id_e1 not in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio2 = -1
sel_crit = "first_exon"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
if exid2eibrs_dic is not None:
            assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" % (exon_id)
exid2eibrs_dic[exon_id] = [ratio2]
return ratio2, sel_crit
# In-between exons.
if exb_id_e1 in regid2nc_dic and exb_id_e2 in regid2nc_dic:
assert exb_id_i1 in regid2nc_dic, "exb_id_e1 %s in regid2nc_dic, but not exb_id_i1 %s" %(exb_id_e1, exb_id_i1)
assert exb_id_i2 in regid2nc_dic, "exb_id_e2 %s in regid2nc_dic, but not exb_id_i2 %s" %(exb_id_e2, exb_id_i2)
ratio1 = -1
ratio2 = -1
# if exon_id == "ENST00000366553.3_e2":
# print(exon_id)
# print("regid2nc_dic[exb_id_i1][1]:", regid2nc_dic[exb_id_i1][1])
# print("regid2nc_dic[exb_id_e1][1]:", regid2nc_dic[exb_id_e1][1])
# print("regid2nc_dic[exb_id_e2][1]:", regid2nc_dic[exb_id_e2][1])
# print("regid2nc_dic[exb_id_i2][1]:", regid2nc_dic[exb_id_i2][1])
# print("regid2nc_dic[exb_id_i1][0]:", regid2nc_dic[exb_id_i1][0])
# print("regid2nc_dic[exb_id_e1][0]:", regid2nc_dic[exb_id_e1][0])
# print("regid2nc_dic[exb_id_e2][0]:", regid2nc_dic[exb_id_e2][0])
# print("regid2nc_dic[exb_id_i2][0]:", regid2nc_dic[exb_id_i2][0])
sel_crit = "inner_exon"
if regid2nc_dic[exb_id_e1][1] >= min_reg_cov or regid2nc_dic[exb_id_i1][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i1][0]:
ratio1 = regid2nc_dic[exb_id_e1][0] / regid2nc_dic[exb_id_i1][0]
else:
ratio1 = regid2nc_dic[exb_id_e1][1]
else:
sel_crit += "_us_lc"
if regid2nc_dic[exb_id_e2][1] >= min_reg_cov or regid2nc_dic[exb_id_i2][1] >= min_reg_cov:
if regid2nc_dic[exb_id_i2][0]:
ratio2 = regid2nc_dic[exb_id_e2][0] / regid2nc_dic[exb_id_i2][0]
else:
ratio2 = regid2nc_dic[exb_id_e2][1]
else:
sel_crit += "_ds_lc"
if exid2eibrs_dic is not None:
assert exon_id not in exid2eibrs_dic, "exon ID %s already stored in exid2eibrs_dic" %(exon_id)
exid2eibrs_dic[exon_id] = [ratio1, ratio2]
if ratio1 == -1 and ratio2 != -1:
avg_ratio = ratio2
elif ratio1 != -1 and ratio2 == -1:
avg_ratio = ratio1
elif ratio1 == -1 and ratio2 == -1:
avg_ratio = -1
else:
if ratio_mode == 1:
cov_b1 = regid2nc_dic[exb_id_i1][0] + regid2nc_dic[exb_id_e1][0]
cov_b2 = regid2nc_dic[exb_id_i2][0] + regid2nc_dic[exb_id_e2][0]
if cov_b1 > cov_b2:
avg_ratio = ratio1
else:
avg_ratio = ratio2
if last_exon_dic is not None:
if exon_id in last_exon_dic:
sel_crit = "last_exon"
exon_pol = last_exon_dic[exon_id]
# Define inner borders.
cov_inner = cov_b1
ratio_inner = ratio1
cov_outer = cov_b2
ratio_outer = ratio2
if exon_pol == "-":
cov_inner = cov_b2
ratio_inner = ratio2
cov_outer = cov_b1
ratio_outer = ratio1
if cov_inner*last_exon_ratio >= cov_outer:
avg_ratio = ratio_inner
sel_crit += "_inner"
else:
avg_ratio = ratio_outer
sel_crit += "_outer"
elif ratio_mode == 2:
avg_ratio = statistics.mean([ratio1, ratio2])
else:
assert False, "invalid ratio_mode (%i)" %(ratio_mode)
return avg_ratio, sel_crit
assert False, "invalid get_ei_border_ratio_from_exon_id()"
|
fd5239fabb81d328d644dbb8b56608eda15e78ce
| 3,645,433
|
def events(*_events):
""" A class decorator. Adds auxiliary methods for callback based event
notification of multiple watchers.
"""
def add_events(cls):
# Maintain total event list of both inherited events and events added
# using nested decorations.
try:
all_events = cls.events
except AttributeError:
cls.events = _events
else:
cls.events = all_events + _events
for e in _events:
helpers = {}
exec("""
@lazy
def {event}_handlers(self):
return []
def {event}(self, *a, **kw):
for h in list(self.{handlers}):
h(*a, **kw)
def watch_{event}(self, cb):
self.{handlers}.append(cb)
def unwatch_{event}(self, cb):
self.{handlers}.remove(cb)
""".format(event = e, handlers = e + "_handlers"),
globals(), helpers
)
for n, h in helpers.items():
setattr(cls, n, h)
return cls
return add_events
|
601f7d55ff4d05dd0aca552213dcd911f15c91b6
| 3,645,434
|
def _find_nearest(array, value):
"""Find the nearest numerical match to value in an array.
Args:
array (np.ndarray): An array of numbers to match with.
value (float): Single value to find an entry in array that is close.
Returns:
np.array: The entry in array that is closest to value.
"""
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return array[idx]
|
7440447c4079563722b91771f07fcd3c3f5e0c3b
| 3,645,435
|
import requests
from bs4 import BeautifulSoup
def download_document(url):
"""Downloads document using BeautifulSoup, extracts the subject and all
text stored in paragraph tags
"""
r = requests.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
title = soup.find('title').get_text()
document = ' '.join([p.get_text() for p in soup.find_all('p')])
return document
|
8bb9055b40dd5554185ddec1d3218157a016bfd8
| 3,645,436
|
import numpy as np
def rz_gate(phi: float = 0):
"""Functional for the single-qubit Pauli-Z rotation-gate.
Parameters
----------
phi : float
Rotation angle (in radians)
Returns
-------
rz : (2, 2) np.ndarray
"""
arg = 1j * phi / 2
return np.array([[np.exp(-arg), 0], [0, np.exp(arg)]])
|
c148a03f3525698c44e5f8aa14085bfeb29c72ef
| 3,645,437
|
from typing import List
def dict_to_kvp(dictionary: dict) -> List[tuple]:
"""
Converts a dictionary to a list of tuples where each tuple has the key and value
of each dictionary item
:param dictionary: Dictionary to convert
:return: List of Key-Value Pairs
"""
    return list(dictionary.items())
|
2b856ebb218884a4975d316bebe27546070f2083
| 3,645,438
|
def convert_and_remove_punctuation(text):
"""
remove punctuation that are not allowed, e.g. / \
convert Chinese punctuation into English punctuation, e.g. from「 to "
"""
# removal
text = text.replace("\\", "")
text = text.replace("\\", "")
text = text.replace("[", "")
text = text.replace("]", "")
text = text.replace("【", "")
text = text.replace("】", "")
text = text.replace("{", "")
text = text.replace("}", "")
# conversion
text = text.replace(u"\u201C", "\"")
text = text.replace(u"\u201D", "\"")
text = text.replace(u"\u2018", "'")
text = text.replace(u"\u2019", "'")
text = text.replace("「", "\"")
text = text.replace("」", "\"")
text = text.replace("『", "\"")
text = text.replace("』", "\"")
text = text.replace("quot;", "\"")
return text
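Two doctest-style examples of the removal and conversion behaviour:
assert convert_and_remove_punctuation("「你好」") == '"你好"'
assert convert_and_remove_punctuation("a[b]c") == "abc"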
|
2de1f930ca76da7fec3467469f98b0e0858e54a0
| 3,645,439
|
def create_random_context(dialog,rng,minimum_context_length=2,max_context_length=20):
"""
Samples random context from a dialog. Contexts are uniformly sampled from the whole dialog.
    :param dialog: list of dialog turns
    :param rng: random number generator (a random.Random instance)
    :param minimum_context_length: minimum number of context turns
    :param max_context_length: maximum number of context turns
    :return: context, index of next utterance that follows the context
"""
# sample dialog context
#context_turns = rng.randint(minimum_context_length,len(dialog)-1)
max_len = min(max_context_length, len(dialog)) - 2
if max_len <= minimum_context_length:
context_turns = max_len
else:
context_turns = rng.randint(minimum_context_length,max_len)
# create string
return dialog_turns_to_string(dialog[:context_turns]),context_turns
|
d66ee8f185380801735644a7ce4528f398385e60
| 3,645,440
|
def dev_test_new_schema_version(dbname, sqldb_dpath, sqldb_fname,
version_current, version_next=None):
"""
hacky function to ensure that only developer sees the development schema
and only on test databases
"""
TESTING_NEW_SQL_VERSION = version_current != version_next
if TESTING_NEW_SQL_VERSION:
print('[sql] ATTEMPTING TO TEST NEW SQLDB VERSION')
devdb_list = ['PZ_MTEST', 'testdb1', 'testdb0', 'testdb2',
'testdb_dst2', 'emptydatabase']
        testing_newschema = ut.is_developer() and dbname in devdb_list
        #testing_newschema = False
        #ut.is_developer() and ibs.get_dbname() in ['PZ_MTEST', 'testdb1']
        if testing_newschema:
# Set to true until the schema module is good then continue tests
# with this set to false
testing_force_fresh = True or ut.get_argflag('--force-fresh')
# Work on a fresh schema copy when developing
dev_sqldb_fname = ut.augpath(sqldb_fname, '_develop_schema')
sqldb_fpath = join(sqldb_dpath, sqldb_fname)
dev_sqldb_fpath = join(sqldb_dpath, dev_sqldb_fname)
ut.copy(sqldb_fpath, dev_sqldb_fpath, overwrite=testing_force_fresh)
# Set testing schema version
#ibs.db_version_expected = '1.3.6'
print('[sql] TESTING NEW SQLDB VERSION: %r' % (version_next,))
#print('[sql] ... pass --force-fresh to reload any changes')
return version_next, dev_sqldb_fname
else:
print('[ibs] NOT TESTING')
return version_current, sqldb_fname
|
ec57d6ccb39d76159ab80c6fdfe094b486d00777
| 3,645,441
|
import numpy as np
def _get_distance_euclidian(row1: np.ndarray, row2: np.ndarray):
    """
    Returns the Euclidean distance between two rows (vectors),
    taking into account all columns of the data given.
    """
    distance = 0.
    for i, _ in enumerate(row1):
        distance += (row1[i] - row2[i]) ** 2
    return np.sqrt(distance)
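A quick sanity check with a 3-4-5 triangle:
import numpy as np
assert _get_distance_euclidian(np.array([0.0, 0.0]), np.array([3.0, 4.0])) == 5.0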
|
13a3944becf717222eb6fc997ceb937ad37b30ab
| 3,645,442
|
import re
def _get_ip_from_response(response):
"""
Filter ipv4 addresses from string.
Parameters
----------
response: str
String with ipv4 addresses.
Returns
-------
list: list with ip4 addresses.
"""
ip = re.findall(r'\b(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b', response)
return ip
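A doctest-style example of the extraction:
assert _get_ip_from_response("eth0: 192.168.0.1, lo: 127.0.0.1") == ['192.168.0.1', '127.0.0.1']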
|
ac36a3b729b0ce4ba13a6db550a71276319cbd70
| 3,645,443
|
from typing import Optional
from typing import List
import logging
def create_processor(
options: options_pb2.ConvertorOptions,
theorem_database: Optional[proof_assistant_pb2.TheoremDatabase] = None,
tactics: Optional[List[deephol_pb2.Tactic]] = None) -> ProofLogToTFExample:
"""Factory function for ProofLogToTFExample."""
if theorem_database and options.theorem_database_path:
        raise ValueError(
            'Both theorem database as well as a path to load it from file '
            'provided. Only provide one.')
if not theorem_database:
theorem_database = io_util.load_theorem_database_from_file(
str(options.theorem_database_path))
if tactics and options.tactics_path:
raise ValueError('Both tactics as well as a path to load it from '
'provided. Only provide one.')
if not tactics:
tactics = io_util.load_tactics_from_file(str(options.tactics_path), None)
tactics_name_id_map = {tactic.name: tactic.id for tactic in tactics}
if options.replacements_hack:
        logging.warning('Replacements hack is enabled.')
tactics_name_id_map.update({
'GEN_TAC': 8,
'MESON_TAC': 11,
'CHOOSE_TAC': 34,
})
if options.format != options_pb2.ConvertorOptions.HOLPARAM:
raise ValueError('Unknown options_pb2.ConvertorOptions.TFExampleFormat.')
return ProofLogToTFExample(tactics_name_id_map, theorem_database, options)
|
898a72372a80546f4de277c5f3e3573c7f8edff6
| 3,645,444
|
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
|
132a3b1bb8a0e7b3c92ac15e2d68337eeef19042
| 3,645,445
|
import numpy as np
def lsh(B_BANDS, docIdList, sig):
""" Applies the LSH algorithm. This function first divides the signature matrix into bands and hashes each column onto buckets.
:param B_BANDS: Number of bands in signature matrix
:param docIdList: List of document ids
:param sig: signature matrix
:return: List of document to its hash along with the buckets
"""
numHash = number_of_hash
bands = getbestb(threshold,number_of_hash)
rows = numHash / bands
d = 1681
# Array of dictionaries, each dictionary is for each band which will hold buckets for hashed vectors in that band
    buckets = [{} for _ in range(bands)]  # one separate dict per band (np.full would make every band share a single dict object)
# Mapping from docid to h to find the buckets in which document with docid was hashed
docth = np.zeros((d, bands), dtype=int) # doc to hash
for i in range(bands):
for j in range(d):
low = int(i*rows) # First row in a band
high = min(int((i+1)*rows), numHash)# Last row in current band
l = []
for x in range(low, high):
l.append(sig[x, j]) # Append each row into l
h = int(hash(tuple(l))) % (d+1)
            try:
                buckets[i][h].append(j) # If a bucket corresponds to this hash value append this document into it
            except KeyError:
                buckets[i][h] = [j]
docth[j][i] = h
# print(docth)
return docth, buckets
|
ad6071e52d2c442764e57bb68e2f1e2d4c5a7c2e
| 3,645,447
|
import numpy as np
def langevin_coefficients(
temperature,
dt,
friction,
masses):
"""
Compute coefficients for langevin dynamics
Parameters
----------
temperature: float
units of Kelvin
dt: float
units of picoseconds
    friction: float
        collision frequency in inverse picoseconds
masses: array
mass of each atom in standard mass units
Returns
-------
tuple (ca, cb, cc)
ca is scalar, and cb and cc are n length arrays
that are used during langevin dynamics
"""
vscale = np.exp(-dt*friction)
if friction == 0:
fscale = dt
else:
fscale = (1-vscale)/friction
kT = BOLTZ * temperature
nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale
invMasses = 1.0/masses
sqrtInvMasses = np.sqrt(invMasses)
ca = vscale
cb = fscale*invMasses
cc = nscale*sqrtInvMasses
return ca, cb, cc
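A hedged sketch of the velocity update these coefficients feed into; BOLTZ below is an assumed stand-in for the module's Boltzmann constant (kJ/mol/K):
import numpy as np
BOLTZ = 0.008314462618  # assumption: stand-in for the module-level constant
ca, cb, cc = langevin_coefficients(300.0, 0.002, 1.0, np.array([12.0, 1.0]))
v = np.zeros(2)
force = np.array([0.5, -0.3])
v = ca * v + cb * force + cc * np.random.randn(2)  # one Langevin velocity step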
|
680d5c8898ecb7c0627232c8c993bb0f64a2e9d3
| 3,645,448
|
import errno
import os
import time
def waitpid_handle_exceptions(pid, deadline):
"""Wrapper around os.waitpid()/waitpid_with_timeout(), which waits until
either a child process exits or the deadline elapses, and retries if certain
exceptions occur.
Args:
pid: Process ID to wait for, or -1 to wait for any child process.
deadline: If non-zero, waiting stops when time.time() exceeds this value.
If zero, waiting stops when a child process exits.
Returns:
(pid, status): Same as for waitpid_with_timeout(). |pid| is non-zero if and
only if a child exited during the wait.
Raises:
Same as for os.waitpid(), except:
OSError with errno==EINTR causes the wait to be retried (this can happen,
for example, if this parent process receives SIGHUP).
OSError with errno==ECHILD means there are no child processes, and so
this function sleeps until |deadline|. If |deadline| is zero, this is an
error and the OSError exception is raised in this case.
"""
while True:
try:
if deadline == 0:
pid_result, status = os.waitpid(pid, 0)
else:
pid_result, status = waitpid_with_timeout(pid, deadline)
return (pid_result, status)
        except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
now = time.time()
if deadline == 0:
# No time-limit and no child processes. This is treated as an error
# (see docstring).
raise
elif deadline > now:
time.sleep(deadline - now)
return (0, 0)
else:
# Anything else is an unexpected error.
raise
|
2d15594c9b066b3e1000a6394503a9b8a88e5420
| 3,645,449
|
import cv2
def preprocess(image, image_size):
    """
    Preprocess
    Pre-process the image by Gaussian blurring, perspective
    transformation, and resizing.
    :param image: image of display from cv2.read
    :param image_size: target size for the resized square output
    :return out_image: output image after preprocessing
    """
# blurr
blurred = cv2.GaussianBlur(image, (5, 5), 1)
# perspective transformation
out_img = myPerspectiveTransformation(blurred)
# resize it
out_img = resizeSquareRespectAcpectRatio(
out_img,
image_size,
cv2.INTER_AREA
)
return out_img
|
497d3d1a32be643486903d44621ff203503b726e
| 3,645,451
|
import urllib.request
import urllib.parse
from urllib.error import HTTPError
import json
import time
def download(distributor: Distributor, max_try:int = 4) -> list[TrainInformation]|None:
"""Download train information from distributor.
If response status code was 500-599, this function retries up to max_try times.
Parameters
----------
distributor : Distributor
        Distributor of information source.
max_try : int, optional
If response status code was 500-599, it retries up to this value.(default = 4)
Returns
-------
list[TrainInformation]|None
List of train information which is downloaded from web, or None if consumerKey is unset.
Raises
------
InvalidParameterError
HTTP status code was 400.
InvalidConsumerKeyError
HTTP status code was 401.
Forbidden
HTTP status code was 403.
NotFound
HTTP status code was 404.
OdptServerError
HTTP status code was 500-599.
UnknownHTTPError
HTTP status code was unexpected.
"""
if not distributor.is_valid():
return None
query = {}
query["acl:consumerKey"] = distributor.consumer_key
json_dict:list[TrainInformation_jsondict] = []
for try_count in range(max_try):
try:
with urllib.request.urlopen("%s?%s" % (distributor.URL, urllib.parse.urlencode(query))) as f:
json_dict = json.load(f)
break
except HTTPError as e:
match e.code:
case 400:
raise InvalidParameterError(e)
case 401:
raise InvalidConsumerKeyError(e)
case 403:
raise Forbidden(e)
case 404:
raise NotFound(e, distributor.value)
case code if 500 <= code < 600:
if try_count == max_try-1:
raise OdptServerError(e)
else:
time.sleep(1+try_count)
continue
case _:
raise UnknownHTTPError(e)
except Exception as e:
if try_count == max_try-1:
raise
else:
time.sleep(1+try_count)
continue
return TrainInformation.from_list(json_dict)
|
1288e50807465164dd4aa2e082b4136abe81636c
| 3,645,452
|
import tensorflow as tf
def add_payloads(prev_layer, input_spikes):
"""Get payloads from previous layer."""
# Get only payloads of those pre-synaptic neurons that spiked
payloads = tf.where(tf.equal(input_spikes, 0.),
tf.zeros_like(input_spikes), prev_layer.payloads)
print("Using spikes with payloads from layer {}".format(prev_layer.name))
return input_spikes + payloads
|
4f7bd805e8659ddea0da63fd542edb6d52073569
| 3,645,453
|
def read_csv_to_data(path: str, delimiter: str = ",", headers: list = []):
"""A zero-dependancy helper method to read a csv file
Given the path to a csv file, read data row-wise. This data may be later converted to a dict of lists if needed (column-wise).
Args:
path (str): Path to csv file
delimiter (str, optional): Delimiter to split the rows by. Defaults to ','
headers: (list, optional): Given header list for a csv file. Defaults to an empty list, which results in the first row being used as a header.
Returns:
A list of dictionary values (list of rows) representing the file being read
"""
data = []
with open(path, "r") as f:
        header = headers
        if len(headers) == 0:
            header = f.readline().split(delimiter)
        for line in f:
            entry = {}
            for i, value in enumerate(line.split(delimiter)):
                entry[header[i].strip()] = value.strip()
data.append(entry)
return data
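A self-contained usage sketch writing and reading back a tiny csv:
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as tmp:
    tmp.write("name,age\nada,36\n")
rows = read_csv_to_data(tmp.name)
assert rows == [{"name": "ada", "age": "36"}]
os.remove(tmp.name)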
|
f60e163e770680efd1f8944becd79a0dd7ceaa08
| 3,645,454
|
from telegram import ReplyKeyboardMarkup
def main_menu(update, context):
"""Handling the main menu
:param update: Update of the sent message
:param context: Context of the sent message
:return: Status for main menu
"""
keyboard = [['Eintragen'],
['Analyse']]
update.message.reply_text(
'Was möchtest du machen?',
reply_markup=ReplyKeyboardMarkup(keyboard)
)
return MAIN
|
bafc092ec662286f417a9a5d2c47a675336c4825
| 3,645,455
|
def build_model(inputs, num_classes, is_training, hparams):
"""Constructs the vision model being trained/evaled.
Args:
inputs: input features/images being fed to the image model build built.
num_classes: number of output classes being predicted.
is_training: is the model training or not.
hparams: additional hyperparameters associated with the image model.
Returns:
The logits of the image model.
"""
scopes = setup_arg_scopes(is_training)
if len(scopes) != 1:
        raise ValueError('Nested scopes deprecated in py3.')
with scopes[0]:
if hparams.model_name == 'pyramid_net':
logits = build_shake_drop_model(inputs, num_classes, is_training)
elif hparams.model_name == 'wrn':
logits = build_wrn_model(inputs, num_classes, hparams.wrn_size)
elif hparams.model_name == 'shake_shake':
logits = build_shake_shake_model(inputs, num_classes, hparams,
is_training)
elif hparams.model_name == 'resnet':
logits = build_resnet_model(inputs, num_classes, hparams,
is_training)
else:
raise ValueError("Unknown model name.")
return logits
|
0ad57496d77e4406c5081982a2c02f2111cb5b57
| 3,645,456
|
from flask import Flask, jsonify
import flask_monitoringdashboard
def get_test_app_for_status_code_testing(schedule=False):
"""
:return: Flask Test Application with the right settings
"""
app = Flask(__name__)
@app.route('/return-a-simple-string')
def return_a_simple_string():
return 'Hello, world'
@app.route('/return-a-tuple')
def return_a_tuple():
return 'Hello, world', 404
@app.route('/ridiculous-return-value')
def return_ridiculous_return_value():
return 'hello', 'ridiculous'
@app.route('/return-jsonify-default-status-code')
def return_jsonify_default_status_code():
return jsonify({
'apples': 'banana'
})
@app.route('/return-jsonify-with-custom-status-code')
def return_jsonify_with_custom_status_code():
response = jsonify({
'cheese': 'pears'
})
response.status_code = 401
return response
@app.route('/unhandled-exception')
def unhandled_exception():
potatoes = 1000
bananas = 0
return potatoes / bananas
app.config['SECRET_KEY'] = flask_monitoringdashboard.config.security_token
app.testing = True
flask_monitoringdashboard.user_app = app
app.config['WTF_CSRF_ENABLED'] = False
app.config['WTF_CSRF_METHODS'] = []
flask_monitoringdashboard.config.get_group_by = lambda: '12345'
flask_monitoringdashboard.bind(app=app, schedule=schedule)
TEST_CACHE = {'main': EndpointInfo()}
flask_monitoringdashboard.core.cache.memory_cache = TEST_CACHE
return app
|
69951350c8b14cf02b1327773665d9080b0eeb48
| 3,645,457
|
import numpy as np
def run_multiple_cases(x, y, z, door_height, door_width, t_amb,
HoC, time_ramp, hrr_ramp, num, door, wall,
simulation_time, dt_data):
"""
Generate multiple CFAST input files and calls other functions
"""
resulting_temps = np.array([])
for i in range(len(door_width)):
casename = gen_input(x, y, z, door_height[i], door_width[i],
t_amb[i], HoC, time_ramp, hrr_ramp, num, door,
wall, simulation_time, dt_data)
run_cfast(casename)
temps, outfile = read_cfast(casename)
outfile.close()
hgl = temps[:,1]
resulting_temps = np.append(hgl[-1], resulting_temps)
return(resulting_temps)
|
1c056b4c991889b81324857788cda416f90a8cdc
| 3,645,459
|
def get_all():
"""
Obtiene todas las tuplas de la relación Estudiantes
:returns: Todas las tuplas de la relación.
:rtype: list
"""
try:
conn = helpers.get_connection()
cur = conn.cursor()
cur.execute(ESTUDIANTE_QUERY_ALL)
result = cur.fetchall()
        # Commit the changes and release resources
conn.commit()
cur.close()
conn.close()
return result
except Exception as e:
raise e
|
8b2248f09b02bf8fb4198bd36e743a5d052dd9f3
| 3,645,460
|
import warnings
def load_fgong(filename, fmt='ivers', return_comment=False,
return_object=True, G=None):
"""Given an FGONG file, returns NumPy arrays ``glob`` and ``var`` that
correspond to the scalar and point-wise variables, as specified
in the `FGONG format`_.
.. _FGONG format: https://www.astro.up.pt/corot/ntools/docs/CoRoT_ESTA_Files.pdf
Also returns the first four lines of the file as a `comment`, if
desired.
The version number ``ivers`` is used to infer the format of floats
if ``fmt='ivers'``.
If ``return_object`` is ``True``, instead returns an :py:class:`FGONG`
object. This is the default behaviour as of v0.0.12. The old
behaviour will be dropped completely from v0.1.0.
Parameters
----------
filename: str
Name of the FGONG file to read.
fmt: str, optional
Format string for floats in `glob` and `var`. If ``'ivers'``,
        uses ``%16.9E`` if the file's ``ivers < 1000`` or ``%26.18E3`` if
``ivers >= 1000``. If ``'auto'``, tries to guess the size of each
float. (default: 'ivers')
return_comment: bool, optional
If ``True``, return the first four lines of the FGONG file.
These are comments that are not used in any calculations.
Returns
-------
glob: NumPy array
The scalar (or global) variables for the stellar model
var: NumPy array
The point-wise variables for the stellar model. i.e. things
that vary through the star like temperature, density, etc.
comment: list of strs, optional
The first four lines of the FGONG file. These are comments
that are not used in any calculations. Only returned if
``return_comment=True``.
"""
with tomso_open(filename, 'rb') as f:
comment = [f.readline().decode('utf-8').strip() for i in range(4)]
nn, iconst, ivar, ivers = [int(i) for i in f.readline().decode('utf-8').split()]
# lines = f.readlines()
lines = [line.decode('utf-8').lower().replace('d', 'e')
for line in f.readlines()]
tmp = []
if fmt == 'ivers':
if ivers < 1000:
N = 16
else:
N = 27
# try to guess the length of each float in the data
elif fmt == 'auto':
N = len(lines[0])//5
else:
N = len(fmt % -1.111)
for line in lines:
for i in range(len(line)//N):
s = line[i*N:i*N+N]
# print(s)
if s[-9:] == '-Infinity':
s = '-Inf'
elif s[-9:] == ' Infinity':
s = 'Inf'
elif s.lower().endswith('nan'):
s = 'nan'
elif 'd' in s.lower():
s = s.lower().replace('d','e')
tmp.append(float(s))
glob = np.array(tmp[:iconst])
var = np.array(tmp[iconst:]).reshape((-1, ivar))
if return_object:
return FGONG(glob, var, ivers=ivers, G=G,
description=comment)
else:
warnings.warn("From tomso 0.1.0+, `fgong.load_fgong` will only "
"return an `FGONG` object: use `return_object=True` "
"to mimic future behaviour",
FutureWarning)
if return_comment:
return glob, var, comment
else:
return glob, var
|
17fcac5511a588351701f921dc8449d81a603fb6
| 3,645,461
|
import ctypes
def dskb02(handle, dladsc):
"""
Return bookkeeping data from a DSK type 2 segment.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dskb02_c.html
:param handle: DSK file handle
:type handle: int
:param dladsc: DLA descriptor
:type dladsc: spiceypy.utils.support_types.SpiceDLADescr
:return: bookkeeping data from a DSK type 2 segment
:rtype: tuple
"""
handle = ctypes.c_int(handle)
nv = ctypes.c_int(0)
np = ctypes.c_int(0)
nvxtot = ctypes.c_int(0)
vtxbds = stypes.emptyDoubleMatrix(3, 2)
voxsiz = ctypes.c_double(0.0)
voxori = stypes.emptyDoubleVector(3)
vgrext = stypes.emptyIntVector(3)
cgscal = ctypes.c_int(0)
vtxnpl = ctypes.c_int(0)
voxnpt = ctypes.c_int(0)
voxnpl = ctypes.c_int(0)
libspice.dskb02_c(handle, dladsc, ctypes.byref(nv), ctypes.byref(np), ctypes.byref(nvxtot), vtxbds, ctypes.byref(voxsiz), voxori, vgrext, ctypes.byref(cgscal), ctypes.byref(vtxnpl), ctypes.byref(voxnpt), ctypes.byref(voxnpl))
return nv.value, np.value, nvxtot.value, stypes.cMatrixToNumpy(vtxbds), voxsiz.value, stypes.cVectorToPython(voxori), stypes.cVectorToPython(vgrext), cgscal.value, vtxnpl.value, voxnpt.value, voxnpl.value
|
b08eed84bd518d35166ee28df8f87c06b08220c4
| 3,645,463
|
import multiprocessing as mp
import node2vec as n2v
def train_node2vec(graph, dim, p, q):
"""Obtains node embeddings using Node2vec."""
emb = n2v.Node2Vec(
graph=graph,
dimensions=dim,
workers=mp.cpu_count(),
p=p,
q=q,
quiet=True,
).fit()
emb = {
node_id: emb.wv[str(node_id)]
for node_id in sorted(graph.nodes())
}
return emb
|
7cea146b2971e973de2ecd365ad25c7f4fd57289
| 3,645,465
|
def approx_match_dictionary():
"""Maps abbreviations to the part of the expanded form that is common beween all forms of the word"""
k=["%","bls","gr","hv","hæstv","kl","klst","km","kr","málsl",\
"málsgr","mgr","millj","nr","tölul","umr","þm","þskj","þús"]
v=['prósent','blaðsíð',\
'grein','háttvirt',\
'hæstvirt','klukkan',\
'klukkustund','kílómetr',\
'krón','málslið',\
'málsgrein','málsgrein',\
'milljón','númer','tölulið',\
'umræð','þingm',\
'þingskj','þúsund']
    return dict(zip(k, v))
|
021c7de862b2559b55051bc7267113d77132e195
| 3,645,466
|
import numpy as np
from scipy.sparse import isspmatrix
def matrix2array(M):
"""
1xN matrix to array.
In other words:
[[1,2,3]] => [1,2,3]
"""
if isspmatrix(M):
M = M.todense()
return np.squeeze(np.asarray(M))
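A doctest-style example of the squeeze:
import numpy as np
assert (matrix2array(np.matrix([[1, 2, 3]])) == np.array([1, 2, 3])).all()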
|
731317458f6ec7c068c1a9450447eba39e1423f9
| 3,645,467
|
import numpy as np
def expected(data):
"""Computes the expected agreement, Pr(e), between annotators."""
total = float(np.sum(data))
annotators = range(len(data.shape))
percentages = ((data.sum(axis=i) / total) for i in annotators)
percent_expected = np.dot(*percentages)
return percent_expected
|
86562fec2b17df35401b8d8b7eafd759a13715e3
| 3,645,468
|
import numpy
def maximization_step(num_words, stanzas, schemes, probs):
"""
Update latent variables t_table, rprobs
"""
t_table = numpy.zeros((num_words, num_words + 1))
rprobs = numpy.ones(schemes.num_schemes)
for i, stanza in enumerate(stanzas):
scheme_indices = schemes.get_schemes_for_len(len(stanza))
for scheme_index in scheme_indices:
myprob = probs[i, scheme_index]
rprobs[scheme_index] += myprob
scheme = schemes.scheme_list[scheme_index]
rhymelists = get_rhymelists(stanza, scheme)
for rhymelist in rhymelists:
for j, word_index in enumerate(rhymelist):
t_table[word_index, -1] += myprob
for word_index2 in rhymelist[:j] + rhymelist[j + 1:]:
t_table[word_index, word_index2] += myprob
# Normalize t_table
t_table_sums = numpy.sum(t_table, axis=0)
for i, t_table_sum in enumerate(t_table_sums.tolist()):
if t_table_sum != 0:
t_table[:, i] /= t_table_sum
# Normalize rprobs
totrprob = numpy.sum(rprobs)
rprobs /= totrprob
return t_table, rprobs
|
a0e23367d6dff50d79bb828a0af8a82b640400c8
| 3,645,469
|
import json
def account_export_mydata_content(account_id=None):
"""
Export ServiceLinks
:param account_id:
:return: List of dicts
"""
if account_id is None:
raise AttributeError("Provide account_id as parameter")
# Get table names
logger.info("ServiceLinkRecord")
db_entry_object = ServiceLinkRecord()
slr_table_name = db_entry_object.table_name
logger.info("ServiceLinkRecord table name: " + str(slr_table_name))
logger.info("ConsentRecord")
db_entry_object = ConsentRecord()
cr_table_name = db_entry_object.table_name
logger.info("ConsentRecord table name: " + str(cr_table_name))
# Get DB cursor
try:
cursor = get_db_cursor()
except Exception as exp:
logger.error('Could not get database cursor: ' + repr(exp))
raise
logger.info("Get SLR IDs")
db_entry_list = []
cursor, slr_id_list = get_slr_ids(cursor=cursor, account_id=account_id, table_name=slr_table_name)
for slr_id in slr_id_list:
logger.info("Getting SLR with slr_id: " + str(slr_id))
slr_dict = account_get_slr(account_id=account_id, slr_id=slr_id)
#
logger.info("Getting status records for SLR")
slsr_dict = account_get_slsrs(account_id=account_id, slr_id=slr_id)
logger.info("Appending status record to SLR")
slr_dict['status_records'] = slsr_dict
#
logger.info("Get CR IDs")
cr_dict_list = []
cursor, cr_id_list = get_cr_ids(slr_id=slr_id, table_name=cr_table_name, cursor=cursor)
for cr_id in cr_id_list:
logger.info("Getting CR with cr_id: " + str(cr_id))
cr_dict = account_get_cr(cr_id=cr_id, account_id=account_id)
logger.info("Getting status records for CR")
csr_dict = account_get_csrs(account_id=account_id, consent_id=cr_id)
logger.info("Appending status record to CR")
cr_dict['status_records'] = csr_dict
logger.info("Appending CR to CR list")
cr_dict_list.append(cr_dict)
#
slr_dict['consent_records'] = cr_dict_list
#
logger.info("Appending SLR to main list")
db_entry_list.append(slr_dict)
logger.info("SLR added to main list: " + json.dumps(slr_dict))
return db_entry_list
|
d61dd638319479572ecea5335f0a9a7fc7156410
| 3,645,470
|
from typing import List
def indicator_entity(indicator_types: List[str] = None) -> type:
"""Return custom model for Indicator Entity."""
class CustomIndicatorEntity(IndicatorEntity):
"""Indicator Entity Field (Model) Type"""
@validator('type', allow_reuse=True)
def is_empty(cls, value: str, field: 'ModelField') -> str:
"""Validate that the value is a non-empty string."""
if isinstance(value, str) and value.replace(' ', '') == '':
raise InvalidEmptyValue(field_name=field.name)
return value
@validator('type', allow_reuse=True)
def is_type(cls, value: str, field: 'ModelField') -> str:
"""Validate that the entity is of a specific Indicator type."""
if value.lower() not in [i.lower() for i in indicator_types]:
raise InvalidEntityType(
field_name=field.name, entity_type=str(indicator_types), value=value
)
return value
return CustomIndicatorEntity
|
f6c77ffd3b8415e07e0e64ab8120a084aab3e2c8
| 3,645,471
|
import numpy as np
from scipy import stats
def z_to_t(z_values, dof):
"""
Convert z-statistics to t-statistics.
An inversion of the t_to_z implementation of [1]_ from Vanessa Sochat's
TtoZ package [2]_.
Parameters
----------
z_values : array_like
Z-statistics
dof : int
Degrees of freedom
Returns
-------
t_values : array_like
T-statistics
References
----------
.. [1] Hughett, P. (2007). Accurate Computation of the F-to-z and t-to-z
Transforms for Large Arguments. Journal of Statistical Software,
23(1), 1-5.
.. [2] Sochat, V. (2015, October 21). TtoZ Original Release. Zenodo.
http://doi.org/10.5281/zenodo.32508
"""
# Select just the nonzero voxels
nonzero = z_values[z_values != 0]
# We will store our results here
t_values_nonzero = np.zeros(len(nonzero))
# Select values less than or == 0, and greater than zero
c = np.zeros(len(nonzero))
k1 = nonzero <= c
k2 = nonzero > c
# Subset the data into two sets
z1 = nonzero[k1]
z2 = nonzero[k2]
# Calculate p values for <=0
p_values_z1 = stats.norm.cdf(z1)
t_values_z1 = stats.t.ppf(p_values_z1, df=dof)
# Calculate p values for > 0
p_values_z2 = stats.norm.cdf(-z2)
t_values_z2 = -stats.t.ppf(p_values_z2, df=dof)
t_values_nonzero[k1] = t_values_z1
t_values_nonzero[k2] = t_values_z2
t_values = np.zeros(z_values.shape)
t_values[z_values != 0] = t_values_nonzero
return t_values
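A small usage sketch: zeros are preserved, and each nonzero t keeps the sign of its z but grows slightly in magnitude, since the t distribution has heavier tails.
import numpy as np
z = np.array([0.0, 1.96, -1.64])
t = z_to_t(z, dof=30)
assert t[0] == 0.0 and t[1] > 1.96 and t[2] < -1.64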
|
4700f52263519169a4610daee8c0940489b2731e
| 3,645,472
|
def getInputShape(model):
"""
Gets the shape when there is a single input.
Return:
Numeric dimensions, omits dimensions that have no value. eg batch
size.
"""
s = []
for dim in model.input.shape:
if dim.value:
s.append(dim.value)
return tuple(s)
|
628f61a995784b9be79816a5bbcde2f8204640be
| 3,645,473
|
import numpy as np
def get_node_depths(tree):
"""
Get the node depths of the decision tree
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> d = DecisionTreeClassifier()
>>> d.fit([[1,2,3],[4,5,6],[7,8,9]], [1,2,3])
>>> get_node_depths(d.tree_)
array([0, 1, 1, 2, 2])
"""
def get_node_depths_(current_node, current_depth, l, r, depths):
depths += [current_depth]
if l[current_node] != -1 and r[current_node] != -1:
get_node_depths_(l[current_node], current_depth + 1, l, r, depths)
get_node_depths_(r[current_node], current_depth + 1, l, r, depths)
depths = []
get_node_depths_(0, 0, tree.children_left, tree.children_right, depths)
return np.array(depths)
|
4a5a001600c0cb6b1b545be003708088bbd2d060
| 3,645,475
|
import attr
from hypothesis.strategies import booleans, sampled_from, text, tuples
from typing import Tuple
def homo_tuple_typed_attrs(draw, defaults=None, legacy_types_only=False, kw_only=None):
"""
Generate a tuple of an attribute and a strategy that yields homogenous
tuples for that attribute. The tuples contain strings.
"""
default = attr.NOTHING
val_strat = tuples(text(), text(), text())
if defaults is True or (defaults is None and draw(booleans())):
default = draw(val_strat)
return (
attr.ib(
type=draw(
sampled_from(
[tuple[str, ...], tuple, Tuple, Tuple[str, ...]]
if not legacy_types_only
else [tuple, Tuple, Tuple[str, ...]]
)
),
default=default,
kw_only=draw(booleans()) if kw_only is None else kw_only,
),
val_strat,
)
|
398e47ea6fb65ba0fab1e633ea27dc3cac30ed28
| 3,645,476
|
import numpy as np
from typing import Dict
from typing import Any
from typing import Callable
from typing import Union
from typing import Tuple
from typing import Optional
def flatland_env_factory(
evaluation: bool = False,
env_config: Dict[str, Any] = {},
preprocessor: Callable[
[Any], Union[np.ndarray, Tuple[np.ndarray], Dict[str, np.ndarray]]
] = None,
include_agent_info: bool = False,
random_seed: Optional[int] = None,
) -> FlatlandEnvWrapper:
"""Loads a flatand environment and wraps it using the flatland wrapper"""
del evaluation # since it has same behaviour for both train and eval
env = create_rail_env_with_tree_obs(**env_config)
wrapped_env = FlatlandEnvWrapper(env, preprocessor, include_agent_info)
if random_seed and hasattr(wrapped_env, "seed"):
wrapped_env.seed(random_seed)
return wrapped_env
|
a2076ef15964e60b7a5e4cf885e5b92da594f0ac
| 3,645,477
|
import six
def industry(code, market="cn"):
"""获取某个行业的股票列表。目前支持的行业列表具体可以查询以下网址:
https://www.ricequant.com/api/research/chn#research-API-industry
:param code: 行业代码,如 A01, 或者 industry_code.A01
:param market: 地区代码, 如'cn' (Default value = "cn")
:returns: 行业全部股票列表
"""
if not isinstance(code, six.string_types):
code = code.code
else:
code = to_industry_code(code)
return [
v.order_book_id
for v in _all_instruments_list(market)
if v.type == "CS" and v.industry_code == code
]
|
bf5606b93e17d5b5125f6afd133e86b5ded9a03d
| 3,645,478
|
def kewley_agn_oi(log_oi_ha):
"""Seyfert/LINER classification line for log([OI]/Ha)."""
return 1.18 * log_oi_ha + 1.30
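A hedged usage sketch: in the standard Kewley et al. [OI]-based diagnostic, points above this line are classified as Seyferts and points below as LINERs.
boundary = kewley_agn_oi(-1.0)  # 1.18 * (-1.0) + 1.30 = 0.12
is_seyfert = 0.5 > boundary  # a galaxy with log([OIII]/Hb) = 0.5 falls on the Seyfert side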
|
5e6b71742bec307ad609d855cced80ae08e5c35c
| 3,645,479
|
def XGMMLReader(graph_file):
"""
Arguments:
- `file`:
"""
parser = XGMMLParserHelper()
parser.parseFile(graph_file)
return parser.graph()
|
ef9c1cb101b22f3302cf93db7447431fb1f5cfa8
| 3,645,480
|
def pt_encode(index):
"""pt: Toggle light."""
return MessageEncode(f"09pt{index_to_housecode(index)}00", None)
|
1e2143d7c356736082d4dc25b459630e8c97fe7a
| 3,645,481
|
from qtpy.QtCore import QUrl
from qtpy.QtGui import QDesktopServices
def start_file(filename):
"""
Generalized os.startfile for all platforms supported by Qt
This function is simply wrapping QDesktopServices.openUrl
    Returns True if successful, otherwise returns False.
"""
# We need to use setUrl instead of setPath because this is the only
# cross-platform way to open external files. setPath fails completely on
# Mac and doesn't open non-ascii files on Linux.
# Fixes spyder-ide/spyder#740.
url = QUrl()
url.setUrl(filename)
return QDesktopServices.openUrl(url)
|
269704fdd5bbf4e3d3e35bec6e9862fe36602f22
| 3,645,484
|
def require_context(f):
"""Decorator to require *any* user or admin context.
This does no authorization for user or project access matching, see
:py:func:`authorize_project_context` and
:py:func:`authorize_user_context`.
The first argument to the wrapped function must be the context.
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
raise exception.NotAuthorized()
return f(*args, **kwargs)
return wrapper
|
7a052ddf20b9afff055daed09dbe0963269d46f4
| 3,645,485
|
import numpy as np
from scipy.spatial import ConvexHull, QhullError
def failsafe_hull(coords):
"""
Wrapper of ConvexHull which returns None if hull cannot be computed for given points (e.g. all colinear or too few)
"""
coords = np.array(coords)
if coords.shape[0] > 3:
try:
return ConvexHull(coords)
except QhullError as e:
if 'hull precision error' not in str(e) and 'input is less than 3-dimensional' not in str(e):
raise e
return None
|
dca4d35d98032f9c77da38a860c2209758babfda
| 3,645,486
|
def list_closed_poll_sessions(request_ctx, **request_kwargs):
"""
Lists all closed poll sessions available to the current user.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:return: List closed poll sessions
:rtype: requests.Response (with void data)
"""
path = '/v1/poll_sessions/closed'
url = request_ctx.base_api_url + path.format()
response = client.get(request_ctx, url, **request_kwargs)
return response
|
90c2d660a18ed9fa9f10f092a415e5f94148eba1
| 3,645,487
|
from typing import List
import struct
def _wrap_apdu(command: bytes) -> List[bytes]:
"""Return a list of packet to be sent to the device"""
packets = []
header = struct.pack(">H", len(command))
command = header + command
chunks = [command[i : i + _PacketData.FREE] for i in range(0, len(command), _PacketData.FREE)]
# Create a packet for each command chunk
for packet_id in range(len(chunks)):
header = struct.pack(">HBH", _CHANNEL_ID, _CmdTag.APDU, packet_id)
packet = header + chunks[packet_id]
        packet = packet.ljust(_PacketData.SIZE, bytes([0x0]))  # bytes are immutable, so keep the padded result
packets.append(packet)
return packets
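A comment-only sketch of the framing, with hypothetical values standing in for this module's constants:
# Assuming, e.g., _PacketData.SIZE = 64 (HID report size) and
# _PacketData.FREE = 59 (payload bytes left after the 5-byte
# channel|tag|sequence header packed as ">HBH"), then
#     packets = _wrap_apdu(bytes.fromhex("e0010000"))
# yields one 64-byte packet: 5-byte header, 2-byte length prefix,
# 4 APDU bytes, and zero padding.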
|
828521642b43758cf0c43f2c8af171d3463cacf5
| 3,645,488
|
from pathlib import Path, PurePath
def build_dtree(bins):
"""
Build the directory tree out of what's under `user/`. The `dtree` is a
dict of:
string name -> 2-list [inumber, element]
, where element could be:
- Raw bytes for regular file
- A `dict` for directory, which recurses on
"""
def next_inumber():
"""
Allocate the next available inumber.
"""
global curr_inumber
inumber = curr_inumber
curr_inumber += 1
return inumber
for b in bins:
bpath = Path(b)
if not bpath.is_file():
print("Error: user binary '{}' is not a regular file".format(b))
exit(1)
parts = PurePath(b).parts
parents = parts[1:-1]
binary = parts[-1]
if parts[0] != "user":
print("Error: user binray '{}' is not under 'user/'".format(b))
exit(1)
if not binary.endswith(".bin"):
print("Error: user binray '{}' does not end with '.bin'".format(b))
exit(1)
binary = binary[:-4]
curr_dir = dtree
for d in parents:
if d not in curr_dir:
curr_dir[d] = [next_inumber(), dict()]
curr_dir = curr_dir[d][1]
with bpath.open(mode='br') as bfile:
curr_dir[binary] = [next_inumber(), bytearray(bfile.read())]
|
66248226318a6225ea17d82d535012447b33f7e5
| 3,645,489
|
import numpy as np
def _compose_image(digit, background):
"""Difference-blend a digit and a random patch from a background image."""
w, h, _ = background.shape
dw, dh, _ = digit.shape
x = np.random.randint(0, w - dw)
y = np.random.randint(0, h - dh)
bg = background[x:x+dw, y:y+dh]
return np.abs(bg - digit).astype(np.uint8)
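A shape/dtype sanity sketch with random uint8 images (note the uint8 subtraction wraps modulo 256 before the abs):
import numpy as np
rng = np.random.default_rng(0)
background = rng.integers(0, 256, (64, 64, 3), dtype=np.uint8)
digit = rng.integers(0, 256, (28, 28, 3), dtype=np.uint8)
out = _compose_image(digit, background)
assert out.shape == digit.shape and out.dtype == np.uint8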
|
956e06623f0534bea93b446e9a742ae78aada69f
| 3,645,490
|
def permissions_vsr(func):
"""
:param func:
:return:
"""
def func_wrapper(name):
return "<p>{0}</p>".format(func(name))
return func_wrapper
|
a7e01f7711cab6bc46c004c4d062930c2a656eee
| 3,645,491
|
import numpy as np
import scipy.spatial
def tri_interpolate_zcoords(points: np.ndarray, triangles: np.ndarray, mesh_points: np.ndarray,
is_mesh_edge: np.ndarray, num_search_tris: int=10):
"""
Interpolate z-coordinates to a set of 2D points using 3D point coordinates and a triangular mesh.
If point is along a mesh boundary, the boundary values are used instead.
Returned values are:
z: The interpolated z-values
"""
# Get triangle centroid coordinates and create KD-tree.
tri_coords = points[triangles,:]
tri_coords2D = points[triangles,0:2]
tri_centroids = np.mean(tri_coords2D, axis=1)
tri_tree = scipy.spatial.cKDTree(tri_centroids)
# Loop over points.
coords2d = mesh_points[:,0:2]
num_mesh_points = coords2d.shape[0]
z = np.zeros(num_mesh_points, dtype=np.float64)
for point_num in range(num_mesh_points):
if not(is_mesh_edge[point_num]):
z[point_num] = project_2d_coords(tri_coords, coords2d[point_num,:], tri_tree, num_search_tris=num_search_tris)
return z
|
0a1702407c8a5b175b8fa8314eede203ac5a86ca
| 3,645,492
|
from typing import List
def getServiceTypes(**kwargs) -> List:
"""List types of services.
Returns:
List of distinct service types.
"""
services = getServices.__wrapped__()
types = [s['type'] for s in services]
uniq_types = [dict(t) for t in {tuple(sorted(d.items())) for d in types}]
return uniq_types
|
23bd7730b43c1d942450fc57c2a3c6f83f7c578c
| 3,645,493
|
from keras.callbacks import EarlyStopping, ModelCheckpoint
import pylab as plt
def train_model(train_data, test_data, model, model_name, optimizer, loss='mse', scale_factor=1000., batch_size=128, max_epochs=200, early_stop=True, plot_history=True):
""" Code to train a given model and save out to the designated path as given by 'model_name'
Parameters
----------
train_data : 2-tuple
(train_x, train_y) where train_x is the images and train_y is the Gaussian dot annotation images in the train split.
test_data : 2-tuple
(test_x, test_y) where test_x is the images and test_y is the Gaussian dot annotation images in the test split.
model : a Keras model
a defined Keras model
optimizer : Keras optimizer object
        the gradient descent optimizer e.g. Adam, SGD instance used to optimize the model. We used Adam() with default settings.
loss : string
one of 'mse' (mean squared error) or 'mae' (mean absolute error)
scale_factor : None or float
multiplicative factor to apply to annotation images to increase the gradient in the backpropagation
batch_size : int
number of images to batch together for training
max_epochs : int
the maximum number of epochs to train for if early_stop is enabled else this is the number of epochs of training.
early_stop : bool
if True, monitors the minimum of the test loss. If loss does not continue to decrease for a set duration, stop the training and return the model with the best test loss.
    plot_history : bool
if True, plots the training and test loss over the training period on the same axes for visualisation.
Returns
-------
None : void
This function will simply save the model to the location given by model_name.
"""
train_x, train_y = train_data
test_x, test_y = test_data
if scale_factor is not None:
train_y = train_y * float(scale_factor)
test_y = test_y * float(scale_factor)
# compile the model with chosen optimizer.
model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
if early_stop:
""" Set some early stopping parameters """
        early_stopping = EarlyStopping(monitor='val_loss',
                                       min_delta=0.001,
                                       patience=15,
                                       mode='min',
                                       verbose=1)
checkpoint = ModelCheckpoint(model_name,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
        history = model.fit(train_x, train_y,
                            batch_size=batch_size,
                            epochs=max_epochs,
                            validation_data=(test_x, test_y), shuffle=True,
                            callbacks=[early_stopping, checkpoint])
    else:
        history = model.fit(train_x, train_y,
                            batch_size=batch_size,
                            epochs=max_epochs,
                            validation_data=(test_x, test_y), shuffle=True)
model.save(model_name) # save the whole model state.
if plot_history:
plt.figure()
plt.plot(history.history['loss'], 'r', label='train loss')
plt.plot(history.history['val_loss'], 'g', label='test loss')
plt.legend()
plt.show()
    return None
|
3d74e765065b8514dd43d0a0ba6f83542bc47b11
| 3,645,494
|
from elasticsearch import Elasticsearch
def get_pipeline_storage_es_client(session, *, index_date):
"""
Returns an Elasticsearch client for the pipeline-storage cluster.
"""
secret_prefix = f"elasticsearch/pipeline_storage_{index_date}"
host = get_secret_string(session, secret_id=f"{secret_prefix}/public_host")
port = get_secret_string(session, secret_id=f"{secret_prefix}/port")
protocol = get_secret_string(session, secret_id=f"{secret_prefix}/protocol")
username = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_username"
)
password = get_secret_string(
session, secret_id=f"{secret_prefix}/read_only/es_password"
)
return Elasticsearch(f"{protocol}://{username}:{password}@{host}:{port}")
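A usage sketch with an illustrative index date; it assumes a boto3 session and still depends on the surrounding codebase's get_secret_string helper:

import boto3
session = boto3.Session()
es = get_pipeline_storage_es_client(session, index_date="2021-07-06")
print(es.info())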
|
8b759f1c2b6fa2b525a0a20653bd1ff99441e893
| 3,645,495
|
import resampy
def cqcc_resample(s, fs_orig, fs_new, axis=0):
"""implement the resample operation of CQCC
Parameters
----------
s : ``np.ndarray``
the input spectrogram.
fs_orig : ``int``
origin sample rate
fs_new : ``int``
new sample rate
axis : ``int``
the resample axis
Returns
-------
spec_res : ``np.ndarray``
spectrogram after resample
"""
if int(fs_orig) != int(fs_new):
s = resampy.resample(s, sr_orig=fs_orig, sr_new=fs_new,
axis=axis)
return s
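For example, halving the sample rate of a fake 512-bin, 100-frame spectrogram along axis 0 (shapes are illustrative):

import numpy as np
spec = np.random.randn(512, 100)
spec_res = cqcc_resample(spec, fs_orig=16000, fs_new=8000, axis=0)
print(spec_res.shape)  # roughly half as many rows along axis 0, ~ (256, 100)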
|
d252fdc2587c48d15d7f41224df3bfcd9e17693c
| 3,645,496
|
import torch.nn as nn
def weights_init():
"""
Gaussian init.
"""
def init_fun(m):
classname = m.__class__.__name__
if (classname.find("Conv") == 0 or classname.find("Linear") == 0) and hasattr(m, "weight"):
nn.init.normal_(m.weight, 0.0, 0.02)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0.0)
return init_fun
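Typical PyTorch usage applies the returned initializer across a module tree, for example:

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.ReLU(), nn.Linear(16, 10))
model.apply(weights_init())  # Conv/Linear weights ~ N(0, 0.02), biases = 0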
|
f56aa9c988b93d30c6a78769bc0f2c86f0209cd8
| 3,645,497
|
def named(name):
"""
This function is used to decorate middleware functions in order
for their before and after sections to show up during a verbose run.
For examples see documentation to this module and tests.
"""
def new_annotate(mware):
def new_middleware(handler):
new_handler = mware(handler)
def verbose_handler(ctx):
_print_inwards(name)
new_ctx = new_handler(ctx)
_print_outwards(name)
return new_ctx
return verbose_handler
return new_middleware
return new_annotate
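A sketch of decorating a middleware so its before/after phases are announced during a verbose run (the middleware body below is hypothetical):

@named("auth")
def auth_mware(handler):
    def new_handler(ctx):
        # ... inspect or augment ctx here ...
        return handler(ctx)
    return new_handler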
|
0f1cef0788eae16bf557b5f7cb01bd52e913203d
| 3,645,498
|
import pandas as pd
def concatFile(file_list):
""" To combine files in file list.
"""
config = getConfig()
print('[load]concating...')
df_list = []
for f in file_list:
print(f)
tmp = pd.read_csv(config['dir_raw']+f, index_col=None, header=0)
df_list.append(tmp)
df = pd.concat(df_list, axis=0, ignore_index=True)
return df
|
c289db2e1a995f3b536f2d472eed550843980635
| 3,645,499
|
def multiply_str(char, times):
"""
Return multiplied character in string
"""
return char * times
|
cc69f0e16cba1b8c256301567905e861c05291ea
| 3,645,500
|
def calories_per_item(hundr, weight, number_cookies, output_type):
"""
>>> calories_per_item(430, 0.3, 20, 0)
'One item has 64.5 kcal.'
>>> calories_per_item(430, 0.3, 20, 1)
'One item has 64.5 Calories.'
>>> calories_per_item(1, 1000, 10, 1)
'One item has 1000.0 Calories.'
>>> calories_per_item(1, 1000, 10, 0)
'One item has 1000.0 kcal.'
>>> calories_per_item(0, 1000, 10, 0)
'One item has 0.0 kcal.'
"""
    kcal_per_kg = hundr * 10  # convert kcal per 100 g to kcal per kg
unit = 'kcal'
if output_type == 1: # change output unit based on input
unit = 'Calories'
    return 'One item has ' + str((kcal_per_kg * weight) / number_cookies) + ' ' + unit + '.'
|
9ca16eee8aa8a81424aeaa30f696fb5bec5e3956
| 3,645,501
|
def bitcoind_call(*args):
"""
Run `bitcoind`, return OS return code
"""
_, retcode, _ = run_subprocess("/usr/local/bin/bitcoind", *args)
return retcode
|
efa585a741da1ba3bf94650de1d7296228c15e7e
| 3,645,502
|
def getItemProduct(db, itemID):
"""
Get an item's linked product id
:param db: database pointer
:param itemID: int
:return: int
"""
# Get the one we want
item = db.session.query(Item).filter(Item.id == itemID).first()
# if the query didn't return anything, raise noresult exception
if (not item):
raise NoResult
# otherwise, return the product_id
else:
        # Return its linked product id
return item.product_id
|
fbbd2b2108bba78af1abc4714653065e12906ee3
| 3,645,503
|
from typing import Optional
def find_board(board_id: BoardID) -> Optional[Board]:
"""Return the board with that id, or `None` if not found."""
board = db.session.get(DbBoard, board_id)
if board is None:
return None
return _db_entity_to_board(board)
|
16f687304d1008b3704d641a7e9e5e624475e045
| 3,645,504
|
from typing import Any
def test_isin_pattern_0():
"""
Test IsIn pattern which expresses the IsIn/OneOf semantics.
"""
inputs = Tensor(np.ones([42]), mindspore.float16)
softmax_model = nn.Softmax()
@register_pass(run_only_once=True)
def softmax_relu_pass():
x = Any()
softmax_pattern = Prim(P.Softmax())
call_softmax = Call(softmax_pattern, [x])
relu_pattern = Prim(P.ReLU())
call_relu = Call(relu_pattern, [x])
pattern = OneOf([call_softmax, call_relu])
relu6_pattern = Prim(P.ReLU6())
target = Call(relu6_pattern, [x])
return pattern, target
transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(2)
unregister_pass(softmax_relu_pass)
assert "ReLU6" in transformed_repr
assert "Softmax" not in transformed_repr
|
78e169fcab894c3cf7956884bd3553983fda5bae
| 3,645,505
|
from datetime import timedelta
def ENsimtime():
    """retrieves the current simulation time t as a datetime.timedelta instance"""
    return timedelta(seconds=_current_simulation_time.value)
|
4dd971b3af9d0a2544e809ea7726521d9ce8e5b1
| 3,645,506
|
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center):
"""Returns the Solar True Longitude with Solar Geometric Mean Longitude,
solar_geometric_mean_longitude, and Solar Equation of Center,
solar_equation_of_center."""
solar_true_longitude = solar_geometric_mean_longitude + solar_equation_of_center
return solar_true_longitude
|
a335bb82002846eb2bc2106675c13e9f3ee28900
| 3,645,507
|
import base64
def image_to_fingerprint(image, size=FINGERPRINT_SIZE):
"""Create b64encoded image signature for image hash comparisons"""
data = image.copy().convert('L').resize((size, size)).getdata()
return base64.b64encode(bytes(data)).decode()
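A usage sketch with Pillow; passing size explicitly avoids depending on the module-level FINGERPRINT_SIZE default:

from PIL import Image
img = Image.new('RGB', (100, 80), color=(200, 30, 30))
fp = image_to_fingerprint(img, size=8)  # base64 of the 8x8 grayscale thumbnail
print(fp[:16])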
|
83ff567bce0530b69a9b43c40ea405af825831ff
| 3,645,508
|
from datetime import datetime, date, timedelta
import numpy as np
import pandas
def get_indices(
time: str | datetime | date, smoothdays: int = None, forcedownload: bool = False
) -> pandas.DataFrame:
"""
alternative going back to 1931:
ftp://ftp.ngdc.noaa.gov/STP/GEOMAGNETIC_DATA/INDICES/KP_AP/
20 year Forecast data from:
https://sail.msfc.nasa.gov/solar_report_archives/May2016Rpt.pdf
"""
dtime = todatetime(time)
fn = downloadfile(dtime, forcedownload)
# %% load data
dat: pandas.DataFrame = load(fn)
# %% optional smoothing over days
if isinstance(smoothdays, int):
periods = np.rint(timedelta(days=smoothdays) / (dat.index[1] - dat.index[0])).astype(int)
if "f107" in dat:
dat["f107s"] = dat["f107"].rolling(periods, min_periods=1).mean()
if "Ap" in dat:
dat["Aps"] = dat["Ap"].rolling(periods, min_periods=1).mean()
# %% pull out the times we want
i = [dat.index.get_loc(t, method="nearest") for t in dtime]
Indices = dat.iloc[i, :]
return Indices
|
e8880caac96e9b3333c2f1f557b5918ee40cdbbe
| 3,645,509
|
import json
def check(device, value):
"""Test for valid setpoint without actually moving."""
value = json.loads(value)
return zmq_single_request("check_value", {"device": device, "value": value})
|
f08e80348f97531ed51207aff685a470ca62bc41
| 3,645,511
|
def get_projectID(base_url, start, teamID, userID):
"""
Get all the project from jama
Args:
base_url (string): jama instance base url
start (int): start at a specific location
teamID (string): user team ID, for OAuth
userID (string): user ID, for OAuth
Returns:
(dict): Returns JSON object of the Jama API /projects
"""
url = base_url + "/rest/latest/projects?startAt=" +\
str(start) + "&maxResults=50"
return api_caller.get(teamID, userID, url)
|
92deaf007530b67be6459c7fd0a0e196dbe18216
| 3,645,513
|
import numpy as np
def to_world(points_3d, key2d, root_pos):
    """ Transform coordinates from camera to world coordinates """
_, _, rcams = data_handler.get_data_params()
n_cams = 4
n_joints_h36m = 32
# Add global position back
points_3d = points_3d + np.tile(root_pos, [1, n_joints_h36m])
# Load the appropriate camera
key3d = data_handler.get_key3d(key2d[:3])
subj, _, sname = key3d
subj = int(subj)
cname = sname.split('.')[1] # <-- camera name
scams = {(subj, c+1): rcams[(subj, c+1)] for c in range(n_cams)} # cams of this subject
scam_idx = [scams[(subj, c+1)][-1] for c in range(n_cams)].index(cname) # index of camera used
the_cam = scams[(subj, scam_idx+1)] # <-- the camera used
R, T, f, c, k, p, name = the_cam
assert name == cname
def cam2world_centered(data_3d_camframe):
data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
data_3d_worldframe = data_3d_worldframe.reshape((-1, n_joints_h36m*3))
# subtract root translation
return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3], (1, n_joints_h36m))
# Apply inverse rotation and translation
return cam2world_centered(points_3d)
|
9b56b946569dac35231282009389a777e908d09f
| 3,645,514
|
def orbital_energies_from_filename(filepath):
"""Returns the orbital energies from the given filename through
functional composition
:param filepath: path to the file
"""
return orbital_energies(spe_list(
lines=list(content_lines(filepath, CMNT_STR))))
|
669bfbe18bb8686e2f9fdc89dcdb3a36aeec6940
| 3,645,515
|
from copy import deepcopy
def _dict_merge(a, b):
""" `_dict_merge` deep merges b into a and returns the new dict.
"""
if not isinstance(b, dict):
return b
result = deepcopy(a)
for k, v in b.items():
if k in result and isinstance(result[k], dict):
result[k] = _dict_merge(result[k], v)
else:
result[k] = deepcopy(v)
return result
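For example, nested keys merge recursively while scalar values from b win:

a = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
b = {'db': {'port': 6432}, 'debug': True}
print(_dict_merge(a, b))
# {'db': {'host': 'localhost', 'port': 6432}, 'debug': True}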
|
278bfa6f8895fda0ae86b0ff2014602a7e9225df
| 3,645,516
|
import stat
import functools
import operator
def flags(flags: int, modstring: str) -> int:
""" Modifies the stat flags according to *modstring*, mirroring the syntax for POSIX `chmod`. """
mapping = {
'r': (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH),
'w': (stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH),
'x': (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
}
target, direction = 'a', None
for c in modstring:
if c in '+-':
direction = c
continue
if c in 'ugoa':
target = c
direction = None # Need a - or + after group specifier.
continue
if c in 'rwx' and direction and direction in '+-':
if target == 'a':
mask = functools.reduce(operator.or_, mapping[c])
else:
mask = mapping[c]['ugo'.index(target)]
if direction == '-':
flags &= ~mask
else:
flags |= mask
continue
raise ValueError('invalid chmod: {!r}'.format(modstring))
return flags
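For example, starting from common POSIX modes:

assert flags(0o644, 'u+x') == 0o744
assert flags(0o755, 'o-x') == 0o754
assert flags(0o600, 'a+r') == 0o644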
|
9acfeb4d9b90a12d2308c0ec992cfbb47f11000c
| 3,645,517
|
def _can_contain(ob1, ob2, other_objects, all_obj_locations, end_frame,
min_dist):
""" Return true if ob1 can contain ob2. """
assert len(other_objects) == len(all_obj_locations)
# Only cones do the contains, and can contain spl or smaller sphere/cones,
# cylinders/cubes are too large
if (len(ob1) == 1 and ob1[0][0]['sized'] > ob2[0][0]['sized'] and
ob1[0][0]['shape'] == 'cone' and
ob2[0][0]['shape'] in ['cone', 'sphere', 'spl']):
# Also make sure the moved object will not collide with anything
# there
collisions = [
_obj_overlap(
# ob2 location since the ob1 will be moved to ob2's location
# but will have the size of ob1,
(ob2[0][1].location[0], ob2[0][1].location[1],
ob1[0][1].location[2]),
ob1[0][0]['sized'],
# top objects location at the end point, and its size
other_locations[0][end_frame], other_obj[0][0]['sized'],
min_dist)
for other_obj, other_locations in
zip(other_objects, all_obj_locations)]
if not any(collisions):
return True
return False
|
391119dae5e86efe0c99bae7c603a1f785c69c04
| 3,645,518
|
import numpy as np
import pandas as pd
def twolmodel(attr, pulse='on'):
"""
This is the 2-layer ocean model
requires a forcing in W/m2
pulse = on - radiative pulse W/m2
    pulse = off - time-varying radiative forcing W/m2/yr
pulse = time - use output from simple carbon model
"""
#### Parameters ####
yeartosec = 30.25*24*60*60*12
rho = 1025 # density of sea water kg/m3
cw = 3985 # specific heat of sea water J/KgK
###################
# define time steps of the model
timesteps = np.arange(0,attr['endtime']+attr['dt'],attr['dt'])
df = pd.DataFrame(index=timesteps,columns=['T_sfc','T_deep'],data=np.zeros((len(timesteps), 2)))
for t in range(len(timesteps)-1):
        if pulse == 'on':
if t == 0:
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R'] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
else:
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + 0 + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
        elif pulse == 'off':
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R']*timesteps[t] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
        elif pulse == 'time':
df.iloc[t+1,df.columns.get_indexer(['T_sfc'])] = df.iloc[t]['T_sfc'] + (attr['dt']*yeartosec/(rho*cw*attr['hsfc'])) * (attr['lb']*df.iloc[t]['T_sfc'] + attr['R'][t] + attr['beta']*attr['e']*(df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc']))
df.iloc[t+1,df.columns.get_indexer(['T_deep'])] = df.iloc[t]['T_deep'] + (attr['dt']*yeartosec/(rho*cw*attr['hdeep'])) * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep']))
return df
|
4f6649a8df1febe54a6c04fdee938591a0c997b2
| 3,645,519
|
def maximumToys(prices, k):
"""Problem solution."""
prices.sort()
c = 0
for toy in prices:
if toy > k:
return c
else:
k -= toy
c += 1
return c
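For example, with prices sorted internally and a budget k of 50:

print(maximumToys([1, 12, 5, 111, 200, 1000, 10], 50))  # -> 4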
|
0ce709ff7b106b5379217cb6b7f1f481d27c94e7
| 3,645,520
|
def get_X_HBR_d_t_i(X_star_HBR_d_t):
"""(47)
Args:
        X_star_HBR_d_t: absolute humidity of the occupied rooms under balanced load at time t of date d (kg/kg(DA))
    Returns:
        actual absolute humidity of the occupied rooms in heating/cooling zone i at time t of date d (kg/kg(DA))
"""
X_star_HBR_d_t_i = np.tile(X_star_HBR_d_t, (5, 1))
return X_star_HBR_d_t_i
|
125d70ff96ce1a035df98d6995aa55ea3728ffa9
| 3,645,522
|
from typing import Optional
from typing import Dict
from typing import Callable
from typing import Any
def add_route(url: str,
response: Optional[str] = None,
method: str = 'GET',
response_type: str = 'JSON',
status_code: int = 200,
headers: Optional[Dict[str, str]] = None,
callback: Optional[Callable[[Any], None]] = None,
) -> None:
"""
Add route to app.
:param url: the URL rule as string
:param response: return value
:param method: HTTP method
:param response_type: type of response (JSON, HTML, RSS)
:param status_code: return status code
:param headers: return headers
    :param callback: function that will be executed before the response is returned
"""
endpoint = '{url}::{method}::{status_code}'.format(
url=url, method=method, status_code=status_code
)
@app.route(url, endpoint=endpoint, methods=[method])
def handler(*args, **kwargs):
if callback is not None:
callback(request, *args, **kwargs)
json_response = jsonify(response)
if headers is not None:
json_response.headers.update(headers)
return json_response, status_code
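A usage sketch, assuming a Flask ``app`` object exists in the enclosing module:

add_route('/ping', response={'status': 'ok'})
add_route('/items', response={'id': 1}, method='POST', status_code=201)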
|
f103b6d6faffff4a816fdf7c3c0124ea41622fe1
| 3,645,523
|
def findUsername(data):
"""Find a username in a Element
Args:
data (xml.etree.ElementTree.Element): XML from PMS as a Element
Returns:
username or None
"""
elem = data.find('User')
if elem is not None:
return elem.attrib.get('title')
return None
|
f7b6bb816b9eeeca7e865582935a157cdf276928
| 3,645,524
|
def GET(request):
"""Get this Prefab."""
request.check_required_parameters(path={'prefabId': 'string'})
prefab = Prefab.from_id(request.params_path['prefabId'])
prefab.check_exists()
prefab.check_user_access(request.google_id)
return Response(200, 'Successfully retrieved prefab', prefab.obj)
|
07a7078cb73893309372c0a8d48857eefc77a41e
| 3,645,526
|
def fix_empty_strings(tweet_dic):
"""空文字列を None に置換する"""
def fix_media_info(media_dic):
for k in ['title', 'description']:
if media_dic.get('additional_media_info', {}).get(k) == '':
media_dic['additional_media_info'][k] = None
return media_dic
for m in tweet_dic.get('entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_tweet', {}).get('entities', {}).get('media', []):
m = fix_media_info(m)
for m in tweet_dic.get('extended_tweet', {}).get('extended_entities', {}).get('media', []):
m = fix_media_info(m)
for k in [
'profile_background_image_url',
'profile_background_image_url_https',
'profile_image_url',
'profile_image_url_https',
]:
if tweet_dic.get('user', {}).get(k) == '':
tweet_dic['user'][k] = None
return tweet_dic
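For example, an empty profile image URL is nulled out:

tweet = {'user': {'profile_image_url': '', 'screen_name': 'alice'}}
print(fix_empty_strings(tweet)['user']['profile_image_url'])  # -> None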
|
436daaeb9b96b60867d27812ed7388892ab79b1a
| 3,645,527
|
import math
import numpy as np
def fibonacci(**kwargs):
"""Fibonacci Sequence as a numpy array"""
n = int(math.fabs(kwargs.pop('n', 2)))
zero = kwargs.pop('zero', False)
weighted = kwargs.pop('weighted', False)
if zero:
a, b = 0, 1
else:
n -= 1
a, b = 1, 1
result = np.array([a])
for i in range(0, n):
a, b = b, a + b
result = np.append(result, a)
if weighted:
fib_sum = np.sum(result)
if fib_sum > 0:
return result / fib_sum
else:
return result
else:
return result
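For example:

print(fibonacci(n=5))                 # -> [1 1 2 3 5]
print(fibonacci(n=5, zero=True))      # -> [0 1 1 2 3 5]
print(fibonacci(n=4, weighted=True))  # -> fractions of the total, summing to 1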
|
055d157120866c9bfe74374d62cffcc8f599d4bb
| 3,645,529
|
def read_data():
"""Reads in the data from (currently) only the development file
and returns this as a list. Pops the last element, because it is empty."""
with open('../PMB/parsing/layer_data/4.0.0/en/gold/dev.conll') as file:
data = file.read()
data = data.split('\n\n')
data.pop(-1)
return data
|
da75e237bbc7b2168cd5af76eefaf389b29d4b30
| 3,645,530
|
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'):
"""
Calculate the relative extrema of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative extrema.
comparator : callable
Function to use to compare two data points.
Should take two arrays as arguments.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated. 'wrap' (wrap around) or
'clip' (treat overflow as the same as the last (or first) element).
Default is 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
See Also
--------
argrelmin, argrelmax
Notes
-----
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy.signal import argrelextrema
>>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
>>> argrelextrema(x, np.greater)
(array([3, 6]),)
>>> y = np.array([[1, 2, 1, 2],
... [2, 2, 0, 0],
... [5, 3, 4, 4]])
...
>>> argrelextrema(y, np.less, axis=1)
(array([0, 2]), array([2, 1]))
"""
results = _boolrelextrema(data, comparator,
axis, order, mode)
return np.where(results)
|
66d565fad5672615f1340979a3c59e5abbbab3f5
| 3,645,531
|
import heapq as hq
import math
def dijkstra(G, s):
"""
find all shortest paths from s to each other vertex in graph G
"""
n = len(G)
visited = [False]*n
weights = [math.inf]*n
path = [None]*n
queue = []
weights[s] = 0
hq.heappush(queue, (0, s))
while len(queue) > 0:
g, u = hq.heappop(queue)
visited[u] = True
for v, w in G[u]:
if not visited[v]:
f = g + w
if f < weights[v]:
weights[v] = f
path[v] = u
hq.heappush(queue, (f, v))
return path, weights
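A small usage sketch on an adjacency-list graph, where G[u] is a list of (neighbor, weight) pairs:

G = [[(1, 4), (2, 1)],  # vertex 0
     [(3, 1)],          # vertex 1
     [(1, 2), (3, 5)],  # vertex 2
     []]                # vertex 3
path, weights = dijkstra(G, 0)
print(weights)  # -> [0, 3, 1, 4]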
|
85069b177ac646f449ce8e3ccf6d9c5b9de7b2e3
| 3,645,532
|