content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
def ordered_links(d, k0, k1):
    """
    Find ordered links starting from the link (k0, k1).

    Parameters
    ==========
    d : dict for the graph
    k0, k1: adjacent nodes of the graph

    Examples
    ========
    >>> from active_nodes import ordered_links
    >>> d = {0:[1,4], 1:[0,2], 2:[1,3], 3:[2,4], 4:[0,3]}
    >>> ordered_links(d, 0, 1)
    [(0, 1), (0, 4), (1, 2), (2, 3), (3, 4)]
    """
    assert k0 in d
    assert k1 in d[k0]
    dx = defaultdict(list)
    links = []
    # Seed the link list with the starting edge, then extend it once.
    _append_link(dx, links, k0, k1)
    _add_links1(links, d, dx)
    # Keep growing through "active" nodes: nodes that are partially but not
    # fully covered by the links collected so far.
    while True:
        active = [k for k in dx if 0 < len(dx[k]) < len(d[k])]
        if not active:
            break
        a1 = _short_path_active_nodes_all(d, dx, active)
        if a1 is None:
            break
        _add_paths(d, dx, links, a1)
    return links
|
472e9e7d459e8a574de8edd5272c96b648b50207
| 3,640,868
|
def _exceeded_threshold(number_of_retries: int, maximum_retries: int) -> bool:
"""Return True if the number of retries has been exceeded.
Args:
number_of_retries: The number of retry attempts made already.
maximum_retries: The maximum number of retry attempts to make.
Returns:
True if the maximum number of retry attempts have already been
made.
"""
if maximum_retries is None:
# Retry forever.
return False
return number_of_retries >= maximum_retries
|
c434e1e752856f9160d40e25ac20dde0583e50a6
| 3,640,869
|
import json
def _get_and_check_response(method, host, url, body=None, headers=None, files=None, data=None, timeout=30):
    """Wait for the HTTPS response and throw an exception if the return
    status is not OK. Return either a dict based on the
    HTTP response in JSON, or if the response is not in JSON format,
    return a tuple containing the data in the body and the content type.
    """
    url = 'https://' + host + url
    # print(url)
    if files:
        # A multipart upload takes precedence over the `method` argument.
        res = https_session.post(url, files=files, data=data, timeout=timeout)
    elif method == 'POST':
        # NOTE(review): `body` and `headers` are passed positionally; if
        # `https_session` is a requests Session this maps to
        # (data=body, json=headers), not to request headers — confirm intent.
        res = https_session.post(url, body, headers, timeout=timeout)
    else:
        res = https_session.get(url, timeout=timeout)
    # Raises for any non-2xx status.
    res.raise_for_status()
    content_type = res.headers.get(CONTENT_TYPE, None)
    content = res.text
    if content_type and content_type.startswith(CONTENT_TYPE_JSON):
        # Quickfix to remove second key in bad API response
        key = '"FaxContainerFile":'
        if content.count(key) == 2:
            # Cut from the second occurrence onward and re-close the object
            # so the payload parses as valid JSON.
            content = content[:content.rfind(key)].rstrip(',') + "}"
        return json.loads(content)
    else:
        return (content, content_type)
|
559d85ee8f7d21445e5cfa0acc464b3e9ad98fe3
| 3,640,870
|
def moveb_m_human(agents, self_state, self_name, c, goal):
    """
    Block-stacking method for the human agent.

    Emits a pick+stack action pair when block ``c`` is reachable by this
    agent, is required on the stack by the goal, and is not already stacked;
    otherwise no action is needed.
    """
    # Guard clauses mirror the original short-circuit evaluation order.
    if self_name not in self_state.isReachableBy[c]:
        return []
    if c not in goal.isOnStack or not goal.isOnStack[c]:
        return []
    if self_state.isOnStack[c]:
        return []
    return [("human_pick", c), ("human_stack",)]
|
f99fd14b2091a1e8d0426dcef57ce33b96fc1352
| 3,640,871
|
import tkinter
def BooleanVar(default, callback=None):
    """
    Build an initialized `tkinter.BooleanVar`.

    @param default initial value for the variable
    @param callback optional function fired whenever the value changes
    @return the newly created variable
    """
    factory = tkinter.BooleanVar
    return _var(factory, default, callback)
|
451a43da5e9eb506fe8b928fa7f4e986c8da6b69
| 3,640,873
|
import re
def parse_header(source):
    """Copied from textgrid.parse_header.

    Read the three header lines of a Praat TextGrid file and return
    ``(file_type, short)`` where ``short`` tells whether the file uses the
    "short" TextGrid format.

    Raises:
        ValueError: if the first line is not an ooTextFile header.
    """
    header = source.readline()  # e.g. File type = "ooTextFile"
    # Raw string: '\w' in a plain literal is an invalid escape sequence.
    m = re.match(r'File type = "([\w ]+)"', header)
    if m is None or not m.group(1).startswith('ooTextFile'):
        raise ValueError('The file could not be parsed as a Praat text file as '
                         'it is lacking a proper header.')
    short = 'short' in m.group(1)
    file_type = parse_line(source.readline(), short, '')  # header junk
    source.readline()  # skip the third header line (junk, previously bound unused)
    return file_type, short
|
ff47296868f93cbe55d15b29a2245ceb14ed5460
| 3,640,874
|
from datetime import datetime
def create_amsterdam(*args):
    """
    Creates a new droplet with sensible defaults.

    Usage:
        [name]

    Arguments:
        name: (optional) name to give the droplet; if missing, current timestamp
    """
    if args:
        name = args[0]
    else:
        # Bug fix: `datetime` is already the class (from-import), so the old
        # `datetime.datetime.utcnow()` raised AttributeError.
        name = datetime.utcnow().strftime("%Y-%m-%dT%H-%M-%S.%f")
    return create_small_droplet(name, 'ams2', 'ubuntu-17-04-x64')
|
ed01c67db180894bbcf2cdfee4cd2f45633cc637
| 3,640,875
|
def convert_inp(float_inp):
    """
    Convert innings pitched from a decimal value (0.000, 0.333, 0.667, ...)
    to baseball display notation (0.0, 0.1, 0.2).

    :param float float_inp: innings pitched as a plain float
    :return: display value, or None if the fraction is not a standard amount
    """
    # Split into whole innings and the fractional part.
    i_inp, d_inp = divmod(float_inp, 1)
    # Round the first decimal digit instead of inspecting repr(): floats such
    # as 0.333 * 10 stringify as 3.3299... so the old string inspection was
    # fragile (that was the "repr(3)[0] = 2" mystery in the original).
    tenth = int(round(d_inp * 10))
    if tenth == 0:
        return i_inp + 0.0
    if tenth in (2, 3):
        return i_inp + 0.1
    if tenth in (5, 6, 7):
        return i_inp + 0.2
    print("{0} innings is not a standard amount!".format(float_inp))
    return None
|
ce0e196ca570b02787842db3ec2efb6ac529685c
| 3,640,876
|
def is_ipv4(line):
    """Check whether the line is an IPv4 entry.

    The "ipv4" marker must occur at index 6 or later; a missing marker
    (``find`` returning -1) or an earlier hit is rejected.
    """
    return line.find("ipv4") >= 6
|
bd602f5a9ac74d2bd115fe85c90490556932e068
| 3,640,878
|
def format_ica_lat(ff_lat):
    """
    Convert a latitude in degrees to the GGMM.mmmH format.

    @param ff_lat: latitude in degrees
    @return: string in GGMM.mmmH format
    """
    # Degrees -> degrees/minutes/seconds.
    degrees, minutes, seconds = deg2dms(ff_lat)
    # Pack into GGMM.mmm: degrees scaled by 100, seconds folded into minutes.
    packed = (abs(degrees) * 100) + minutes + (seconds / 60.)
    hemisphere = 'S' if ff_lat <= 0 else 'N'
    return "{:4.3f}{}".format(packed, hemisphere)
|
d1e6f111e70ec7bd532e3d14afe3c90dc99cb8f8
| 3,640,879
|
def loadData (x_file="ass1_data/linearX.csv", y_file="ass1_data/linearY.csv"):
    """
    Load the X, Y matrices from CSV files and return them shuffled in unison.

    The rows of X and Y are paired column-wise, shuffled together, and
    returned as two aligned 1-D arrays.

    :param x_file: path to the CSV file holding the feature values
    :param y_file: path to the CSV file holding the target values
    :return: tuple (X_shuffled, Y_shuffled)
    """
    X = np.genfromtxt(x_file)
    Y = np.genfromtxt(y_file)
    # Stack as columns so shuffling keeps each (x, y) pair together.
    # (The old dead `Z = [X, Y]` and the computed-but-discarded
    # train/validation/test partition have been removed.)
    Z = np.c_[X.reshape(len(X), -1), Y.reshape(len(Y), -1)]
    np.random.shuffle(Z)
    return (Z[:, 0], Z[:, 1])
|
18fb7269f2b853b089494e6021d765d76a148711
| 3,640,880
|
async def retrieve_users():
    """
    Fetch every user document from the collection, parsed into dicts.
    """
    return [user_parser(doc) async for doc in user_collection.find()]
|
914969f7beb75a9409e370b9e2453c681c37ff42
| 3,640,881
|
import hashlib
def get_file_hash(path):
    """Compute the MD5 hex digest of a file.

    Reads in fixed-size chunks so arbitrarily large files do not have to fit
    in memory (the original slurped the whole file at once).

    :param path: path of the file to hash
    :return: hex digest string
    """
    md5 = hashlib.md5()
    with open(path, 'rb') as f:
        # iter() with a sentinel stops cleanly at EOF (empty bytes).
        for chunk in iter(lambda: f.read(65536), b''):
            md5.update(chunk)
    # Renamed from `hash`, which shadowed the builtin.
    digest = md5.hexdigest()
    info("get_file_hash from {}: {}".format(path, digest))
    return digest
|
a024b0002c019ec9bae4fca40e68919c6236b2fa
| 3,640,882
|
from nipy.labs.spatial_models.discrete_domain import \
    grid_domain_from_binary_array


def apply_repro_analysis(dataset, thresholds=[3.0], method='crfx'):
    """
    Perform the reproducibility analysis of a (n_subj, dimx, dimy) dataset.

    Parameters
    ----------
    dataset: array of shape (n_subj, dimx, dimy)
    thresholds: list of cluster-forming thresholds to evaluate
    method: reproducibility method forwarded to the *_reproducibility helpers

    Returns
    -------
    kap, clt, pkd: arrays of voxel-, cluster- and peak-level reproducibility
        scores, one row per threshold, `niter` samples per row.
    """
    # Bug fix: the backslash-continued import line had drifted into the
    # function body, leaving `grid_domain_from_binary_array` undefined and
    # the module-level import a syntax error.
    n_subj, dimx, dimy = dataset.shape
    func = np.reshape(dataset, (n_subj, dimx * dimy)).T
    var = np.ones((dimx * dimy, n_subj))
    domain = grid_domain_from_binary_array(np.ones((dimx, dimy, 1)))
    # Fixed analysis parameters.
    ngroups = 5
    sigma = 2.0
    csize = 10
    niter = 5
    verbose = 0
    swap = False
    kap, clt, pkd = [], [], []
    for threshold in thresholds:
        kappa, cls, pks = [], [], []
        kwargs = {'threshold': threshold, 'csize': csize}
        for i in range(niter):
            k = voxel_reproducibility(func, var, domain, ngroups,
                                      method, swap, verbose, **kwargs)
            kappa.append(k)
            cld = cluster_reproducibility(func, var, domain, ngroups, sigma,
                                          method, swap, verbose, **kwargs)
            cls.append(cld)
            pk = peak_reproducibility(func, var, domain, ngroups, sigma,
                                      method, swap, verbose, **kwargs)
            pks.append(pk)
        kap.append(np.array(kappa))
        clt.append(np.array(cls))
        pkd.append(np.array(pks))
    return np.array(kap), np.array(clt), np.array(pkd)
|
cffb667b80b0a049856dc7c11db6d81fd9521f49
| 3,640,883
|
def api_root(request):
    """
    Logging API index endpoint: greets the requesting user.
    """
    greeting = "Hello, {}. You're at the logs api index.".format(
        request.user.username)
    return Response(dict(message=greeting))
|
b002724baefccdd0cd0dcc324fa23d9902186351
| 3,640,884
|
def load_data(filename: str):
    """
    Load the house prices dataset and preprocess it.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Tuple of (design matrix DataFrame, price Series).
    """
    df = pd.read_csv(filename).dropna()
    df = df.drop(columns=['long', 'date', 'lat', 'id'])
    # Keep only rows with physically sensible values (boolean masks are
    # equivalent to the original index-based drops, NaNs already removed).
    df = df[df.bedrooms > 0]
    df = df[df.sqft_living > 0]
    df = df[df.floors > 0]
    df = df[df.bathrooms >= 0]
    df = df[df.price >= 0]
    # df = pd.get_dummies(df, columns=['zipcode'])
    df['yr_built_or_renovated'] = df[['yr_built', 'yr_renovated']].max(axis=1)
    df = df.drop(columns=['yr_built', 'yr_renovated'])
    price = df.pop('price')
    return df, price
|
412b197274ae4ca06e4cc7f9cd4b7d7b7c5934a0
| 3,640,885
|
def getcollength(a):
    """
    Get the column length of a matrix view object.

    :param a: a VSIP matrix view (real, complex, integer or boolean)
    :return: number of elements in a column of ``a``
    :raises AssertionError: if ``a`` is not a supported matrix view type
    """
    t = getType(a)
    # Dispatch table: view type tag -> C binding returning the column length.
    f = {'mview_f': vsip_mgetcollength_f,
         'mview_d': vsip_mgetcollength_d,
         'mview_i': vsip_mgetcollength_i,
         'mview_si': vsip_mgetcollength_si,
         'mview_uc': vsip_mgetcollength_uc,
         'cmview_f': vsip_cmgetcollength_f,
         'cmview_d': vsip_cmgetcollength_d,
         'mview_bl': vsip_mgetcollength_bl}
    # Message fixed: the original read "for for getcollength".
    assert t[0] and t[1] in f, \
        'Type <:%s:> not a supported type for getcollength' % t[1]
    return f[t[1]](a)
|
fe4b4c69f1631c0e571cd1590aa8eeb8fa5bc7bb
| 3,640,887
|
from unittest.mock import patch
def test_coinbase_query_balances(function_scope_coinbase):
    """Test that coinbase balance query works fine for the happy path"""
    coinbase = function_scope_coinbase

    def mock_coinbase_accounts(url, timeout):  # pylint: disable=unused-argument
        # Canned /v2/accounts payload: two BTC accounts (4.0 and 1.23) and
        # one ETH account (39.59). Balances of the same currency should be
        # summed by query_balances.
        response = MockResponse(
            200,
            """
            {
            "pagination": {
            "ending_before": null,
            "starting_after": null,
            "limit": 25,
            "order": "desc",
            "previous_uri": null,
            "next_uri": null
            },
            "data": [
            {
            "id": "58542935-67b5-56e1-a3f9-42686e07fa40",
            "name": "My Vault",
            "primary": false,
            "type": "vault",
            "currency": "BTC",
            "balance": {
            "amount": "4.00000000",
            "currency": "BTC"
            },
            "created_at": "2015-01-31T20:49:02Z",
            "updated_at": "2015-01-31T20:49:02Z",
            "resource": "account",
            "resource_path": "/v2/accounts/58542935-67b5-56e1-a3f9-42686e07fa40",
            "ready": true
            },
            {
            "id": "2bbf394c-193b-5b2a-9155-3b4732659ede",
            "name": "My Wallet",
            "primary": true,
            "type": "wallet",
            "currency": "ETH",
            "balance": {
            "amount": "39.59000000",
            "currency": "ETH"
            },
            "created_at": "2015-01-31T20:49:02Z",
            "updated_at": "2015-01-31T20:49:02Z",
            "resource": "account",
            "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede"
            },
            {
            "id": "68542935-67b5-56e1-a3f9-42686e07fa40",
            "name": "Another Wallet",
            "primary": false,
            "type": "vault",
            "currency": "BTC",
            "balance": {
            "amount": "1.230000000",
            "currency": "BTC"
            },
            "created_at": "2015-01-31T20:49:02Z",
            "updated_at": "2015-01-31T20:49:02Z",
            "resource": "account",
            "resource_path": "/v2/accounts/68542935-67b5-56e1-a3f9-42686e07fa40",
            "ready": true
            }
            ]
            }
            """,
        )
        return response

    with patch.object(coinbase.session, 'get', side_effect=mock_coinbase_accounts):
        balances, msg = coinbase.query_balances()
    assert msg == ''
    assert len(balances) == 2
    # 4.0 + 1.23 BTC combined across the two BTC accounts.
    assert balances[A_BTC].amount == FVal('5.23')
    assert balances[A_BTC].usd_value == FVal('7.8450000000')
    assert balances[A_ETH].amount == FVal('39.59')
    assert balances[A_ETH].usd_value == FVal('59.385000000')
    # The happy path must produce no warnings or errors.
    warnings = coinbase.msg_aggregator.consume_warnings()
    errors = coinbase.msg_aggregator.consume_errors()
    assert len(warnings) == 0
    assert len(errors) == 0
|
d25d8d31ae5a7c22559c322edeed53404fc179ab
| 3,640,888
|
def process_phase_boundary(fname):
    """
    Processes the phase boundary file, computed mean and standard deviations.

    :param fname: HDF5 file with one group per run, each holding the
        "singlets", "chem_pot" and "temperatures" datasets
    :return: dict of averaged curves, std deviations and visit counts on a
        common 200-point temperature grid
    """
    singlets = []
    chem_pot = []
    temperatures = []
    with h5.File(fname, 'r') as hfile:
        for name in hfile.keys():
            grp = hfile[name]
            singlets.append(np.array(grp["singlets"]))
            chem_pot.append(np.array(grp["chem_pot"]))
            temperatures.append(np.array(grp["temperatures"]))
    # Common temperature axis spanning the range of all runs.
    max_temp = 0.0
    min_temp = 10000000.0
    for temp_array in temperatures:
        if np.max(temp_array) > max_temp:
            max_temp = np.max(temp_array)
        if np.min(temp_array) < min_temp:
            min_temp = np.min(temp_array)
    temp_linspace = np.linspace(min_temp, max_temp, 200)
    result = {}
    result["chem_pot"] = []
    result["std_chem_pot"] = []
    result["singlets"] = []
    result["std_singlets"] = []
    result["num_visits"] = []
    result["temperature"] = temp_linspace
    # All runs must describe the same system (same singlet array shape).
    for sing_dset in singlets:
        if np.any(sing_dset.shape != singlets[0].shape):
            msg = "Invalid file! Looks like it contains phase boundary\n"
            msg += " data for different systems"
            raise ValueError(msg)
    num_chem_pots = chem_pot[0].shape[1]
    # Average each chemical potential across all runs on the common grid.
    for i in range(num_chem_pots):
        mu_averager = DatasetAverager(temp_linspace)
        for temps, mu in zip(temperatures, chem_pot):
            mu_averager.add_dataset(temps, mu[:,i])
        mu_res = mu_averager.get()
        result["chem_pot"].append(mu_res["y_values"])
        result["std_chem_pot"].append(mu_res["std_y"])
        result["num_visits"].append(mu_res["num_visits"])
    num_singlets = singlets[0].shape[1]
    for i in range(num_chem_pots):
        for temp, singl in zip(temperatures, singlets):
            # NOTE(review): a fresh averager per run, with one .get() after
            # all j values were added to the SAME averager, differs from the
            # chem_pot loop above (one averager accumulating all runs).
            # Looks suspect — confirm intended structure.
            singlet_averager = DatasetAverager(temp_linspace)
            singlet = []
            std_singlet = []
            for j in range(num_singlets):
                # NOTE(review): `temps` is the leftover variable from the
                # chem_pot loop above; `temp` (this loop's variable) was
                # probably intended — confirm before relying on results.
                singlet_averager.add_dataset(temps, singl[:,j,i])
            singl_res = singlet_averager.get()
            singlet.append(singl_res["y_values"])
            std_singlet.append(singl_res["std_y"])
            result["singlets"].append(singlet)
            result["std_singlets"].append(std_singlet)
    return result
|
4e7f01e3265566f03fa4e7e21f13cb48a1777c9c
| 3,640,889
|
def blackman_window(shape, normalization=1):
    """
    Create a 3d Blackman window based on shape.

    :param shape: tuple, shape (nbz, nby, nbx) of the 3d window
    :param normalization: value of the integral of the Blackman window
    :return: the 3d Blackman window, ndarray of the given shape
    """
    nbz, nby, nbx = shape
    # Outer product of the three 1-D windows replaces the explicit
    # per-element loops of the original (same values, vectorized).
    window = np.einsum('i,j,k->ijk',
                       np.blackman(nbz), np.blackman(nby), np.blackman(nbx))
    return window / window.sum() * normalization
|
45ae8132aad01319e1728f0a4355dda4d5d7d145
| 3,640,891
|
def asset_movements_from_dictlist(given_data, start_ts, end_ts):
    """Convert dict asset movements (most probably read from the json files)
    inside [start_ts, end_ts] into a list of AssetMovement tuples.

    Entries before start_ts are skipped; iteration stops at the first entry
    past end_ts, so the input is assumed sorted by timestamp.
    """
    movements = []
    for entry in given_data:
        ts = entry['timestamp']
        if ts < start_ts:
            continue
        if ts > end_ts:
            break
        movements.append(AssetMovement(
            exchange=entry['exchange'],
            category=entry['category'],
            timestamp=ts,
            asset=entry['asset'],
            amount=FVal(entry['amount']),
            fee=FVal(entry['fee']),
        ))
    return movements
|
b21355ad65c2603559ea00650d4ea6dd2a7d94f0
| 3,640,892
|
def update_work(work_id):
    """
    Route for editing the data of a work in a collection.

    :param work_id: ID of the work, taken from the work page
    :return: redirect or the update-work.html template
    :rtype: template
    """
    if request.method == "GET":
        work = Work.query.get(work_id)
        return render_template("pages/update-work.html", updateWork=work)
    # POST: try to apply the submitted form fields.
    status, data = Work.update_work(
        work_id=work_id,
        title=request.form.get("title", None),
        author=request.form.get("author", None),
        date=request.form.get("date", None),
        medium=request.form.get("medium", None),
        dimensions=request.form.get("dimensions", None),
        image=request.form.get("image", None)
    )
    if status is True:
        flash("Modification réussie !", "success")
        return redirect("/collections")
    flash("Les erreurs suivantes ont été rencontrées : " + ", ".join(data), "danger")
    work = Work.query.get(work_id)
    return render_template("pages/update-work.html", nom="CollectArt", updateWork=work)
|
aed65c45d53fa9d7b551df6909fdece488f2ab65
| 3,640,893
|
def login_view(request):
    """Authenticate the posted credentials and log the user in."""
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        user = authenticate(request, username=email, password=password)
        if user is None:
            messages.info(request, 'Username Or Password is incorrect.')
        else:
            login(request, user)
            return redirect('/')
    # GET, or a failed login attempt: show the login page.
    return render(request, 'pages/login.html', {})
|
702a3aa5a90cd5a5386a4fa3b74ab4b36d3748bb
| 3,640,894
|
def mse(im1, im2):
    """Compute the Mean Squared Error between two images.

    The error is the sum of squared differences divided by the number of
    elements along the first two axes.

    Args:
        im1 (ndarray): First array.
        im2 (ndarray): Second array.

    Returns:
        float: Mean Squared Error.

    Raises:
        ValueError: if the two shapes differ.
    """
    a = np.asarray(im1)
    b = np.asarray(im2)
    if a.shape != b.shape:
        raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
    diff = a.astype("float") - b.astype("float")
    return np.sum(diff ** 2) / float(a.shape[0] * a.shape[1])
|
3d14472d3eb211855b53174990c3201bbae49086
| 3,640,896
|
import torch
def bert_text_preparation(text, tokenizer):
    """Prepare raw text as BERT input.

    Wraps the text in [CLS]/[SEP] markers, tokenizes it, maps tokens to
    vocabulary ids and assigns every token to segment id 1.

    Args:
        text (str): Text to be converted.
        tokenizer (obj): Tokenizer exposing ``tokenize`` and
            ``convert_tokens_to_ids``.

    Returns:
        list: BERT-readable tokens.
        obj: Torch tensor with token ids (shape 1 x n_tokens).
        obj: Torch tensor with segment ids (all ones, same shape).
    """
    tokens = tokenizer.tokenize("[CLS] " + text + " [SEP]")
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    # Single-sentence input: every token belongs to segment 1.
    segment_ids = [1] * len(token_ids)
    return (
        tokens,
        torch.tensor([token_ids]),
        torch.tensor([segment_ids]),
    )
|
f9b3de4062fd0cc554e51bd02c750daea0a8250c
| 3,640,897
|
def possibly_equal(first, second):
    """Equality comparison that propagates uncertainty.

    The function object itself serves as the "unknown" sentinel: if either
    operand is that sentinel, the result stays unknown.
    """
    if first is possibly_equal:
        return possibly_equal
    if second is possibly_equal:
        return possibly_equal
    return first == second
|
12662df45d6ee0c6e1aadb6a5c4c0ced9352af35
| 3,640,898
|
def get_logs():
    """
    Endpoint backing the Slack /logs command.
    """
    req = request.values
    logger.info(f'Log request received: {req}')
    user_id = req['user_id']
    user_name = req['user_name']
    # Authorization gate: unauthorized users get a polite refusal.
    if not can_view_logs(user_id):
        logger.info(f"{user_name} attempted to view logs and was denied")
        return make_response("You are not authorized to do that.", 200)
    url = get_temporary_url(user_id, req['text'])
    logger.info(f"Created log URL for {user_name} : {url.url}")
    return make_response(f'{request.host_url}logs/{url.url}', 200)
|
9708515dbd70c6e817f21c474fa1e96a26a1e9b4
| 3,640,899
|
def list_volumes(vg):
    """List logical volume names for the given volume group.

    :param vg: volume group name
    :returns: list of logical volume names, e.g.
              ['volume-aaa', 'volume-bbb', 'volume-ccc']
    """
    out, _err = utils.execute('lvs', '--noheadings', '-o', 'lv_name', vg,
                              run_as_root=True)
    names = []
    for raw_line in out.splitlines():
        names.append(raw_line.strip())
    return names
|
4cd613c8c10aaec443dce31cef8b132e3b2c65da
| 3,640,900
|
def question_aligned_passage_embedding(question_lstm_outs, document_embeddings,
                                       passage_aligned_embedding_dim):
    """create question aligned passage embedding.
    Arguments:
        - question_lstm_outs: The dimension of output of LSTM that process
                              question word embedding.
        - document_embeddings: The document embeddings.
        - passage_aligned_embedding_dim: The dimension of passage aligned
                                         embedding.
    """
    def outer_sentence_step(document_embeddings, question_lstm_outs,
                            passage_aligned_embedding_dim):
        """step function for PaddlePaddle's recurrent_group.
        In this function, the original input document_embeddings are scattered
        from nested sequence into sequence by recurrent_group in PaddlePaddle.
        The step function iterates over each sentence in the document.
        Arguments:
            - document_embeddings: The word embeddings of the document.
            - question_lstm_outs: The dimension of output of LSTM that
                                  process question word embedding.
            - passage_aligned_embedding_dim: The dimension of passage aligned
                                             embedding.
        """
        def inner_word_step(word_embedding, question_lstm_outs,
                            question_outs_proj, passage_aligned_embedding_dim):
            """
            In this recurrent_group, sentence embedding has been scattered into
            word embeddings. The step function iterates over each word in one
            sentence in the document.
            Arguments:
                - word_embedding: The word embeddings of documents.
                - question_lstm_outs: The dimension of output of LSTM that
                                      process question word embedding.
                - question_outs_proj: The projection of question_lstm_outs
                                      into a new hidden space.
                - passage_aligned_embedding_dim: The dimension of passage
                                                 aligned embedding.
            """
            # Broadcast this document word across the question sequence so it
            # can be compared against every question token.
            doc_word_expand = paddle.layer.expand(
                input=word_embedding,
                expand_as=question_lstm_outs,
                expand_level=paddle.layer.ExpandLevel.FROM_NO_SEQUENCE)
            # Attention weights of the question tokens w.r.t. this word
            # (softmax over the question sequence).
            weights = paddle.layer.fc(
                input=[question_lstm_outs, doc_word_expand],
                size=1,
                bias_attr=False,
                act=paddle.activation.SequenceSoftmax())
            # Weighted sum of the projected question states -> the
            # question-aligned embedding for this word.
            weighted_candidates = paddle.layer.scaling(
                input=question_outs_proj, weight=weights)
            return paddle.layer.pooling(
                input=weighted_candidates, pooling_type=paddle.pooling.Sum())
        # Project the question states once per sentence; reused for all words.
        question_outs_proj = paddle.layer.fc(
            input=question_lstm_outs,
            bias_attr=False,
            size=passage_aligned_embedding_dim)
        return paddle.layer.recurrent_group(
            input=[
                paddle.layer.SubsequenceInput(document_embeddings),
                paddle.layer.StaticInput(question_lstm_outs),
                paddle.layer.StaticInput(question_outs_proj),
                passage_aligned_embedding_dim,
            ],
            step=inner_word_step,
            name="iter_over_word")
    # Outer group: iterate over sentences; inner group: over words.
    return paddle.layer.recurrent_group(
        input=[
            paddle.layer.SubsequenceInput(document_embeddings),
            paddle.layer.StaticInput(question_lstm_outs),
            passage_aligned_embedding_dim
        ],
        step=outer_sentence_step,
        name="iter_over_sen")
|
8dbcb298a24ec18da4904a8f48a7c63331b27c91
| 3,640,901
|
def lm_loss_fn(forward_fn, vocab_size, params, rng, data, is_training=True):
    """Compute the masked cross-entropy loss on `data` w.r.t. `params`.

    Tokens whose observation id is 0 (padding) are excluded from the mean.
    """
    logits = forward_fn(params, rng, data, is_training)
    targets = hk.one_hot(data['target'], vocab_size)
    assert logits.shape == targets.shape
    log_probs = jax.nn.log_softmax(logits)
    token_loss = -jnp.sum(targets * log_probs, axis=-1)
    # Average over non-padding positions only.
    mask = jnp.greater(data['obs'], 0)
    return jnp.sum(token_loss * mask) / jnp.sum(mask)
|
44188d717759a82d80079b5e4f7309b3cf7b5cb0
| 3,640,902
|
def chroms_from_build(build):
    """ Get list of chromosomes from a particular genome build

    Args:
        build: genome build name ('grch37' or 'hg19')

    Returns:
        list of autosome names (1-22), with or without a 'chr' prefix

    Raises:
        ValueError: for an unrecognized build
    """
    autosomes = range(1, 23)
    known = {
        'grch37': [str(n) for n in autosomes],
        'hg19': ['chr{}'.format(n) for n in autosomes],
    }
    if build not in known:
        raise ValueError("Oops, I don't recognize the build {}".format(build))
    return known[build]
|
c87431911c07c00aaa63357771258394cfff859e
| 3,640,904
|
def get_ready_count_string(room: str) -> str:
    """Returns a string representing how many players in a room are ready.

    Args:
        room (str): The room code of the players.

    Returns:
        str: '[ready]/[alive]' counts, e.g. '2/5'. Only alive players are
        counted on either side.
    """
    alive = [p for p in get_players(room) if p.is_alive]
    ready = sum(1 for p in alive if p.ready)
    return f'{ready}/{len(alive)}'
|
eb8ae2a308ccd58355de5a8a15629bfccd1fcc2c
| 3,640,905
|
from typing import List
def switches(topology: 'Topology') -> List['Node']:
    """
    Collect the switch nodes of a topology.

    @param topology: topology to filter
    @return: nodes whose device type is SWITCH
    """
    switch_type = DeviceType.SWITCH
    return filter_nodes(topology, type=switch_type)
|
e489740b29f8aff7368147274d020cb467422669
| 3,640,906
|
def geometric_progression(init, ratio):
    """
    Generate the geometric progression init, init*ratio, init*ratio**2, ...
    """
    def scale(term):
        return term * ratio
    return _iterate(scale, init)
|
6b2626bc9d4016518b1cc7e41b63d34924c1ee30
| 3,640,907
|
import urllib
def resolve(marathon_lb_url):
    """Return the individual URLs for every Marathon-LB instance behind a
    single DNS-balanced cluster URL.

    Marathon-LB typically load-balances via DNS, so the user-supplied address
    may hide several instances. The hostname's IPv4 A-records are looked up
    and each address is reassembled into a full URL.
    """
    parsed = urllib.parse.urlparse(marathon_lb_url)
    hosts = _get_alias_records(parsed.hostname)
    return _reassemble_urls(parsed, hosts)
|
f192d66a8a12d772ad33b2b8030796af2393ec16
| 3,640,908
|
def _parse_bluetooth_info(data):
    """Decode a raw byte sequence into a BluetoothInfo tuple.

    Layout inferred from the slices below: bytes 0-15 hold the NUL-padded
    device name, bytes 16-27 the second field, bytes 29+ the third field.
    NOTE(review): byte 28 is skipped — presumably a separator/NUL byte;
    confirm against the device protocol.
    """
    # Combine the bytes as a char string and then strip off extra bytes.
    name = ''.join(chr(i) for i in data[:16]).partition('\0')[0]
    return BluetoothInfo(name,
                         ''.join(chr(i) for i in data[16:28]),
                         ''.join(chr(i) for i in data[29:]))
|
ef46576102cfb5d1df0b40e84529a89e2ed6bfa8
| 3,640,909
|
async def get_reverse_objects_topranked_for_lst(entities):
    """
    get pairs that point to the given entity as the primary property
    primary properties are those with the highest rank per property
    """
    # run the query
    # The SPARQL below finds (parent, prop) pairs whose object ?base is one
    # of the given entities. `ex_cls` supplies the excluded property list
    # (wikilinks/redirects) and config.RESULTS_LIMIT caps the result size;
    # results are cached per key in cacheReverseObjectTop.
    res = await runQuerySingleKey(cacheReverseObjectTop, entities, """
        SELECT ?base ?prop ?parent
        WHERE {
            VALUES ?base { %s }
            ?parent ?prop ?base .
            FILTER( ?prop NOT IN (""" + ex_cls + """) ) # exclude wikilinks and redirects
        }
        LIMIT """ + str(config.RESULTS_LIMIT) + """
        """)
    return res
|
d975ba3ac3a0983d3a08057c91cd96ca466708df
| 3,640,910
|
def LU_razcep(A):
    """Return the LU decomposition of *A*, stored in-place as ``[L\\U]``.

    Gaussian elimination without pivoting: the upper triangle ends up holding
    U and the strict lower triangle the multipliers of L. *A* is modified in
    place and also returned.
    """
    # Eliminate below each pivot row (unused enumerate index removed).
    for p, pivot_row in enumerate(A[:-1]):
        for row in A[p + 1:]:
            # Skip elimination on a zero pivot (no pivoting is performed).
            if pivot_row[p]:
                m = row[p] / pivot_row[p]
                row[p:] = row[p:] - pivot_row[p:] * m
                row[p] = m
    return A
|
79d6a00b4e16254739b987228fd506cae133907b
| 3,640,911
|
def jni_request_identifiers_for_type(field_type, field_reference_name, field_name, object_name="request"):
    """
    Generate jni code that defines the C variable corresponding to a field of
    a java object (dto or custom type). To be used in request message handlers.

    :param field_type: type of the field to be initialized (as defined in vpe.api)
    :param field_reference_name: name of the field reference in generated code
    :param field_name: name of the field (camelcase)
    :param object_name: name of the object to be initialized
    """
    # Look up the JNI type / signature / getter triple for this VPP type and
    # substitute everything into the template in one go.
    return request_field_identifier_template.substitute(
        jni_type=util.vpp_2_jni_type_mapping[field_type],
        field_reference_name=field_reference_name,
        field_name=field_name,
        jni_signature=util.jni_2_signature_mapping[field_type],
        jni_getter=util.jni_field_accessors[field_type],
        object_name=object_name)
|
4f23ba559124b938fa82a044ae1adc0f16f4a7ad
| 3,640,912
|
def _ValidateDuration(arg_internal_name, arg_value):
    """Validates an argument which should have a Duration value.

    Accepts either a duration string (parsed by TIMEOUT_PARSER) or a bare
    integer number of seconds. Any other type, or an unparseable value,
    is rejected.

    Note: relies on Python 2 constructs (`basestring`, `e.message`) — keep
    consistent with the interpreter this tool targets.

    Raises:
        InvalidArgException: if the value cannot be interpreted as a Duration.
    """
    try:
        if isinstance(arg_value, basestring):
            return TIMEOUT_PARSER(arg_value)
        elif isinstance(arg_value, int):
            return TIMEOUT_PARSER(str(arg_value))
    except arg_parsers.ArgumentTypeError as e:
        raise InvalidArgException(arg_internal_name, e.message)
    # Unsupported type falls through to a generic rejection.
    raise InvalidArgException(arg_internal_name, arg_value)
|
b08b65831e04ece410be7f0a490cd6ebf7bcaa6f
| 3,640,913
|
def get_jaccard_dist1(y_true, y_pred, smooth=default_smooth):
    """Helper to get the Jaccard distance (for loss functions): 1 - index.

    Note: This mirrors what others in the ML community have been using even
    for non-binary vectors."""
    index = get_jaccard_index1(y_true, y_pred, smooth)
    return 1 - index
|
c64ba7fd81c3697bc472d372afeb940e19d35e3c
| 3,640,914
|
from pathlib import Path
from typing import Dict
import json
import warnings
def deduplicate_obi_codes(fname: Path) -> None:
    """
    Remove duplicate http://terminology.hl7.org/CodeSystem/v2-0203#OBI codes from an instance.
    When using the Medizininformatik Initiative Profile LabObservation, SUSHI v2.1.1 inserts the identifier.type code
    for http://terminology.hl7.org/CodeSystem/v2-0203#OBI twice, but it has a cardinality of 1, resulting in an error
    by the FHIR validator. This workaround function actively removes the duplicates.
    MII Profile: https://www.medizininformatik-initiative.de/fhir/core/modul-labor/StructureDefinition/ObservationLab
    :param fname: Filename of instance to remove duplicates from
    :return: None
    """
    def num_obi_codes(identifier: Dict) -> int:
        # Count OBI codings of the v2-0203 system inside identifier.type.
        jp = parse(
            "$.type.coding[?code = 'OBI' & system='http://terminology.hl7.org/CodeSystem/v2-0203']"
        )
        return len(jp.find(identifier))

    def del_obi_codes(identifier: Dict) -> None:
        # Remove the first matching OBI coding only; with exactly one
        # duplicate (the SUSHI case) one removal restores cardinality 1.
        codings = identifier["type"]["coding"]
        for i, coding in enumerate(codings):
            if (
                coding["system"] == "http://terminology.hl7.org/CodeSystem/v2-0203"
                and coding["code"] == "OBI"
            ):
                del codings[i]
                break

    # Context managers close the handles deterministically (the original
    # leaked both the read and the write file objects).
    with open(fname) as fh:
        json_data = json.load(fh)
    if "identifier" not in json_data:
        return
    for identifier in json_data["identifier"]:
        if num_obi_codes(identifier) > 1:
            warnings.warn(f"Found multiple OBI codes in {fname}, removing")
            del_obi_codes(identifier)
    with open(fname, "w") as fh:
        json.dump(json_data, fh, indent=2)
|
336a143e30224b64c39358137bab26e4013c5049
| 3,640,915
|
def fold_conv_bns(onnx_file: str) -> onnx.ModelProto:
    """
    Fold BatchNormalization ops into their parent Conv ops wherever the batch
    norm is the conv's only consumer.

    :param onnx_file: file path to ONNX model to process
    :return: the processed ONNX model, or None when nothing was folded
    """
    model = onnx.load(onnx_file)
    # Snapshot the conv nodes before any graph mutation.
    conv_ops = [node for node in model.graph.node if node.op_type == "Conv"]
    any_folded = False
    for conv in conv_ops:
        out_name = conv.output[0]
        consumers = [node for node in model.graph.node if out_name in node.input]
        # Fold only when the conv output feeds exactly one BatchNormalization.
        if len(consumers) != 1 or consumers[0].op_type != "BatchNormalization":
            continue
        if _fold_conv_bn(model, conv, consumers[0]):
            any_folded = True
    return model if any_folded else None
|
25c2748b0e964310cc9909b60e68a9740e3e0df1
| 3,640,916
|
def numdays(year, month):
    """
    numdays returns the number of days in the given month of
    the given year.

    Args:
        year: year, >= 0 (used for February leap-year handling)
        month: month number, 1-12

    Returns:
        ndays: number of days in month
    """
    # stdlib leap-year rule replaces the external `leapyear` helper
    # (assumed to implement the same proleptic Gregorian rule).
    import calendar
    NDAYS = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    assert year >= 0
    assert 1 <= month <= 12
    ndays = NDAYS[month - 1]
    if month == 2 and calendar.isleap(year):
        ndays += 1
    return ndays
|
159a41f3706b087194e0ba5d107a1ceb88583c21
| 3,640,917
|
def normalise_diversity_year_df(y_div_df):
    """Normalise a diversity dataframe (z-scores) by year and parametre set."""
    normalised_frames = []
    # One pivot per diversity metric: rows are (year, metric), columns are
    # parametre sets; each column is then z-scored.
    for metric in set(y_div_df["diversity_metric"]):
        wide = y_div_df.query(f"diversity_metric == '{metric}'").pivot_table(
            index=["year", "diversity_metric"], columns="parametre_set", values="score"
        )
        normalised_frames.append(wide.apply(zscore))
    # Concatenate and melt back to long format for altair.
    combined = pd.concat(normalised_frames).reset_index(drop=False)
    return combined.melt(
        id_vars=["year", "diversity_metric"],
        var_name="parametre_set",
        value_name="score",
    )
|
83e12072e65a707dd61b98383ce295fac8e9f2f7
| 3,640,918
|
def allowed_file(filename):
    """Does filename have the right extension?"""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in ALLOWED_EXTENSIONS
|
f42ac5ef5470515258715b4552945206a440effb
| 3,640,919
|
from typing import Dict
from typing import Optional
from typing import Any
from typing import List
import tokenize
def render(
    template: str,
    context: Dict,
    serializer: Optional[CallableType[[Any], str]] = None,
    partials: Optional[Dict] = None,
    missing_variable_handler: Optional[CallableType[[str, str], str]] = None,
    missing_partial_handler: Optional[CallableType[[str, str], str]] = None,
    cache_tokens: bool = False,
) -> str:
    """Render a mustache template against *context*.

    :param template: mustache template text.
    :param context: top-level variable lookup mapping.
    :param serializer: converts resolved values to ``str``
        (``default_serializer`` when None).
    :param partials: mapping of partial name -> partial template text.
    :param missing_variable_handler: called with ``(name, tag_text)`` when a
        variable cannot be resolved; its return value is rendered instead.
    :param missing_partial_handler: called with ``(name, tag_text)`` when a
        partial is not found; its return value is used as the partial's text.
    :param cache_tokens: when True, tokenize the whole template up front and
        walk the token list instead of re-tokenizing lazily.
    :return: the rendered output string.
    """
    serializer = serializer or default_serializer
    missing_variable_handler = missing_variable_handler or missing_variable_default
    missing_partial_handler = missing_partial_handler or missing_partial_default
    partials = partials or {}
    output: str = ''
    # Stack of lookup scopes; sections push their item, END tags pop it.
    context_stack: List = [context]
    # Open sections: each entry is [name, loop-start pointer, [iterable, index]].
    env_stack: List = []
    left_delimiter: str = '{{'
    right_delimiter: str = '}}'
    # Position in the template (lazy mode) or index into `tokens` (cached mode).
    pointer: int = 0
    tokens = []
    if cache_tokens:
        tokens = list(tokenize(template, 0, left_delimiter, right_delimiter))
    while True:
        # Fetch the next token either from the pre-built list or lazily.
        if cache_tokens:
            try:
                (token, value, indentation), position_pointer = tokens[pointer]
                pointer += 1
            except IndexError:
                break
        else:
            try:
                (token, value, indentation), pointer = next(
                    tokenize(template, pointer, left_delimiter, right_delimiter)
                )
                position_pointer = pointer
            except StopIteration:
                break
        current_context = context_stack[-1]
        if token is Token.SET_DELIMITER:
            # {{=<% %>=}} style tag: switch delimiters from here on.
            new_delimiters = value.strip().split(' ')
            left_delimiter = new_delimiters[0]
            right_delimiter = new_delimiters[-1]
        if token is Token.END:
            current_env = env_stack[-1]
            context_stack.pop()
            env_name, env_pointer, [env_var, _] = current_env
            if should_iterate(env_var):
                # List section: advance to the next item and jump back to
                # the start of the section body.
                current_env[2][1] += 1
                try:
                    next_item = env_var[current_env[2][1]]
                    context_stack.append(next_item)
                    pointer = env_pointer
                    continue
                except IndexError:
                    # List exhausted; fall through and close the section.
                    pass
            if env_name != value:
                raise MustacheSyntaxError.from_template_pointer(
                    f'Unexpected section end tag on line {{line_number}}. Expected "{env_name}" got "{value}"',
                    template,
                    position_pointer,
                )
            env_stack.pop()
        # Inside a falsy section: skip rendering, but still track nested
        # sections so their END tags stay balanced.
        if not current_context and len(context_stack) != 1:
            if token in [Token.SECTION, Token.INVERTED]:
                context_stack.append(False)
                env_stack.append([value, pointer, [False, 0]])
            continue
        if token in [Token.NO_ESCAPE, Token.VARIABLE, Token.SECTION, Token.INVERTED]:
            try:
                variable = get_from_context(context_stack, value)
            except MissingVariable:
                variable = missing_variable_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
        else:
            variable = None
        if token is Token.LITERAL:
            output += value
        elif token is Token.NO_ESCAPE:
            output += serializer(variable)
        elif token is Token.VARIABLE:
            output += escape(serializer(variable))
        elif token in [Token.SECTION, Token.INVERTED]:
            if token is Token.INVERTED:
                variable = not variable
            if should_iterate(variable):
                # Push the first list item, or False for an empty list.
                try:
                    context_item = variable[0]
                    context_stack.append(context_item)
                except IndexError:
                    context_stack.append(False)
            else:
                context_stack.append(variable)
            env_stack.append([value, pointer, [variable, 0]])
        elif token is Token.PARTIAL:
            partial_template = partials.get(value)  # potentially raise error here
            if partial_template is None:
                partial_template = missing_partial_handler(
                    value, f'{left_delimiter} {value} {right_delimiter}'
                )
            if partial_template != '':
                # Re-indent the partial body to match the indentation of its tag.
                remove_trailing_indentation = False
                if partial_template.endswith('\n'):
                    remove_trailing_indentation = True
                partial_template = indentation + f'\n{indentation}'.join(
                    partial_template.split('\n')
                )
                if remove_trailing_indentation:
                    partial_template = partial_template[: -len(indentation)]
                partial_output = render(
                    partial_template, current_context, serializer=serializer, partials=partials
                )
                output += partial_output
    return output
|
b660c0ac97915121d061fd5c7dde8cccea42f03f
| 3,640,920
|
def preprocess_observations(input_observation, prev_processed_observation, input_dimensions):
    """Convert a 210x160x3 uint8 frame into a 6400-element float difference vector.

    Args:
        input_observation: Raw frame (assumed 210x160x3 uint8 — Atari Pong layout).
        prev_processed_observation: Previous processed frame, or None on the
            first step.
        input_dimensions: Length of the flattened frame vector (6400).

    Returns:
        Tuple of (difference frame, current processed frame); the caller feeds
        the second element back in as ``prev_processed_observation`` next step.
    """
    processed_observation = input_observation[35:195]  # crop to the play area
    processed_observation = downsample(processed_observation)
    processed_observation = remove_color(processed_observation)
    processed_observation = remove_background(processed_observation)
    # Everything left (paddles, ball) becomes 1.
    processed_observation[processed_observation != 0] = 1
    # Flatten the 80x80 matrix into a 6400-element vector.
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement and is equivalent.
    processed_observation = processed_observation.astype(float).ravel()
    # Subtract the previous frame so the policy only sees motion.
    if prev_processed_observation is not None:
        input_observation = processed_observation - prev_processed_observation
    else:
        input_observation = np.zeros(input_dimensions)
    # Return the current frame so it can be subtracted from next time.
    prev_processed_observations = processed_observation
    return input_observation, prev_processed_observations
|
885fbb2a1f81200843bb15d37f3c13726c23ea90
| 3,640,921
|
def expand_configuration(configuration):
    """Fill missing per-backup fields from the top-level configuration defaults.

    Each backup entry inherits any field in ``_FIELDS`` that it does not set
    itself (or sets to None); fields absent everywhere become None.
    """
    backups = configuration['backups']
    for backup in backups:
        for field in _FIELDS:
            if backup.get(field) is None:
                # Fall back to the global default, or None when there is none.
                backup[field] = configuration.get(field)
    return backups
|
218f5c5cb67d3fa0f52b453d3cd00cde40835025
| 3,640,922
|
def create_feature_extractor(input_shape: tuple, dropout: float = 0.3, kernel_size: tuple = (3, 3, 3)) -> tf.keras.Sequential:
    """Create a 3-D convolutional feature-extraction model.

    :param input_shape: shape of input (Z, X, Y, channels)
    :param dropout: dropout rate applied after the last convolution
    :param kernel_size: kernel size shared by all Conv3D layers
    :return: feature extracting model
    """
    # Three strided Conv3D stages halve each spatial dimension in turn,
    # widening the channel count 4 -> 8 -> 16, followed by dropout.
    layers = [
        Conv3D(filters=4, kernel_size=kernel_size, padding='same', activation='relu',
               strides=(2, 2, 2), input_shape=input_shape),
        Conv3D(filters=8, kernel_size=kernel_size, padding='same', activation='relu',
               strides=(2, 2, 2)),
        Conv3D(filters=16, kernel_size=kernel_size, padding='same', activation='relu',
               strides=(2, 2, 2)),
        Dropout(dropout),
    ]
    return Sequential(layers)
|
47f52bab452e6bf7c9875a3c9c85bed02b79fcdc
| 3,640,923
|
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up the integration from a config entry."""
    hass.data.setdefault(DOMAIN, {})
    async_add_defaults(hass, config_entry)
    # Establish the router connection before registering anything else.
    router = KeeneticRouter(hass, config_entry)
    await router.async_setup()
    # Track the options-update listener so it can be undone on unload.
    hass.data[DOMAIN][config_entry.entry_id] = {
        ROUTER: router,
        UNDO_UPDATE_LISTENER: config_entry.add_update_listener(update_listener),
    }
    # Forward entry setup to every supported platform.
    for entity_platform in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, entity_platform)
        )
    return True
|
beac0da52a530aa63495003b78a87638b869779c
| 3,640,924
|
def OGH(p0, p1, v0, v1, t0, t1, t):
    """Evaluate an Optimized Geometric Hermite (OGH) curve at parameter t.

    Also plots (as a side effect) the chord p0-p1 and the two tangent
    segments onto the current matplotlib axes.

    Args:
        p0, p1: endpoint positions (column vectors / arrays).
        v0, v1: endpoint tangent directions.
        t0, t1: parameter values at the endpoints.
        t: parameter at which to evaluate, typically in [t0, t1].

    Returns:
        The curve point at t as a combination of the Hermite basis
        functions with optimized tangent magnitudes a0, a1.
    """
    # Normalised parameter in [0, 1].
    s = (t-t0)/(t1-t0)
    # Optimal tangent magnitudes minimising strain energy (closed form).
    a0 = (6*np.dot((p1-p0).T,v0)*np.dot(v1.T,v1) - 3*np.dot((p1-p0).T,v1)*np.dot(v0.T,v1)) / ((4*np.dot(v0.T,v0)*np.dot(v1.T,v1) - np.dot(v0.T,v1)*np.dot(v0.T,v1))*(t1-t0))
    a1 = (3*np.dot((p1-p0).T,v0)*np.dot(v0.T,v1) - 6*np.dot((p1-p0).T,v1)*np.dot(v0.T,v0)) / ((np.dot(v0.T,v1)*np.dot(v0.T,v1) - 4*np.dot(v0.T,v0)*np.dot(v1.T,v1))*(t1-t0))
    # Cubic Hermite basis functions evaluated at s.
    h0 = (2*s+1)*(s-1)*(s-1)
    h1 = (-2*s+3)*s*s
    h2 = (1-s)*(1-s)*s
    h3 = (s-1)*s*s
    # Debug visualisation: chord (dotted cyan) and tangents (green).
    plt.plot([p0[0],p1[0]], [p0[1],p1[1]], ':c')
    plt.plot([p0[0], (p0+v0)[0]], [p0[1], (p0+v0)[1]], '-g')
    plt.plot([p1[0], (p1+v1)[0]], [p1[1], (p1+v1)[1]], '-g')
    return h0*p0 + h1*p1 + h2*v0*a0 + h3*v1*a1
|
8bf86bbb2105ec26586a3568bb1a6448b284fbec
| 3,640,927
|
def permutation_test(v1, v2, iter=1000):
    """Conduct a permutation test on the difference of means.

    Parameters
    ----------
    v1 : array
        Vector 1.
    v2 : array
        Vector 2.
    iter : int. Default is 1000.
        Number of random permutations to draw.

    Returns
    -------
    p : float
        The permutation test p-value, or the string "Invalid input" when
        the two vectors differ in length.
    """
    if len(v1) != len(v2):
        return "Invalid input"
    # Observed absolute difference of group means.
    observed = abs(np.average(v1) - np.average(v2))
    pooled = np.hstack((v1, v2))
    half = int(pooled.shape[0] / 2)
    exceed_count = 0
    for _ in range(iter):
        shuffled = np.random.permutation(pooled)
        # Difference of means after a random relabelling.
        delta = np.average(shuffled[:half]) - np.average(shuffled[half:])
        if delta >= observed:
            exceed_count = exceed_count + 1
    # Fraction of permutations at least as extreme as the observation.
    return np.float64(exceed_count / iter)
|
3b618069b610d0ee37e8bcb32f814e34efaeebab
| 3,640,928
|
def registered_paths():
    """Return a snapshot of the paths added via registration.

    The returned list is a copy, so mutating it does not affect the
    registered paths themselves.
    """
    return [*_registered_paths]
|
4bd8471fc2bff1e09a84b1ae8878c0db5f7afd65
| 3,640,929
|
import torch
def nms_dynamic(ctx, g, boxes: Tensor, scores: Tensor,
                max_output_boxes_per_class: int, iou_threshold: float,
                score_threshold: float):
    """Rewrite symbolic function for the default backend.

    Wraps constant-valued nms parameters as ONNX Constant ops so the call
    aligns with ONNX's NonMaxSuppression signature.

    Args:
        ctx (ContextCaller): The context with additional information.
        g (Graph): The traced onnx graph.
        boxes (Tensor): Bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): Detection scores of shape [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Max output boxes per class.
        iou_threshold (float): IOU threshold of nms.
        score_threshold (float): Score threshold of nms.

    Returns:
        NonMaxSuppression op for onnx.
    """
    # Each threshold may arrive as a plain Python number; promote those to
    # graph constants of the dtype ONNX expects.
    if not sym_help._is_value(max_output_boxes_per_class):
        max_boxes_const = torch.tensor(max_output_boxes_per_class, dtype=torch.long)
        max_output_boxes_per_class = g.op('Constant', value_t=max_boxes_const)
    if not sym_help._is_value(iou_threshold):
        iou_const = torch.tensor([iou_threshold], dtype=torch.float)
        iou_threshold = g.op('Constant', value_t=iou_const)
    if not sym_help._is_value(score_threshold):
        score_const = torch.tensor([score_threshold], dtype=torch.float)
        score_threshold = g.op('Constant', value_t=score_const)
    return g.op('NonMaxSuppression', boxes, scores, max_output_boxes_per_class,
                iou_threshold, score_threshold)
|
6b6eea9ce2f2fe84cabb85ddbb069732fa78cca9
| 3,640,930
|
from datetime import datetime, timezone
from typing import Union

import pytz
def api_timestamp_to_datetime(api_dt: Union[str, dict]):
    """Convert the datetime string returned by the API to an aware UTC datetime.

    The API formats timestamps with seven fractional-second digits plus a
    trailing ``Z`` (e.g. ``2021-01-02T03:04:05.1234567Z``); ``%f`` accepts at
    most six digits, so the last digit and the ``Z`` are cropped before parsing.

    Args:
        api_dt: Either the timestamp string itself or a dict carrying it
            under the ``"dateTime"`` key.

    Returns:
        A timezone-aware UTC ``datetime``.

    Raises:
        TypeError: If *api_dt* is neither a str nor a dict.
    """
    if isinstance(api_dt, str):
        api_dt_str_mod = api_dt[:-2]
    elif isinstance(api_dt, dict):
        api_dt_str_mod = api_dt["dateTime"][:-2]
    else:
        # BUG FIX: a bare `raise` outside an except block raised an opaque
        # RuntimeError; raise a descriptive TypeError instead.
        raise TypeError(f"expected str or dict, got {type(api_dt).__name__}")
    dt = datetime.strptime(api_dt_str_mod, "%Y-%m-%dT%H:%M:%S.%f")
    # stdlib timezone.utc replaces pytz.utc.localize(); for UTC (no DST)
    # the resulting datetimes compare equal.
    return dt.replace(tzinfo=timezone.utc)
|
26f4828a19d17c883a8658eb594853158d70fbcf
| 3,640,931
|
from typing import List
def calc_mutation(offsprings: List[List[List[int]]], mut_rate: float, genes_num: int) -> List[List[List[int]]]:
    """Optional custom mutation hook for the simulator.

    When this returns anything other than None the simulator uses it instead
    of its built-in mutation; returning None keeps the default behaviour.
    """
    # No custom mutation implemented: defer to the simulator's default.
    return None
|
d665b7c2ff8ddfa2c4b905c3d6bab02028e30ec2
| 3,640,932
|
def compute_targets(ex_rois, gt_rois, weights=(1.0, 1.0, 1.0, 1.0)):
    """Compute bounding-box regression targets for an image.

    Thin wrapper around ``box_utils.bbox_transform_inv`` that casts the
    deltas to float32 without copying when possible.
    """
    deltas = box_utils.bbox_transform_inv(ex_rois, gt_rois, weights)
    return deltas.astype(np.float32, copy=False)
|
de2a65b5c3c44bbd4bffcd0d99143982ed4c031c
| 3,640,933
|
def _args_filter(args):
    """Normalise *args* into the list-of-tuples shape the zenith DB API wants.

    The DB API only accepts a list of tuples for bind execution, which is
    ungainly, so every accepted argument shape is converted: generators are
    materialised, an empty argument set yields [], a sequence of sequences
    is converted row by row, and a flat sequence becomes a single-row list.
    """
    if isinstance(args, (GeneratorType,)):
        args = list(args)
    if len(args) <= 0:
        return []
    first = args[0]
    if isinstance(first, (tuple, list)):
        return [tuple(row) for row in args]
    return [tuple(args)]
|
af9a836c1389acc4e0faf0d08e47ef8a39e57345
| 3,640,934
|
def getAreaDF(spark):
    """Return a Spark DF with BLOCK geocodes and Land/Water area columns.

    Parameters
    ==========
    spark : SparkSession

    Returns
    =======
    a Spark DF

    Notes
    =====
    - Converts AREALAND and AREAWATER from square meters to square miles
    - Used primarily for calculating Population Density
    """
    # square miles = square meters / 2,589,988
    # https://www.census.gov/quickfacts/fact/note/US/LND110210
    SQ_METERS_PER_SQ_MILE = 2589988
    area_cols = ['AREALAND', 'AREAWATER']
    area = getGRFC(spark, columns=area_cols)
    for column in area_cols:
        # Cast the raw GRFC strings to long, then convert units.
        area = area.withColumn(column, sf.col(column).cast("long")).persist()
        area = area.withColumn(column, sf.col(column) / sf.lit(SQ_METERS_PER_SQ_MILE)).persist()
    area = area.withColumn("AREA_SQUARE_MILES", sf.expr(" + ".join(area_cols))).persist()
    return area
|
181e84e98ca2cf83be0cf5dbf41a8dbc46b88ad4
| 3,640,935
|
def how_many():
    """Report the current number of issues waiting in SQS to the channel."""
    if not is_request_valid(request):
        abort(400)
    lapdog_instance = Lapdog()
    # BUG FIX: the queue length was computed but then discarded while a
    # hard-coded "4" was reported; interpolate the actual value.
    # (Assumes Lapdog.how_many() returns the count — confirm against Lapdog.)
    waiting = lapdog_instance.how_many()
    return jsonify(
        response_type="in_channel",
        text=f"There are {waiting} issues waiting to be handled",
    )
|
db132bed6c957ad1f922776165ccb999bfcedb32
| 3,640,937
|
import struct
def read_sbd(filepath):
    """Read an .sbd file containing spectra in either profile or centroid mode.

    Returns:
        tuple: (header, meta, spectra) — the unpacked file header, the
        per-spectrum metadata records, and the list of decoded spectra.
    """
    with open(filepath, 'rb') as in_file:
        # 10-byte header "<BQB"; field 1 is the spectrum count.
        header = struct.unpack("<BQB", in_file.read(10))
        spectrum_count = header[1]
        meta_size = spectrum_count * 20  # sizeof(QLfHH)
        meta = list(struct.iter_unpack("<QLfHH", in_file.read(meta_size)))
        # The second field of each metadata record is that spectrum's
        # point count — presumably; verify against the writer.
        spectra = [read_spectrum(in_file, record[1]) for record in meta]
    return (header, meta, spectra)
|
364499580d5531d7361b87d3f575bf006fc79791
| 3,640,938
|
def dct2(X, blksize):
    """Blockwise 2-D DCT of array X using blksize x blksize tiles.

    X is processed in non-overlapping blksize blocks; any trailing remainder
    (when a dimension is not a multiple of blksize) is left as zeros in the
    output. Works on 2-D (single-channel) and 3-D (multi-channel) arrays.
    """
    dctm = dct_mat(blksize)
    out = sp.zeros(X.shape)
    if out.ndim == 3:
        for row in range(blksize, X.shape[0], blksize):
            for col in range(blksize, X.shape[1], blksize):
                for ch in range(X.shape[2]):
                    # 2-D DCT of one tile: D * B * D^T, per channel.
                    tile = X[row - blksize:row, col - blksize:col, ch]
                    out[row - blksize:row, col - blksize:col, ch] = sp.dot(sp.dot(dctm, tile), dctm.T)
    elif out.ndim == 2:
        for row in range(blksize, X.shape[0], blksize):
            for col in range(blksize, X.shape[1], blksize):
                tile = X[row - blksize:row, col - blksize:col]
                out[row - blksize:row, col - blksize:col] = sp.dot(sp.dot(dctm, tile), dctm.T)
    return out
|
79aa158f4fd05ac35bad2d16c14b3b8cbd8351af
| 3,640,939
|
def print_filtering(dataset, filter_vec, threshold, meta_name):
    """Print a per-group cell-count table and return the groups to drop.

    Groups (batches or cell types) whose share of total cells falls below
    *threshold* are flagged for removal; their names are returned.
    """
    counts = filter_vec.value_counts(ascending=False)
    print("**", meta_name, "containing less than:", str(threshold), "of total cells are removed" + "\n" + "**", meta_name, "filtered based on our threshold")
    # Summary table describing the filtering about to be done.
    fractions = counts.values / dataset.n_obs
    summary = pd.DataFrame({
        meta_name: counts.index.to_list(),
        'n_cells': counts.values,
        '%_cells': fractions,
        'Excluded_?': fractions < threshold,
    })
    print(summary)
    return summary[meta_name][summary["Excluded_?"] == True].tolist()
|
c637a9d219443de730156e546d52461b9bcdfc84
| 3,640,940
|
from typing import Dict
def get_chunk_tags(chunks: Dict, attrs: str):
    """Build HTML script/link tags for the given webpack chunks.

    :param chunks: chunk records, each with ``resource_type`` and ``url`` keys
    :param attrs: extra attribute string inserted verbatim into each tag
    :return: list of rendered HTML tag strings
    """
    rendered = []
    for chunk in chunks:
        kind = chunk['resource_type']
        parsed = urlparse(chunk['url'])
        path = parsed.path
        # If under STATIC_URL rewrite using static tag so that we respect static
        # file storage options, eg. ManifestStaticFileStorage
        if settings.STATIC_URL and path.startswith(settings.STATIC_URL):
            try:
                path = static(path[len(settings.STATIC_URL):])
            except ValueError:
                # Allow url's that aren't managed by static files - eg. this will
                # happen for ManifestStaticFileStorage if file is not in the manifest
                pass
        url = ParseResult(**dict(parsed._asdict(), path=path)).geturl()
        if kind == 'js':
            rendered.append(f'<script type="text/javascript" src="{url}" {attrs}></script>')
        if kind == 'css':
            rendered.append(f'<link type="text/css" href="{url}" rel="stylesheet" {attrs}/>')
    return rendered
|
e7076b345bcca4e7fe8ac96002aad7499cf0b0f3
| 3,640,942
|
def __discount_PF(i, n):
    """Present worth factor (P/F, i, N).

    Discounts a single future value back to the present:
    P = F * (1 + i) ** (-N)

    :param i: interest rate per period
    :param n: number of periods
    :return: the discount factor to multiply F by

    Cash Flow:
               F
               |
               |
        --------------
        |
        P
    """
    base = 1 + i
    return base ** (-n)
|
b6e7424647921b945a524a22d844925573b6490a
| 3,640,943
|
def pw2dense(pw, maxd):
    """Densify a sparse pairwise-distance matrix.

    The sparse encoding stores true zero distances as -1 (because sparse
    structural zeros mean "absent"): absent entries become maxd + 1 and the
    -1 sentinels are restored to 0.
    """
    dense = np.asarray(pw.todense())
    dense[dense == 0] = maxd + 1  # structural zeros -> beyond-max distance
    dense[dense == -1] = 0        # sentinel -1 encodes an actual distance of 0
    return dense
|
68bbf753d80032a0e697b161c8836283a030a54a
| 3,640,944
|
from typing import Awaitable
def run_simulation(sim: td.Simulation) -> Awaitable[td.Simulation]:
    """Return a future resolving to the simulation with results attached.

    Only submits the simulation if results are not found locally or remotely:
    first the local results cache is checked, then the server-side task list;
    a new task is submitted only when neither has the simulation.

    .. code::

        import gtidy3d as gm

        component = gf.components.straight(length=3)
        sim = gm.get_simulation(component=component)
        sim = run_simulation(sim).result()
    """
    td.logging_level("error")
    sim_hash = get_sim_hash(sim)
    result_path = PATH.results / f"{sim_hash}.hdf5"
    logger.info(f"running simulation {sim_hash}")
    # Task names embed the hash in their first 32 characters, which lets us
    # match previously submitted server tasks to this simulation.
    hash_to_id = {d["task_name"][:32]: d["task_id"] for d in web.get_last_projects()}
    if result_path.exists():
        # Results cached locally.
        logger.info(f"{result_path} found in local storage")
        return _executor.submit(load_results, sim, result_path)
    if sim_hash in hash_to_id:
        # Results available in server storage.
        return _executor.submit(load_results, sim, result_path, hash_to_id[sim_hash])
    # Not found anywhere: submit a fresh task.
    task_id = _export_simulation(sim=sim, task_name=sim_hash)
    return _executor.submit(load_results, sim, result_path, task_id)
|
23524bff78ac326bbf74e2389180d924849e57f4
| 3,640,945
|
def get_cursor_position(fd=1):
    """Return the current console cursor position as an (x, y) tuple."""
    buffer_info = get_console_screen_buffer_info(fd=fd)
    position = buffer_info.dwCursorPosition
    return (position.X, position.Y)
|
b99cf19081af7e0d68523d1efdfc80c89cfe64cc
| 3,640,946
|
from typing import Tuple
def _held_karp(dists: np.ndarray) -> Tuple[float, np.ndarray]:
    """Solve the Traveling Salesman Problem exactly with Held-Karp.

    Dynamic programming over node subsets with memoization; node 0 is the
    tour's start and end point.

    Parameters
    ----------
    dists
        Distance matrix.

    Returns
    -------
    The cost and the path.
    """
    n = len(dists)
    # C maps (visited-set bitmask, last node) -> (cost, predecessor node).
    C = {}
    # Transition cost out of the initial state (node 0).
    for k in range(1, n):
        C[1 << k, k] = (dists[0][k], 0)
    # Grow subsets of increasing size, keeping for each (subset, end node)
    # the cheapest way to visit exactly that subset and stop at that node.
    for size in range(2, n):
        for subset in combinations(range(1, n), size):
            bits = 0
            for node in subset:
                bits |= 1 << node
            for k in subset:
                prev_bits = bits & ~(1 << k)
                C[bits, k] = min(
                    (C[prev_bits, m][0] + dists[m][k], m)
                    for m in subset
                    if m != 0 and m != k
                )
    # All nodes except the start (least significant bit).
    full = (2 ** n - 1) - 1
    # Close the tour back to node 0 from the best final node.
    opt, parent = min((C[full, k][0] + dists[k][0], k) for k in range(1, n))
    # Walk predecessors backwards to recover the visiting order.
    order = []
    bits = full
    for _ in range(n - 1):
        order.append(parent)
        next_bits = bits & ~(1 << parent)
        _, parent = C[bits, parent]
        bits = next_bits
    # Add the implicit start node and reverse into forward order.
    order.append(0)
    return opt, np.array(order)[::-1]
|
982d771c1fef5e4f6311fd1b36216c95db7f1343
| 3,640,947
|
def NS(s, o):
    """Nash Sutcliffe efficiency coefficient.

    Adapted to use in alarconpy by Albenis Pérez Alarcón
    contact: apalarcon1991@gmail.com

    Parameters
    --------------------------
    input:
       s: simulated
       o: observed

    output:
       ns: Nash Sutcliffe efficient coefficient (1 = perfect match)
    """
    s, o = filter_nan(s, o)
    residual_ss = sum((s - o) ** 2)
    variance_ss = sum((o - np.mean(o)) ** 2)
    return 1 - residual_ss / variance_ss
|
10c14022ae634a74f0a417454ddfa0fa52d89c8a
| 3,640,949
|
def UTArgs(v):
    """Build a UTArgs syntax tag wrapping the value *v*."""
    ut_tag = SyntaxTag.TagUTArgs()
    ut_tag.AddV(v)
    return ut_tag
|
8d9ff601a5a2bf65e68e074dad1894342881950f
| 3,640,950
|
from src.praxxis.sqlite import sqlite_rulesengine
from src.praxxis.notebook.notebook import get_output_from_filename
def rules_check(rulesengine_db, filename, output_path, query_start, query_end):
    """Check the active rulesets for rules matching this notebook run.

    For each active ruleset, collects rules whose filename pattern occurs in
    *filename*; when any match, the notebook output is loaded and rules whose
    output pattern occurs in it contribute their predictions.

    NOTE(review): `rulesmatch` and `hit` are accumulated across rulesets
    rather than reset per ruleset, so matches from one ruleset leak into the
    queries for later ones — confirm whether that is intentional.

    Args:
        rulesengine_db: path/handle of the rules-engine sqlite database.
        filename: name of the notebook file being checked.
        output_path: path passed to get_output_from_filename to load output.
        query_start, query_end: range forwarded to get_active_rulesets.

    Returns:
        List of predictions gathered from all matching rules.
    """
    rulesets = sqlite_rulesengine.get_active_rulesets(rulesengine_db, query_start, query_end)
    rulesmatch = []
    hit = set()
    predictions = []
    for ruleset in rulesets:
        # ruleset[2] appears to be the ruleset's database path/id — confirm.
        filenames = sqlite_rulesengine.get_filenames_by_rule(ruleset[2])
        for fmatch in filenames:
            if fmatch[0] in filename:
                rulesmatch.append(fmatch[1])
        if rulesmatch != []:
            #get output
            output = get_output_from_filename(output_path)
            outputs = sqlite_rulesengine.get_outputs_for_rules(ruleset[2], rulesmatch)
            for omatch in outputs:
                if omatch[0] in output:
                    hit.add(omatch[1])
            predictions.extend(sqlite_rulesengine.get_predictions(ruleset[2], hit))
    return predictions
|
a81d29a8a9d61ba6a577fbe9899967b81a25ff7f
| 3,640,951
|
def shortstr(s, max_len=144, replace=None):
    """Return a shortened, single-line string representation of *s*.

    Args:
        s: Object to stringify.
        max_len: Maximum length of the result; values <= 0 disable
            truncation. Truncated results end in " ..." and are exactly
            max_len characters long.
        replace: Mapping of substrings to substitute before truncation;
            defaults to replacing newlines with ";".

    Returns:
        The (possibly truncated) string form of *s*.
    """
    # BUG FIX: the default used to be a mutable dict literal in the
    # signature (shared across calls); use a None sentinel instead.
    if replace is None:
        replace = {'\n': ';'}
    s = str(s)
    for old, new in replace.items():
        s = s.replace(old, new)
    if max_len > 0 and len(s) > max_len:
        # Leave room for the 4-character " ..." suffix.
        s = s[:max_len - 4] + ' ...'
    return s
|
396794506583dcf39e74941a20f27ac63de325ec
| 3,640,952
|
def update_gms_stats_collection(
    self,
    application: bool = None,
    dns: bool = None,
    drc: bool = None,
    drops: bool = None,
    dscp: bool = None,
    flow: bool = None,
    interface: bool = None,
    jitter: bool = None,
    port: bool = None,
    shaper: bool = None,
    top_talkers: bool = None,
    tunnel: bool = None,
) -> bool:
    """Enable/disable stats collection by orchestrator.

    All parameters optional; only flags explicitly set to True/False are
    included in the request body, the rest are left unchanged server-side.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - gmsStatsCollection
          - POST
          - /gms/statsCollection

    Each keyword argument toggles collection of the correspondingly named
    statistic (``application``, ``dns``, ``drc``, ``drops``, ``dscp``,
    ``flow``, ``interface``, ``jitter``, ``port``, ``shaper``,
    ``top_talkers``, ``tunnel``); all default to None (omitted).

    :return: Returns True/False based on successful call.
    :rtype: bool
    """
    # Map each keyword argument to its API field name; None means "omit".
    flag_fields = (
        ("Application", application),
        ("Dns", dns),
        ("Drc", drc),
        ("Drops", drops),
        ("Dscp", dscp),
        ("Flow", flow),
        ("Interface", interface),
        ("Jitter", jitter),
        ("Port", port),
        ("Shaper", shaper),
        ("TopTalkers", top_talkers),
        ("Tunnel", tunnel),
    )
    data = {name: value for name, value in flag_fields if value is not None}
    return self._post(
        "/gms/statsCollection",
        data=data,
        return_type="bool",
    )
|
d6dce80a8543cae16eebf076eeaa3e1428831df5
| 3,640,953
|
def _get_nearby_factories(latitude, longitude, radius):
    """Return factories within *radius* km of (latitude, longitude).

    When more than settings.MAX_FACTORY_PER_GET match, a random sample of
    that size is returned instead.
    """
    # Great-circle (haversine) distance in km, evaluated in SQL.
    # ref: https://stackoverflow.com/questions/574691/mysql-great-circle-distance-haversine-formula
    distance = 6371 * ACos(
        Cos(Radians(latitude)) * Cos(Radians("lat")) * Cos(Radians("lng") - Radians(longitude))
        + Sin(Radians(latitude)) * Sin(Radians("lat"))
    )
    nearby = Factory.objects.annotate(distance=distance).only("id").filter(distance__lt=radius).order_by("id")
    if len(nearby) > settings.MAX_FACTORY_PER_GET:
        nearby = _sample(nearby, settings.MAX_FACTORY_PER_GET)
    # Re-query by id so related records can be prefetched efficiently.
    return (
        Factory.objects.filter(id__in=[obj.id for obj in nearby])
        .prefetch_related(Prefetch('report_records', queryset=ReportRecord.objects.only("created_at").all()))
        .prefetch_related(Prefetch('images', queryset=Image.objects.only("id").all()))
        .prefetch_related(Prefetch('documents', queryset=Document.objects.only('created_at', 'display_status').all()))
        .all()
    )
|
b94c879d93a486b4ac0dd77bee6fb9d79395dc23
| 3,640,954
|
def add_register(request):
    """
    Handle submitted registration data and save it to the database.

    :param request: incoming HTTP POST request carrying the registration form.
    :return: redirect to the login page on success, or re-render the
        registration page with form errors on validation failure.
    """
    form = forms.RegisterForm(request.POST)
    if form.is_valid():
        data = form.cleaned_data
        # Clean the data: drop the confirmation field and hash the password.
        data.pop("re_password")
        data['password'] = hash_pwd.has_password(data.get('password'))
        # Add required fields: mark the new account as active.
        data['is_active'] = 1
        # Store the cleaned data as a new user record.
        models.UserInfo.objects.create(
            **data
        )
        return redirect('mysite:login')
    else:
        # Return the form (with its validation errors) back to the page.
        return render(request, 'login/register.html', {"form":form})
|
acaf3886773b599df2853a5e73ef504af27f1c53
| 3,640,955
|
import numpy
import pandas
def confidence_interval(data, alpha=0.1):
    """
    Calculate the confidence interval for each column in a pandas dataframe.
    @param data: A pandas dataframe with one or several columns.
    @param alpha: The significance level; by default the 90% confidence
        interval (alpha=0.1) is calculated.
    @return: A series where each entry contains the formatted
        "mean +/- half-width" confidence interval for the corresponding column.
    """
    # BUG FIX: alpha was unconditionally reset to 0.1 here, silently
    # ignoring the caller's argument; the hard-coded assignment is removed.
    def _half_width(column):
        # Two-sided Student-t critical value with n-1 degrees of freedom.
        t = scipy_stats.t.isf(alpha / 2.0, len(column) - 1)
        return t * numpy.std(column.values, ddof=1) / sqrt(len(column))

    def _formatted(column):
        return "%.2f +/- %.4f" % (column.mean(), _half_width(column))

    return pandas.Series([_formatted(data[c]) for c in data.columns], index=data.columns)
|
f9c31549287723f7f75c265485b7cd9911f68168
| 3,640,956
|
def RunInTransactionOptions(options, function, *args, **kwargs):
    """Runs a function inside a read-write datastore transaction.

    Every Put, Get, and Delete call made by *function* participates in a
    full ACID transaction; all touched entities must belong to the same
    entity group, and queries must specify an ancestor in that group.
    Positional and keyword trailing arguments are forwarded to the
    function, and its return value (if any) is returned.

    Raising any exception rolls the transaction back and re-raises it;
    raising datastore_errors.Rollback rolls back without re-raising. The
    function may run more than once, so it must be idempotent and free of
    unsafe side effects (modifying its arguments included — they persist
    across retries). Nested transactions are not supported.

    Example usage:

    > def decrement(key, amount=1):
    >   counter = datastore.Get(key)
    >   counter['count'] -= amount
    >   if counter['count'] < 0:    # don't let the counter go negative
    >     raise datastore_errors.Rollback()
    >   datastore.Put(counter)
    >
    > counter = datastore.Query('Counter', {'name': 'foo'})
    > datastore.RunInTransaction(decrement, counter.key(), amount=5)

    Args:
      options: TransactionOptions specifying options (number of retries,
        etc) for this transaction
      function: a function to be run inside the transaction on all
        remaining arguments
      *args: positional arguments for function.
      **kwargs: keyword arguments for function.

    Returns:
      the function's return value, if any

    Raises:
      TransactionFailedError, if the transaction could not be committed.
    """
    mode = datastore_rpc.TransactionMode.READ_WRITE
    return _RunInTransactionInternal(options, mode, function, *args, **kwargs)
|
9236024d034f193919e976a04eec9105ee899d48
| 3,640,957
|
def notify(message, key, target_object=None, url=None, filter_exclude=None):
    """
    Notify subscribing users of a new event. Key can be any kind of string,
    just make sure to reuse it where applicable! Object_id is some identifier
    of an object, for instance if a user subscribes to a specific comment thread,
    you could write:

    notify("there was a response to your comment", "comment_response",
           target_object=PostersObject,
           url=reverse('comments:view', args=(PostersObject.id,)))

    The below example notifies everyone subscribing to the "new_comments" key
    with the message "New comment posted".

    notify("New comment posted", "new_comments")

    filter_exclude: a dictionary to exclude special elements of subscriptions
    in the queryset; defaults to no exclusions.

    Returns the number of notifications created (0 when notifications are
    disabled).

    Raises TypeError when target_object is not a django Model instance.
    """
    # BUG FIX: the default used to be a mutable dict literal in the
    # signature; use a None sentinel to avoid cross-call sharing.
    if filter_exclude is None:
        filter_exclude = {}
    if _disable_notifications:
        return 0
    if target_object:
        if not isinstance(target_object, Model):
            raise TypeError(_("You supplied a target_object that's not an instance of a django Model."))
        object_id = target_object.id
    else:
        object_id = None
    objects = models.Notification.create_notifications(
        key,
        object_id=object_id,
        message=message,
        url=url,
        filter_exclude=filter_exclude,
    )
    return len(objects)
|
9da7f8a498a3fad1f1acbb9e35e798083d6a25c5
| 3,640,958
|
from pathlib import Path
def get_project_root() -> Path:
    """Return the path of the project root folder.

    Returns:
        Path: Directory containing this module, treated as the project root.
    """
    this_module = Path(__file__)
    return this_module.parent
|
0122844ae89a53b0cd28659be21fb932164719cd
| 3,640,959
|
def FTCS(Uo, diffX, diffY=None):
    """Advance the 1-D or 2-D diffusion equation one time step with FTCS.

    Explicit Forward Time / Central Space update of the model equation;
    boundary values are carried over unchanged.

    Parameters
    ----------
    Uo : ndarray[float], =1d, 2d
        The dependent variable at time level n within the entire domain.
    diffX : float
        Diffusion number for the x-component of the diffusion equation.
    diffY : float, Default=None for 1-D applications
        Diffusion number for the y-component of the diffusion equation.

    Returns
    -------
    U : ndarray[float], =1d, 2d
        The dependent variable at time level n+1 within the entire domain.
    """
    U = Uo.copy()  # boundaries keep their previous values
    if Uo.ndim == 1:
        # Interior update: u_i += dX * (u_{i+1} - 2 u_i + u_{i-1}).
        U[1:-1] = Uo[1:-1] + diffX * (Uo[2:] - 2.0 * Uo[1:-1] + Uo[0:-2])
    elif Uo.ndim == 2:
        # Central second differences along each axis.
        U[1:-1, 1:-1] = (
            Uo[1:-1, 1:-1]
            + diffX * (Uo[2:, 1:-1] - 2.0 * Uo[1:-1, 1:-1] + Uo[0:-2, 1:-1])
            + diffY * (Uo[1:-1, 2:] - 2.0 * Uo[1:-1, 1:-1] + Uo[1:-1, 0:-2])
        )
    return U
|
4b02749f3f50a2cff74abb75146159289d42b99e
| 3,640,960
|
def epicyclic_frequency(prof) -> Quantity:
    """Epicyclic frequency: kappa = sqrt(2*Omega/R * d(R^2 * Omega)/dR)."""
    omega = prof['keplerian_frequency']
    radius = prof['radius']
    # Radial gradient of specific angular momentum (R^2 * Omega).
    ang_mom_gradient = np.gradient(radius ** 2 * omega, radius)
    return np.sqrt(2 * omega / radius * ang_mom_gradient)
|
917fc1e094719f0dbb6a3ac7ca0396601060bf1c
| 3,640,961
|
def get_groups(
        a_graph,
        method='component_infomap', return_form='membership'):
    """
    Return the grouping of the provided graph object using the specified
    method. The grouping is returned as a list of sets each holding all
    members of a group.
    Parameters
    ==========
    a_graph: :class:`igraph.Graph`
        The graph to partition
    method: str (default='component_infomap')
        String specifying which method to use. If two methods
        should be used one after the other they should be separated by `_`.
        Default: 'component_infomap' which will first consider all
        disconnected components as groups then apply infomap on all of
        those groups to optionally further split.
    return_form: str (default='membership')
        Determines the format of how the social group structure should be
        returned. Options are:
        * ``'membership'``: A list returning for each `index` node the group it
          belongs to.
        * ``'memberlists'``: Dictionary with a list of members `value` for each
          group `key`.
    Returns
    =======
    dict
        Depending on what was chosen for the `return_form` attribute, either the
        membership dict, i.e.::
            {
                node_id: group_id,
                ...
            }
        or the memberlist dict, i.e.::
            {
                group_id: [node1_id, node2_id, ...],
                ...
            }
        (value) is returned.
    """
    # methods = method.split('_')
    # For now only 'component_infomap' is allowed as procedure
    if method == 'component_infomap':
        # first the connected components
        # (stores each vertex's component id as a vertex attribute)
        a_graph.vs['component'] = a_graph.clusters(
        ).membership
        components = set(a_graph.vs['component'])
        # create for each component a graph and apply infomap to it
        node_membership = {}
        # print(
        #     'INFO: Found {0} disconnected components'.format(len(components))
        # )
        if components:
            # do the community detection on each component and create a
            # compound group id: component_group
            for component in components:
                _comp_graph = a_graph.subgraph(
                    [
                        node['name']
                        for node in a_graph.vs
                        if node['component'] == component
                    ]
                )
                # infomap on the component, weighted by the 'weight' edge attr
                _infompa_comp_graph = _comp_graph.community_infomap('weight')
                _comp_graph.vs['_group'] = _infompa_comp_graph.membership
                for node in _comp_graph.vs:
                    node_membership[node['name']] = '{0}_{1}'.format(
                        node['component'], node['_group']
                    )
                del _infompa_comp_graph
        else:
            # components is empty only when the graph has no vertices;
            # fall back to running infomap on the whole graph
            _infompa_comp_graph = a_graph.community_infomap('weight')
            a_graph.vs['group'] = _infompa_comp_graph.membership
            node_membership = {
                node['name']: node['group']
                for node in a_graph.vs
            }
    # NOTE(review): any method other than 'component_infomap' leaves
    # node_membership undefined and raises NameError below — confirm intended.
    # Invert the node -> group mapping into group -> [nodes].
    group_membership = {}
    for node in node_membership:
        try:
            group_membership[node_membership[node]].append(node)
        except KeyError:
            group_membership[node_membership[node]] = [node]
    if return_form == 'membership':
        # nbr_nodes = len(a_graph.vs['name'])
        # membership = [None]*nbr_nodes
        # for g, members in group_membership.items():
        #     for member in members:
        #         membership[member] = g
        # return membership
        return node_membership
    elif return_form == 'memberlists':
        # return [_group for _group in group_membership.values()]
        return group_membership
    else:
        # unrecognized return_form
        return None
|
110dd9dc470d9426b388e0db1289ff0b23c4a963
| 3,640,963
|
def positional_rank_queues (service_platform,
                            api_key):
    """ Get the queues that have positional ranks enabled.
    References:
        https://developer.riotgames.com/regional-endpoints.html
        https://developer.riotgames.com/api-methods/#league-v4/GET_getQueuesWithPositionRanks
    Arguments:
        service_platform (str): The service platform that the request should be issued to.
        api_key (str): The client's api key.
    Returns:
        dict: the details of the response to the issued http request.
    """
    # The API key travels in the X-Riot-Token header.
    request_headers = {"X-Riot-Token": api_key}
    host = endpoints.v4["host"]["endpoint"].format(service_platform)
    route = endpoints.v4["positional-rank-queues"]["endpoint"]
    return _request_executor.get(host + route,
                                 header_parameters=request_headers)
|
f48f9a445aac9611d4892e1aab5e7699a4c3ec1f
| 3,640,964
|
def maplist(f, xs):
    """Apply *f* to every element of *xs* and return the results as a list."""
    return [f(item) for item in xs]
|
894a58f9e2cd66fe9c327ea65433b8210051ed60
| 3,640,965
|
import string
import re
def pull_urls_excel_sheets(workbook):
    """
    Pull URLs from cells in a given ExcelBook object.

    Returns a set of URL strings found in the workbook's cell values
    (empty list when no workbook is given).
    """
    # No workbook, nothing to scan.
    if workbook is None:
        return []
    urls = set()
    for cell in excel.pull_cells_workbook(workbook):
        # Normalize the cell value to a printable, stripped string.
        try:
            text = str(cell["value"]).strip()
        except UnicodeEncodeError:
            text = ''.join(filter(lambda x:x in string.printable, cell["value"])).strip()
        if not text:
            continue
        # Prepend http:// to values that look like schemeless URLs.
        looks_like_url = r"[A-Za-z0-9_]{3,50}\.[A-Za-z]{2,10}/(?:[A-Za-z0-9_]{1,50}/)*[A-Za-z0-9_\.]{3,50}"
        if re.search(looks_like_url, text) is not None:
            text = "http://" + text
        # Collect every URL match in the (possibly prefixed) cell value.
        for found in re.findall(read_ole_fields.URL_REGEX, text):
            urls.add(found.strip())
    return urls
|
0359fb8e1fd552749e15cce631f756130c5199cf
| 3,640,966
|
import click
import requests
def do_request(base_url, api_path, key, session_id, extra_params=''):
    """
    Execute a request against the KNVB API (e.g. /teams to fetch all
    team data) and return the decoded JSON response.

    :param base_url: base URL of the API.
    :param api_path: endpoint path, e.g. '/teams'.
    :param key: client API key; also used to compute the request hash.
    :param session_id: PHP session id issued by the API.
    :param extra_params: optional extra query-string parameters.
    :return: the JSON-decoded response body.
    """
    # Local import: the original used the Python-2-only `md5` module
    # (never imported here); hashlib is the Python 3 equivalent.
    import hashlib

    # The API authenticates each call with md5("key#path#session").
    hashStr = hashlib.md5(
        '{0}#{1}#{2}'.format(key,
                             api_path,
                             session_id).encode('utf-8')).hexdigest()
    url = '{0}{1}?PHPSESSID={2}&hash={3}&{4}'.format(base_url,
                                                     api_path,
                                                     session_id,
                                                     hashStr,
                                                     extra_params)
    headers = {
        'HTTP_X_APIKEY': key,
        'Content-Type': 'application/json'
    }
    click.echo('URL: {0}'.format(url))
    r = requests.get(url, headers=headers)
    json_data = r.json()
    return json_data
|
44217caa2c2cdf7543597405836cf0bb1ac650cd
| 3,640,967
|
def write_code():
"""
Code that checks the existing path and snaviewpath
in the environmental viriables/PATH
"""
msg = """\n\n[Code]\n"""
msg += """function InstallVC90CRT(): Boolean;\n"""
msg += """begin\n"""
msg += """ Result := not DirExists('C:\WINDOWS\WinSxS\\x86_Microsoft.VC90."""
msg += """CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375');\n"""
msg += """end;\n\n"""
msg += """function NeedsAddPath(): boolean;\n"""
msg += """var\n"""
msg += """ oldpath: string;\n"""
msg += """ newpath: string;\n"""
msg += """ pathArr: TArrayOfString;\n"""
msg += """ i: Integer;\n"""
msg += """begin\n"""
msg += """ RegQueryStringValue(HKEY_CURRENT_USER,'Environment',"""
msg += """'PATH', oldpath)\n"""
msg += """ oldpath := oldpath + ';';\n"""
msg += """ newpath := '%SASVIEWPATH%';\n"""
msg += """ i := 0;\n"""
msg += """ while (Pos(';', oldpath) > 0) do begin\n"""
msg += """ SetArrayLength(pathArr, i+1);\n"""
msg += """ pathArr[i] := Copy(oldpath, 0, Pos(';', oldpath)-1);\n"""
msg += """ oldpath := Copy(oldpath, Pos(';', oldpath)+1,"""
msg += """ Length(oldpath));\n"""
msg += """ i := i + 1;\n"""
msg += """ // Check if current directory matches app dir\n"""
msg += """ if newpath = pathArr[i-1] \n"""
msg += """ then begin\n"""
msg += """ Result := False;\n"""
msg += """ exit;\n"""
msg += """ end;\n"""
msg += """ end;\n"""
msg += """ Result := True;\n"""
msg += """end;\n"""
msg += """\n"""
return msg
|
429eb64485a4fe240c1bebbfd2a2a89613b4fddd
| 3,640,968
|
import re
def get_filenames(filename):
    """
    Return a list of unique file references found inside *filename*.

    The file content is split on a fixed set of separator characters and
    any token ending in a known data-file extension is collected.
    On any error (e.g. missing file) the error is printed and an empty
    list is returned.
    """
    wanted = ('.csv', '.zip', '.pdf', '.txt', '.tsv', '.cfg', '.ini')
    try:
        with open(filename, 'r', encoding='utf8') as handle:
            tokens = re.split("[\n\\, \-!?;'//]", handle.read())
        return list({tok for tok in tokens if tok.endswith(wanted)})
    except Exception as err:
        print(err)
        return []
|
a1d8c396245cfc682ecc37edb3e673f87939b6fa
| 3,640,969
|
def format_filename_gen(prefix, seq_len, tgt_len, bi_data, suffix,
                        src_lang, tgt_lang, uncased=False,):
    """Build the generated-data shard file name for a language pair.

    Languages are abbreviated to their first two characters; casing and
    bi/uni data flags become tags embedded in the name.
    """
    case_tag = "uncased." if uncased else ""
    pairing = "bi" if bi_data else "uni"
    return (
        f"{src_lang[:2]}-{tgt_lang[:2]}_{prefix}"
        f".seqlen-{seq_len}.tgtlen-{tgt_len}"
        f".{case_tag}{pairing}.gen.{suffix}"
    )
|
4a54c1fbfe371d628c1d7019c131b8fa6755f900
| 3,640,970
|
def is_holiday(date) -> bool:
    """
    Report whether *date* is a recognized holiday.
    """
    holiday = penn_holidays.get(date)
    if not holiday:
        return False
    # Treat "(Observed)" dates the same as the holiday itself.
    return holiday.replace(' (Observed)', '') in holiday_names
|
edb68fa552f0f772b29b5d8a414758e63c252045
| 3,640,971
|
import re
def tokenize_text(text):
    """
    Split a string into tokens.

    Runs of characters matching the module-level ``alphanumeric`` pattern
    form word tokens; every other character becomes its own token unless
    it is listed in ``filter_character``.

    :param text: String
    :return: Tokens (list of strings)
    """
    tokens = []
    word = ""
    for ch in text:
        if re.match(alphanumeric, ch):
            word += ch
            continue
        # Non-word character: flush the word accumulated so far first.
        if word:
            tokens.append(word)
            word = ""
        if ch not in filter_character:
            tokens.append(ch)
    # Flush a trailing word at end of input.
    if word:
        tokens.append(word)
    return tokens
|
b7f420d081d9cd658435ef623142a9d8ecf7b99b
| 3,640,972
|
def generate_dummy_probe(elec_shapes='circle'):
    """
    Generate a 3 columns 32 channels electrode.
    Mainly used for testing and examples.
    """
    shape_params = {
        'circle': {'radius': 6},
        'square': {'width': 7},
        'rect': {'width': 6, 'height': 4.5},
    }
    # An unknown shape name leaves electrode_shape_params unassigned and
    # raises UnboundLocalError at the call below (same as the if/elif chain).
    if elec_shapes in shape_params:
        electrode_shape_params = shape_params[elec_shapes]
    probe = generate_multi_columns_probe(
        num_columns=3,
        num_elec_per_column=[10, 12, 10],
        xpitch=25, ypitch=25, y_shift_per_column=[0, -12.5, 0],
        electrode_shapes=elec_shapes,
        electrode_shape_params=electrode_shape_params)
    return probe
|
ea0f900390cf808cd8df3a38df9c47b99b77167b
| 3,640,973
|
def try_decode(message):
    """Attempt to decode *message* with each known message class.

    Returns the first successful decode, or None when no class accepts it.
    """
    for message_cls in MESSAGE_CLASSES:
        try:
            decoded = message_cls.decode(message)
        except ValueError:
            continue  # not this message type; try the next class
        return decoded
    return None
|
1dbbe5a6426b67690834673cd049535b018c0097
| 3,640,974
|
def build_where_clause(args: dict) -> str:
    """
    Transform the relevant entries of the args dict into the WHERE part of a SQL query.

    Args:
        args: The arguments dict (command arguments; may include the composite
            'ip', 'port' and 'url' arguments, or a raw 'query' that bypasses
            all other processing).

    Returns:
        A string representing the WHERE part of a SQL query.

    Raises:
        DemistoException: If 'ip' is supplied together with 'source_ip'/'dest_ip',
            or 'port' together with 'source_port'/'dest_port'.
    """
    # Maps command argument names to the backend SQL field each one filters on.
    args_dict = {
        'source_ip': 'source_ip.value',
        'dest_ip': 'dest_ip.value',
        'rule_matched': 'rule_matched',
        'from_zone': 'from_zone',
        'to_zone': 'to_zone',
        'source_port': 'source_port',
        'dest_port': 'dest_port',
        'action': 'action.value',
        'file_sha_256': 'file_sha_256',
        'file_name': 'file_name',
        'app': 'app',
        'app_category': 'app_category',
        'dest_device_port': 'dest_device_port',
        'dest_edl': 'dest_edl',
        'dest_dynamic_address_group': 'dest_dynamic_address_group',
        'dest_location': 'dest_location',
        'dest_user': 'dest_user',
        'file_type': 'file_type',
        'is_server_to_client': 'is_server_to_client',
        'is_url_denied': 'is_url_denied',
        'log_type': 'log_type',
        'nat_dest': 'nat_dest',
        'nat_dest_port': 'nat_dest_port',
        'nat_source': 'nat_source',
        'nat_source_port': 'nat_source_port',
        'rule_matched_uuid': 'rule_matched_uuid',
        'severity': 'severity',
        'source_device_host': 'source_device_host',
        'source_edl': 'source_edl',
        'source_dynamic_address_group': 'source_dynamic_address_group',
        'source_location': 'source_location',
        'source_user': 'source_user',
        'sub_type': 'sub_type.value',
        'time_generated': 'time_generated',
        'url_category': 'url_category',
        'url_domain': 'url_domain'
    }
    # The composite 'ip'/'port' arguments are mutually exclusive with their
    # source/dest counterparts.
    if args.get('ip') and (args.get('source_ip') or args.get('dest_ip')):
        raise DemistoException('Error: "ip" argument cannot appear with either "source_ip" nor "dest_ip"')
    if args.get('port') and (args.get('source_port') or args.get('dest_port')):
        raise DemistoException('Error: "port" argument cannot appear with either "source_port" nor "dest_port"')
    # Port values are numeric in the backend and must not be quoted below.
    non_string_keys = {'dest_port', 'source_port'}
    if 'query' in args:
        # if query arg is supplied than we just need to parse it and only it
        return args['query'].strip()
    where_clause = ''
    if args.get('ip'):
        ips = argToList(args.pop('ip'))
        # Creating a query for ip argument using source ip and dest ip
        where_clause += '(' + ' OR '.join(f'source_ip.value = "{ip}" OR dest_ip.value = "{ip}"' for ip in ips) + ')'
        # Append AND only when more filters will follow this group.
        if any(args.get(key) for key in args_dict) or args.get('port') or args.get('url'):
            where_clause += ' AND '
    if args.get('port'):
        ports = argToList(args.pop('port'))
        # Creating a query for port argument using source port and dest port
        where_clause += '(' + ' OR '.join(f'source_port = {port} OR dest_port = {port}' for port in ports) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '
    if args.get('url'):
        urls = argToList(args.pop('url'))
        # Creating a query for url argument using uri and referer
        where_clause += '(' + ' OR '.join(f'uri LIKE "%{url}%" OR referer LIKE "%{url}%"' for url in urls) + ')'
        if any(args.get(key) for key in args_dict):
            where_clause += ' AND '
    # We want to add only keys that are part of the query
    string_query_fields = {key: value for key, value in args.items() if key in args_dict and key not in non_string_keys}
    or_statements = []
    for key, values in string_query_fields.items():
        string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = "{value}"' for value in string_values_list]))
    # ports are digested as ints and cannot be sent as strings
    non_string_query_fields = {key: value for key, value in args.items() if key in non_string_keys}
    for key, values in non_string_query_fields.items():
        non_string_values_list: list = argToList(values)
        field = args_dict[key]
        or_statements.append(' OR '.join([f'{field} = {value}' for value in non_string_values_list]))
    where_clause += ' AND '.join([f'({or_statement})' for or_statement in or_statements if or_statement])
    return where_clause
|
3b85c92346be254646dd5208259cee317f6f9741
| 3,640,975
|
def matrix_scale(s):
    """Produce scaling transform matrix with uniform scale s in all 3 dimensions."""
    result = matrix_ident()
    # Overwrite the upper-left 3x3 with a float64 diagonal of s.
    result[0:3, 0:3] = np.diag([s] * 3).astype(np.float64)
    return result
|
22949a406865c18fe8200e43ea046ca6f16bdd6f
| 3,640,976
|
from typing import List
def magnitude_datapoints(data: DataPoint) -> List:
    """
    Compute the L2 norm (magnitude) of each sample in *data*.

    :param data: sequence of DataPoint objects exposing an array-like ``sample``
    :return: list of per-sample magnitudes; empty when *data* is None or empty
    """
    if data is None or len(data) == 0:
        return []
    samples = np.array([point.sample for point in data])
    return norm(samples, axis=1).tolist()
|
b6c505f02042cfc34183a19cc0843b28e25dd6b2
| 3,640,977
|
def svn_stringbuf_from_aprfile(*args):
    """svn_stringbuf_from_aprfile(svn_stringbuf_t result, apr_file_t file, apr_pool_t pool) -> svn_error_t"""
    # The `apply()` builtin was removed in Python 3; argument unpacking
    # with *args is the direct equivalent.
    return _core.svn_stringbuf_from_aprfile(*args)
|
d9faccd861d5382593988c1e2585207e0b5fa89f
| 3,640,979
|
from pathlib import Path
def Arrow_Head_A (cls, elid = "SVG:Arrow_Head_A", design_size = 12, ref_x = None, stroke = "black", marker_height = 6, marker_width = 6, fill = "white", fill_opacity = 1, ** kw) :
    """Return a marker that is an arrow head with an A-Shape.
    >>> mrk = Marker.Arrow_Head_A ()
    >>> svg = Document (Root (view_box="0 0 1000 500"))
    >>> svg.add (Defs (mrk))
    >>> svg.add (Rect (x = 5, y = 5, width = 990, height = 490, fill = "none", stroke = "orange", stroke_width = 5))
    >>> svg.add (Path (fill = "none", stroke = "red", stroke_width = 25, marker_end = "url(#SVG:Arrow_Head_A)", d = "M 100 200 L 500 200 900 400"))
    >>> svg.add (Path (fill = "none", stroke = "blue", stroke_width =10, marker_start = "url(#SVG:Arrow_Head_A)", d = "M 100 100 L 500 100 900 50"))
    >>> svg.write_to_xml_stream ()
    <?xml version="1.0" encoding="utf-8" standalone="yes"?>
    <!DOCTYPE svg PUBLIC
      "-//W3C//DTD SVG 1.1//EN"
      "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
    <svg version="1.1" viewBox="0 0 1000 500" xmlns="http://www.w3.org/2000/svg"
         xmlns:xlink="http://www.w3.org/1999/xlink"
    >
      <defs>
        <marker id="SVG:Arrow_Head_A" fill="none" markerHeight="6"
            markerUnits="strokeWidth" markerWidth="6" orient="auto" refX="0"
            refY="6" stroke="black" viewBox="0 0 12 12"
        >
          <path d="m 0,9.0 0,-6.0 6.0,3.0 -6.0,3.0 z" fill="white"
              fill-opacity="1" stroke="none"
          >
          </path>
          <path d="m 0,9.0 5.0,-3.0 -5.0,-3.0">
          </path>
          <path d="m 2.0,4.0 0,4.0">
          </path>
        </marker>
      </defs>
      <rect fill="none" height="490" stroke="orange" stroke-width="5"
          width="990" x="5" y="5"
      />
      <path d="M 100 200 L 500 200 900 400" fill="none"
          marker-end="url(#SVG:Arrow_Head_A)" stroke="red" stroke-width="25"
      >
      </path>
      <path d="M 100 100 L 500 100 900 50" fill="none"
          marker-start="url(#SVG:Arrow_Head_A)" stroke="blue"
          stroke-width="10"
      >
      </path>
    </svg>
    """
    # modifying design size will draw with different line-strength
    # compared to the shape
    size = design_size
    size_2 = size // 2
    # NOTE(review): `scope` is assigned but never used afterwards — confirm
    # that Scope() has no required side effects before removing this line.
    scope = Scope ()
    # Default the marker reference point to x = 0 (the arrow's tail edge).
    if ref_x is None :
        ref_x = 0
    # Assemble the marker from three paths — per the doctest output above:
    # a filled closed outline, an open chevron stroke, and a short bar.
    result = cls \
        ( Path
            ( d = "m %s,%s 0,%s %s,%s %s,%s z" %
                ( 0
                , size * 3 / 4.
                , -(size / 2.)
                , size / 2.
                , size / 4.
                , -(size / 2.)
                , size / 4.
                )
            , fill = fill
            , fill_opacity = fill_opacity
            , stroke = "none"
            )
        , Path
            ( d = "m %s,%s %s,%s %s,%s" %
                ( 0
                , size * 3 / 4.
                , size * 5 / 12.
                , -(size / 4.)
                , -(size * 5 / 12.)
                , -(size / 4.)
                )
            )
        , Path
            ( d = "m %s,%s 0,%s" %
                ( size / 6.
                , size / 3.
                , size / 3.
                )
            )
        , elid = elid
        , fill = "none"
        , marker_units = "strokeWidth"
        , marker_height = marker_height
        , marker_width = marker_width
        , orient = "auto"
        , ref_x = ref_x
        , ref_y = size_2
        , stroke = stroke
        , view_box = (0, 0, size, size)
        , ** kw
        )
    return result
|
661409c1ed37e33e9aea306b1c5b8d2a369bbaf2
| 3,640,980
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.