| content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M) |
|---|---|---|
from typing import Tuple
def http(func: str, arg: Tuple[str]) -> int:
"""Summary.
Args:
func (str): Path to a function.
arg (Tuple[str]): Description
Returns:
int: Description
"""
return ERGO_CLI.http(func, *list(arg))
|
1e46eefa4101ff63d2d3851bacbfd472e1d3c7ce
| 3,640,752
|
from time import time
import numpy as np
start = time()  # module start time, used in the elapsed-time log message below
def isMWS_bhb(primary=None, objtype=None,
gaia=None, gaiaaen=None, gaiadupsource=None, gaiagmag=None,
gflux=None, rflux=None, zflux=None,
w1flux=None, w1snr=None, maskbits=None,
gnobs=None, rnobs=None, znobs=None,
gfracmasked=None, rfracmasked=None, zfracmasked=None,
parallax=None, parallaxerr=None):
"""Set bits for BHB Milky Way Survey targets
Parameters
----------
see :func:`~desitarget.cuts.set_target_bits` for other parameters.
Returns
-------
mask : array_like.
True if and only if the object is a MWS-BHB target.
Notes
-----
- Criteria supplied by Sergey Koposov
- gflux, rflux, zflux, w1flux have been corrected for extinction
(unlike other MWS selections, which use obs_flux).
- Current version (03/20/21) is version 1 on `the SV3 wiki`_.
"""
if primary is None:
primary = np.ones_like(gaia, dtype='?')
mws = primary.copy()
# ADM do not target any objects for which entries are NaN
# ADM and turn off the NaNs for those entries
nans = np.isnan(gflux) | np.isnan(rflux) | np.isnan(zflux) | np.isnan(w1flux) | np.isnan(parallax) | np.isnan(gaiagmag)
w = np.where(nans)[0]
if len(w) > 0:
# ADM make copies as we are reassigning values
rflux, gflux, zflux, w1flux = rflux.copy(), gflux.copy(), zflux.copy(), w1flux.copy()
parallax = parallax.copy()
gaiagmag = gaiagmag.copy()
rflux[w], gflux[w], zflux[w], w1flux[w] = 0., 0., 0., 0.
parallax[w] = 0.
gaiagmag[w] = 0.
mws &= ~nans
log.info('{}/{} NaNs in file...t = {:.1f}s'
.format(len(w), len(mws), time()-start))
gmag = 22.5 - 2.5 * np.log10(gflux.clip(1e-7))
rmag = 22.5 - 2.5 * np.log10(rflux.clip(1e-7))
zmag = 22.5 - 2.5 * np.log10(zflux.clip(1e-7))
gmr = gmag-rmag
rmz = rmag-zmag
# ADM don't target MWS-like targets in Legacy Surveys mask regions.
mws &= imaging_mask(maskbits, mwsmask=True)
# APC must be a Legacy Surveys object that matches a Gaia source
mws &= gaia
# APC type must be PSF
mws &= _psflike(objtype)
# APC no sources brighter than Gaia G = 10
mws &= gaiagmag > 10.
# APC exclude nearby sources by parallax
mws &= parallax <= 0.1 + 3*parallaxerr
mws &= (gfracmasked < 0.5) & (gflux > 0) & (gnobs > 0)
mws &= (rfracmasked < 0.5) & (rflux > 0) & (rnobs > 0)
mws &= (zfracmasked < 0.5) & (zflux > 0) & (znobs > 0)
# APC no gaia duplicated sources
mws &= ~gaiadupsource
# APC gaia astrometric excess noise < 3
mws &= gaiaaen < 3.0
# APC BHB extinction-corrected color range -0.35 <= gmr <= -0.02
mws &= (gmr >= -0.35) & (gmr <= -0.02)
# Coefficients from Sergey Koposov
bhb_sel = rmz - (1.07163*gmr**5 - 1.42272*gmr**4 + 0.69476*gmr**3 - 0.12911*gmr**2 + 0.66993*gmr - 0.11368)
mws &= (bhb_sel >= -0.05) & (bhb_sel <= 0.05)
# APC back out the WISE error = 1/sqrt(ivar) from the SNR = flux*sqrt(ivar)
w1fluxerr = w1flux/(w1snr.clip(1e-7))
w1mag_faint = 22.5 - 2.5 * np.log10((w1flux-3*w1fluxerr).clip(1e-7))
# APC WISE cut (Sergey Koposov)
mws &= rmag - 2.3*gmr - w1mag_faint < -1.5
# APC Legacy magnitude limits
mws &= (rmag >= 16.) & (rmag <= 20.)
return mws
|
1a4f4287263ee64497ebdf882cbe3b782840b8f3
| 3,640,753
|
def bernpoly(n, z):
"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from sympy.mpmath import *
>>> mp.dps = 15
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-3.33333e-2, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`bernoulli`)::
>>> print bernpoly(12, 0), bernoulli(12)
-0.253113553113553 -0.253113553113553
>>> print bernpoly(13, 0), bernoulli(13)
0.0 0.0
"""
n = int(n)
assert n >= 0
# XXX: optimize
return sum(binomial(n,k)*bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
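# A minimal standalone cross-check of the same recurrence using plain SymPy
# (not mpmath); the helper name below is hypothetical and included only to
# illustrate B_n(z) = sum_k C(n, k) * B_k * z**(n - k).
from sympy import bernoulli, binomial, expand, symbols

def bernpoly_sympy_check(n, z):
    """Expand the Bernoulli polynomial B_n(z) symbolically."""
    return expand(sum(binomial(n, k) * bernoulli(k) * z**(n - k) for k in range(n + 1)))

# z = symbols('z'); bernpoly_sympy_check(3, z) -> z**3 - 3*z**2/2 + z/2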
|
60da6461246e48f8b5ff7172f7d244c59b9ad7ed
| 3,640,754
|
def sort(obs, pred):
"""
Return the obs and pred time series, each sorted in ascending value order.
"""
obs = obs.sort_values(ascending=True)
pred = pred.sort_values(ascending=True)
return obs,pred
|
11c44c1fd605611a2722321b3c3d58a822b9c643
| 3,640,755
|
import random
def random_point_of_triangle(vertices):
"""Compute a random point of the triangle with given vertices"""
p, q, r = vertices
pq = q-p
pr = r-p
while True:
x = random.random()
y = random.random()
if x + y <= 1:
return p + pq*x + pr*y
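# Usage sketch: the vertices only need to support vector arithmetic, e.g.
# numpy arrays (the triangle below is a hypothetical example).
import numpy as np

vertices = (np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0]))
point = random_point_of_triangle(vertices)  # uniformly distributed inside the triangle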
|
ba3bf9183ddae4a16561a06b6f2455ce0ede6c8f
| 3,640,756
|
import time
def get_minutes(hour:str) -> int:
""" Get total number of minutes from time in %H:%M .
Args:
hour (str): String containing time in 24 hour %H:%M format
Returns:
int: Returns total number of minutes
"""
t = time.strptime(hour, '%H:%M')
minutes = t[3] * 60 + t[4]
return minutes
|
069835bdb6b0919d6206e0379a1933986ad2d5bd
| 3,640,757
|
from typing import Tuple
import logging
import numpy as np
def get_rotation_scale_from_transformation(matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""
This function breaks the given transformation matrix into a Rotation matrix and a Scale matrix
as described in "As-Rigid-As-Possible Shape Interpolation" by Alexa et al
Arguments:
matrix : Any transformation matrix
Returns:
R_gamma: Rotation matrix 3x3
S: Scale matrix 3x3
"""
R_alpha,D,R_beta = np.linalg.svd(matrix,full_matrices=True)
D = np.eye(3)*D
R_gamma = R_alpha @ R_beta
if np.linalg.det(R_gamma) < 0:
R_gamma[0,:] *= -1
S = R_beta.T @ D @ R_beta
assert is_rotation_matrix(R_gamma), logging.error("Computed matrix is not a rotation")
return (R_gamma,S)
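# A hedged, self-contained sanity check of the decomposition above: build a
# known rotation-times-scale matrix and confirm R_gamma @ S reproduces it.
# The call is left commented out because `is_rotation_matrix` (used in the
# assert) is defined elsewhere in the original module.
import numpy as np

theta = 0.3
R_true = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                   [np.sin(theta),  np.cos(theta), 0.0],
                   [0.0, 0.0, 1.0]])
S_true = np.diag([2.0, 0.5, 1.0])
M = R_true @ S_true
# R_gamma, S = get_rotation_scale_from_transformation(M)
# np.allclose(R_gamma @ S, M)  # expected True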
|
c48ed5d1e880c7caf79e009bfeb84c95de8007e3
| 3,640,758
|
def calc_check_digit(number):
"""Calculate the check digit."""
weights = (7, 9, 8, 6, 5, 4, 3, 2)
check = sum(w * int(n) for w, n in zip(weights, number)) % 11
return str((10 - check) % 9 + 1)
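# Illustrative call (only the first eight digits of `number` are weighted,
# since zip() stops at the shorter of the two sequences):
# calc_check_digit("12345678")
#   -> 7*1 + 9*2 + 8*3 + 6*4 + 5*5 + 4*6 + 3*7 + 2*8 = 159; 159 % 11 = 5; (10 - 5) % 9 + 1 = 6 -> "6"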
|
eec82a1e6cec8baf513db16e672294df79ce4b9f
| 3,640,759
|
import json
def leave_studygroup(request):
"""
Remove a student from the list of participants of a study group.
"""
body = json.loads(request.body)
group_id = body['id']
token = body['token']
rcs = Student.objects.get(token=token).rcs
group = Studygroup.objects.get(id=group_id)
participants = json.loads(group.participants)
participants.remove(rcs)
group.participants = json.dumps(participants)
group.save()
res = {'res': 'OK'}
return JsonResponse(res, safe=False)
|
705c63640c11485dc68ce69d7757642a84c5798c
| 3,640,760
|
def list_revisions_courses(request_ctx, course_id, url, per_page=None, **request_kwargs):
"""
List the revisions of a page. Callers must have update rights on the page in order to see page history.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param url: (required) ID
:type url: string
:param per_page: (optional) Set how many results canvas should return, defaults to config.LIMIT_PER_PAGE
:type per_page: integer or None
:return: List revisions
:rtype: requests.Response (with array data)
"""
if per_page is None:
per_page = request_ctx.per_page
path = '/v1/courses/{course_id}/pages/{url}/revisions'
payload = {
'per_page' : per_page,
}
url = request_ctx.base_api_url + path.format(course_id=course_id, url=url)
response = client.get(request_ctx, url, payload=payload, **request_kwargs)
return response
|
c6ef2cd08b1a98c5204dd0e6a52b55ef57dbc78a
| 3,640,761
|
def snr2Ivar(flux, snr):
"""
Estimate the inverse variance given flux and S/N.
Parameters
----------
flux : scalar or array of float
Flux of the object.
snr : scalar or array of float
Signal to noise ratio
"""
return 1.0 / ((flux / snr) ** 2.0)
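# Quick numeric check: flux = 10 with S/N = 5 implies sigma = flux/snr = 2,
# so the inverse variance is 1 / 2**2 = 0.25.
# snr2Ivar(10.0, 5.0) -> 0.25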
|
91c76cd942a8f37f57a227ccb35cf4968a16193b
| 3,640,762
|
def revision_to_cashflows(rev, end_date):
"""Converts a revision to a list of cashflows
end_date -- the date from which we want to stop computing
"""
if rev.end_date is not None:
end_date = next_month(rev.end_date)
result = []
for first_of_month in first_of_month_range(rev.start_date, end_date):
start = max(first_of_month, rev.start_date)
end = next_month(first_of_month)
if rev.end_date is not None:
end = min(end, rev.end_date)
delta = end - start
total_days = monthrange(first_of_month.year, first_of_month.month)[1]
rent = fractional_amount(-rev.rent, delta.days, total_days)
result.append(Cashflow(first_of_month, rent, _("rent")))
if rev.provision != 0:
p = fractional_amount(-rev.provision, delta.days, total_days)
result.append(Cashflow(
first_of_month, p, _("provision")))
return result
|
51778e5c389420101d3ef6afab0e28b6aa708689
| 3,640,763
|
def filter_verified_user(path, community_user_dataFrame,verified_user_file,sep = ',',header = None):
"""
Filter out verified users from the community user data, based on a file of already-verified users.
:param path: directory where the verified-user file is stored.
:param community_user_dataFrame: community user dataframe with two columns named (user_id, community_id).
:param verified_user_file: CSV file of verified users in the format (user_id, is_verified, name), comma-separated.
:return: pandas dataframe with the verified users removed, same format as community_user_dataFrame, columns (user_id, community_id).
"""
print 'filter verified user'
verified_user_dataFrame = pd.read_csv(path + verified_user_file, names=['user_id', 'is_verified', 'name'],dtype={'user_id': np.str},sep = sep,header = header)
verified_user_dataFrame = verified_user_dataFrame[verified_user_dataFrame.is_verified == True]
del verified_user_dataFrame['is_verified']
del verified_user_dataFrame['name']
dataFrame = pd.DataFrame()
user_id_list = set(list(community_user_dataFrame.user_id))
verified_user_id_list = list(verified_user_dataFrame.user_id)
for user_id in user_id_list:
if user_id not in verified_user_id_list:
dataFrame = dataFrame.append(community_user_dataFrame[community_user_dataFrame.user_id == user_id],ignore_index=False)
print 'keep user: ', user_id
else:
print 'delete user: ', user_id
pass
return dataFrame
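# A hedged, vectorized equivalent of the per-user loop above (same columns
# assumed, same rows kept, order may differ); pandas boolean indexing with
# isin() avoids the O(N*M) membership test against a plain Python list:
# kept = community_user_dataFrame[
#     ~community_user_dataFrame.user_id.isin(verified_user_dataFrame.user_id)]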
|
a571f72000e0a52e8f5aa0d5ae61cd07e2d7189d
| 3,640,764
|
from datetime import datetime
def calculate(series):
"""
:param series: a list of lists of [[(),()], [(),()]] for every swc tube in the pixel
:return:
"""
# gets every dates tuple in the list
dates = [t for t, v in series]
# define the dates as a set
ds = set(dates[0])
# get the intersection of every other set.
for d in dates[1:]:
ds = ds.intersection(set(d))
def func(di):
""""""
# check for matching dates in the intersected and ordered set with the values from the series.
ns = [get_matching_date(di, zip(*cs)) for cs in series] # ns is the matching values
# if the value is not none...
ns = [ni for ni in ns if ni is not None]
# print "Here is your ns {}".format(ns)
# calculate the error of the mean for that value.
# if ns:
# return datetime.strptime(di, '%m/%d/%Y'), calculate_sem(ns) #, calculate_avg(ns)
if ns:
return datetime.strptime(di, '%m/%d/%Y'), ns
# sets are NOT ordered so you need to find the ones that match up.
# vs = [func(di) for di in sorted(list(ds), reverse=True)]
storages = [func(di) for di in sorted(list(ds), reverse=True)]
# vs = [vi for vi in vs if vi is not None]
storages = [i for i in storages if i is not None]
# return zip(*vs)
# print "length storages {}".format(len(storages))
# print "STORAGES {}".format(storages)
# print "length storages {}".format(len(zip(*storages)))
# print "STORAGES {}".format(zip(*storages))
return zip(*storages)
|
136fa5fd72bad6c1cb2938e25adc44d768caf43b
| 3,640,765
|
from typing import Tuple
import numpy as np
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
"""
Load dataset for comparing the Gaussian Naive Bayes and LDA classifiers. File is assumed to be an
ndarray of shape (n_samples, 3) where the first 2 columns represent features and the third column the class
Parameters
----------
filename: str
Path to .npy data file
Returns
-------
X: ndarray of shape (n_samples, 2)
Design matrix to be used
y: ndarray of shape (n_samples,)
Class vector specifying for each sample its class
"""
dataset = np.load(f"..//datasets//{filename}")
X, y = dataset[:, :2], dataset[:, -1]
return X, y
|
15b6c7422fa397e13fc19de2a1e7681b73e3638c
| 3,640,766
|
import csv
def readCSV(name,shape = [None], delimiter = ","):
""" Lectura de archivo csv name
Devuelve matriz con los datos y cabecera
"""
data = []
with open(name, 'r') as f:
reader = csv.reader(f,delimiter = delimiter)
for row in reader:
data.append(row[slice(*shape)])
return data
|
789341daf51b2f1e92086a42698ea0fef1130505
| 3,640,767
|
from gdata.media import Category
def build_category(category):
"""Build a single-item list of a YouTube category.
This refers to the Category of a video entry, such as "Film" or "Comedy",
not the atom/gdata element. This does not check if the category provided
is valid.
Keyword arguments:
category: String representing the category.
Returns:
A single-item list of a YouTube category (type gdata.media.Category).
"""
return [Category(
text=category,
scheme='http://gdata.youtube.com/schemas/2007/categories.cat',
label=category)]
|
6906e30fcf1d72d7842ec5d7381a12842a9ded3e
| 3,640,768
|
def all(event, context):
""" retrieves all experiment results from redis
params:
- namespace (optional)
- scope (optional, comma-separated list of experiments)
"""
r = _redis()
namespace = event.get('namespace', 'alephbet')
scope = event.get('scope')
if scope:
experiments = scope.split(',')
else:
experiments = r.smembers("{0}:experiments".format(namespace))
results = []
results.append({'meta': {'scope': scope}})
for ex in experiments:
goals = experiment({'experiment': ex, 'namespace': namespace}, context)
results.append({'experiment': ex, 'goals': goals})
return results
|
583cc6a0101fbb6ef1ca12d6ddcfe76626bdd8dd
| 3,640,769
|
def check_submodule_update(job, position):
"""
Checks to see if certain submodules have been updated and post a comment to the PR if so.
"""
output = get_output_by_position(job, position)
modules = find_in_output(output, "CIVET_CLIENT_SUBMODULE_UPDATES")
if not modules:
return False
if not job.event.pull_request or not job.event.pull_request.review_comments_url:
return False
for mod in modules.split():
api = job.event.build_user.api()
url = job.event.pull_request.review_comments_url
sha = job.event.head.sha
msg = "**Caution!** This contains a submodule update"
# The 2 position will leave the message on the new submodule hash
api.pr_review_comment(url, sha, mod, 2, msg)
return True
|
ca6772b516fca899d6196cea47aa4185d958ec48
| 3,640,771
|
def _compute_subseq_errors_direct(series, weights):
"""
Subsequence errors (using one pass formulation)
:param Array{Float64,1} series
:param Array{Float64,1} weights
The subsequence errors is:
$$\begin{align}
E[i,j] &= Q[i,j] - \frac{S[i,j]^2}{W[i,j]}
\end{align}$$
Were W, S, Q are upper diagonal matrices:
$$\begin{align}
W[i,j] &\equiv \sum_{k=i}^j w_k \\
S[i,j] &\equiv \sum_{k=i}^j w_k x_k \\
Q[i,j] &\equiv \sum_{k=i}^j w_k {x_k}^2
\end{align}$$
https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
Because $$Q[i,j]$$ and $$\frac{S[i,j]^2}{W[i,j]}$$ can be very similar numbers,
cancellation can lead to the precision of the result to be much less than
the inherent precision of the floating-point arithmetic used to perform the computation.
Thus this algorithm should not be used in practice.
This is particularly bad if the standard deviation is small relative to the mean.
"""
N = np.size(series)
wgts = np.diag(weights)
wsum = np.diag(weights * series)
sqrs = np.diag(weights * series * series)
dists = np.zeros((N, N), dtype=float)
means = np.diag(series)
# Fill the upper triangle of dists and means by performing up-right
# diagonal sweeps through the matrices
for delta in range(0, N):
for l in range(0, (N-1-delta)):
# l = left boundary, r = right boundary
r = l + delta + 1
# Incrementally update every partial sum
wgts[l, r] = wgts[l, r-1] + wgts[r, r]
wsum[l, r] = wsum[l, r-1] + wsum[r, r]
sqrs[l, r] = sqrs[l, r-1] + sqrs[r, r]
# Calculate the mean over the range
means[l, r] = 0 if (wgts[l, r] == 0) else wsum[l, r] / wgts[l, r]
dists[l, r] = sqrs[l, r] - means[l, r] * wsum[l, r]
if dists[l, r] < 0:
print("[WARNING] Numerical instability detected, dists[", l, ", ", r, "] is negative: ", dists[l, r])
return dists, means
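# Small numeric check of the one-pass identity E[i, j] = Q - S**2 / W on a
# full range, against the direct weighted sum of squared deviations
# (values below are made up for illustration).
import numpy as np

series = np.array([1.0, 2.0, 4.0, 7.0])
weights = np.array([1.0, 2.0, 1.0, 1.0])
W = weights.sum()
S = (weights * series).sum()
Q = (weights * series ** 2).sum()
direct = (weights * (series - S / W) ** 2).sum()
one_pass = Q - S ** 2 / W
# abs(direct - one_pass) is ~0 up to floating-point round-off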
|
d8083b2b19102d51ee10baf2907890663f1d2b82
| 3,640,772
|
def preprocess_dataframe(data):
"""Helper method to preprocess the dataframe.
Creates new columns for year,month,recalls and percentage change.
Limits the date range for the experiment (these data are trustworthy)."""
data['recalls'] = data['doc_count'] + 1
data.drop(columns=['product', 'Unnamed: 0', 'key', 'key_as_string', 'doc_count'], inplace=True)
data = data.resample("M").sum()
mask = (data.index > '2007-05-31') & (data.index < '2019-09-30')
data = data.loc[mask]
data['pct'] = data['recalls'].pct_change()
return data
|
f6670cac1319108c88ee9ee409ce0ecdd1eca746
| 3,640,773
|
def is_solution(x:int, y:int) -> bool:
"""Returns try if (x, y) is a solution."""
# x and y are the values in a sequence of 15 terms of the following form:
# xxxxyxxxxxyxxxx
# x must be a positive integer
if x <= 0:
return False
# y must be a negative integer
if y >= 0:
return False
# a run of 6 consecutive terms must be positive
if 5 * x + y <= 0:
return False
# a run of 11 consecutive terms must be negative
if 9 * x + 2 * y >= 0:
return False
# x must be <= 16 or y must be >= -16
return x <= 16 or y >= -16
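# Hedged brute-force cross-check of the closed-form conditions above:
# build the 15-term sequence xxxxyxxxxxyxxxx explicitly and test every run
# of 6 and 11 consecutive terms directly.
def is_solution_bruteforce(x: int, y: int) -> bool:
    seq = [x] * 4 + [y] + [x] * 5 + [y] + [x] * 4
    every_6_positive = all(sum(seq[i:i + 6]) > 0 for i in range(len(seq) - 5))
    every_11_negative = all(sum(seq[i:i + 11]) < 0 for i in range(len(seq) - 10))
    return x > 0 and y < 0 and every_6_positive and every_11_negative and (x <= 16 or y >= -16)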
|
5e620fc390f6a79fd25d00c8c8b51d0af788d48c
| 3,640,774
|
def load_crl(file):
# type: (AnyStr) -> CRL
"""
Load CRL from file.
:param file: Name of file containing CRL in PEM format.
:return: M2Crypto.X509.CRL object.
"""
with BIO.openfile(file) as f:
cptr = m2.x509_crl_read_pem(f.bio_ptr())
return CRL(cptr, 1)
|
d711503c78edbb722189d7a06340ab9f719f853f
| 3,640,775
|
def clean_and_lemmatize(text):
"""
Clean and lemmatize the text of a Tweet
Returns:
cleaned_text (string): The cleaned and lemmatized text.
"""
wnl = WordNetLemmatizer() # NLTK lemmatizer
converted_tweet = clean_and_tokenize(
text) # cleans the text and tokenize it
tagged = nltk.pos_tag(converted_tweet) # POS tag the tokenized Tweet
wordnet_tagged = list(
map(lambda x: (x[0], pos_tagger(x[1])), tagged))
lemmatized_sentence = []
# loop through each word in the tagged list
for word, tag in wordnet_tagged:
if tag is None:
# if there is no available tag, append the word as is
lemmatized_sentence.append(word)
else:
# else use the tag to lemmatize the word
lemmatized_sentence.append(wnl.lemmatize(word, tag))
# attached lemmatized words to a string
cleaned_text = " ".join(lemmatized_sentence)
return cleaned_text
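# A hedged sketch of the `pos_tagger` helper assumed above: it maps Penn
# Treebank tags from nltk.pos_tag to the WordNet POS constants expected by
# WordNetLemmatizer.lemmatize (anything unrecognized maps to None, which the
# loop above handles by appending the word unchanged).
from nltk.corpus import wordnet

def pos_tagger(nltk_tag):
    if nltk_tag.startswith('J'):
        return wordnet.ADJ
    if nltk_tag.startswith('V'):
        return wordnet.VERB
    if nltk_tag.startswith('N'):
        return wordnet.NOUN
    if nltk_tag.startswith('R'):
        return wordnet.ADV
    return None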
|
0635e08aca69191d77ad652dcf752254fbfc2ea6
| 3,640,776
|
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding
Arguments:
in_planes {int} -- Number of channels in the input image
out_planes {int} -- Number of channels produced by the convolution
Keyword Arguments:
stride {int or tuple, optional} -- Stride of the convolution (default: {1})
groups {int, optional} -- Number of blocked connections from input channels to output channels (default: {1})
dilation {int or tuple, optional} -- Spacing between kernel elements (default: {1})
Returns:
output layer of 3x3 convolution with padding
"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
|
1f55153bb35dd56b7cca2b00e13a0f4e5a963248
| 3,640,777
|
def task_finish(request, pk):
"""タスクを完了するAPI
:param request:
:param pk:
:return:
"""
task = get_object_or_404(models.Task, pk=pk)
prev_task = task.get_prev_task()
if prev_task is None or prev_task.can_continue():
task.status = '99'
task.updated_user = request.user
task.save()
serializer = serializers.TaskSerializer(task)
return Response(serializer.data)
else:
return Response({'detail': constants.ERROR_PREV_TASK_UNFINISHED}, status=400)
|
b995edd022e27b6101c9c79874af4fc78a01afe2
| 3,640,778
|
def fine_tune():
"""recreates top model architecture/weights and fine tunes with image augmentation and optimizations"""
# reconstruct vgg16 model
model = Sequential()
model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load vgg16 weights
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
if k >= len(model.layers):
break
g = f['layer_{}'.format(k)]
weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
model.layers[k].set_weights(weights)
f.close()
# add the classification layers
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
top_model.load_weights(top_model_weights_path)
# add the model on top of the convolutional base
model.add(top_model)
# set the first 25 layers (up to the last conv block)
# to non-trainable (weights will not be updated)
for layer in model.layers[:25]:
layer.trainable = False
# compile the model with a SGD/momentum optimizer
# and a very slow learning rate.
model.compile(loss='binary_crossentropy',
optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
metrics=['accuracy'])
# prepare data augmentation configuration
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_height, img_width),
batch_size=32,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_height, img_width),
batch_size=32,
class_mode='binary')
# fine-tune the model
model.fit_generator(
train_generator,
samples_per_epoch=nb_train_samples,
nb_epoch=nb_epoch,
validation_data=validation_generator,
nb_val_samples=nb_validation_samples,
callbacks=[early_stopping])
# save the model
json_string = model.to_json()
with open('final_model_architecture.json', 'w') as f:
f.write(json_string)
model.save_weights('final_weights.h5')
# return the model for convenience when making predictions
return model
|
69982c739927fca8f1c8e3779a7358a7fc646a5f
| 3,640,780
|
def normalize(a, seqlength=None, rv=None):
"""
Normalize the VSA vector
:param a: input VSA vector
:param seqlength: Optional, for BSC vectors must be set to a valid.
:param rv: Optional random vector, used for splitting ties on binary and ternary VSA vectors.
:return: new VSA vector
"""
return a.normalize(a, seqlength, rv)
|
ef8ec307add55a56be5991bb13579bc989726d3c
| 3,640,781
|
def MACD(df, n_fast, n_slow):
"""Calculate MACD, MACD Signal and MACD difference
:param df: pandas.DataFrame
:param n_fast:
:param n_slow:
:return: pandas.DataFrame
"""
EMAfast = pd.Series(df['Close'].ewm(span=n_fast, min_periods=n_slow).mean())
EMAslow = pd.Series(df['Close'].ewm(span=n_slow, min_periods=n_slow).mean())
MACD = pd.Series(EMAfast - EMAslow, name='MACD_' + str(n_fast) + '_' + str(n_slow))
MACDsign = pd.Series(MACD.ewm(span=9, min_periods=9).mean(), name='MACDsign_' + str(n_fast) + '_' + str(n_slow))
MACDdiff = pd.Series(MACD - MACDsign, name='MACDdiff_' + str(n_fast) + '_' + str(n_slow))
df = df.join(MACD)
df = df.join(MACDsign)
df = df.join(MACDdiff)
return df
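# Illustrative usage on a tiny synthetic DataFrame; the 'Close' column name
# is required by the function above, and 12/26 are the conventional MACD spans.
import numpy as np
import pandas as pd

df = pd.DataFrame({'Close': np.linspace(100.0, 110.0, 60)})
df = MACD(df, n_fast=12, n_slow=26)
# df now also contains MACD_12_26, MACDsign_12_26 and MACDdiff_12_26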
|
368f80feb27bd67a387b0b9abb652d53205d22ac
| 3,640,782
|
def ecef2map(xyz, spatialRef):
""" transform 3D cartesian Earth Centered Earth fixed coordinates, to
map coordinates (that is 2D) in a projection frame
Parameters
----------
xyz : np.array, size=(m,3), float
np.array with 3D coordinates, in WGS84. In the following form:
[[x, y, z], [x, y, z], ... ]
spatialRef : osgeo.osr.SpatialReference
target projection
Returns
-------
xy : np.array, size=(m,2), float
np.array with planar coordinates, within a given projection frame
"""
if isinstance(spatialRef, str):
spatialStr = spatialRef
spatialRef = osr.SpatialReference()
spatialRef.ImportFromWkt(spatialStr)
llh = ecef2llh(xyz) # get spherical coordinates and height
xy = ll2map(llh[:, :-1], spatialRef)
return xy
|
f7177912c931cfe6c2cee5737ed6bd2afedeba08
| 3,640,783
|
def dis2speed(t, dis):
"""
Return speed in distance travelled per hour.
Args:
t (datetime64[ms]): 1D array with time.
dis (float ): 1D array with distance travelled.
Returns:
float: 1D array with speed data.
"""
# divide by one hour (=3600 x 1000 milliseconds)
speed = np.diff(dis) / (np.float64(np.diff(t))/1000) *3600
speed = np.r_[np.nan, speed]
return speed
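# Illustrative usage: two samples one minute apart with 1 km covered gives
# 60 km/h for the second sample (the first entry is NaN by construction).
# t = np.array(['2021-01-01T00:00:00', '2021-01-01T00:01:00'], dtype='datetime64[ms]')
# dis = np.array([0.0, 1.0])
# dis2speed(t, dis)  # -> array([nan, 60.])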
|
4dae87509ad44604949f1d3f925f3b28947b9952
| 3,640,784
|
def default_context(plugin, context):
"""
Return the default context for plugins rendered with a template, which
simply is a single variable named ``plugin`` containing the plugin
instance.
"""
return {"plugin": plugin}
|
5f7a88c02b6c11a150197e50a5be1847cba422b0
| 3,640,785
|
def get_model_from_key(model_name):
"""
Gets the model from a given key.
param:
model_name: name of model
return:
object
"""
_known_models = {}
#populate
for klass in Model.__subclasses__():
_known_models[klass.__name__] = klass
for sub in klass.__subclasses__():
_known_models[sub.__name__] = sub
return _known_models.get(model_name, None)
|
488c52635bfdf10c79936b0f33b84d0222d1ae5b
| 3,640,786
|
def default_fields(
coll_id=None, type_id=None, entity_id=None,
width=12, **kwargs):
"""
Returns a function that accepts a field width and returns a dictionary of entity values
for testing. The goal is to isolate default entity value settings from the test cases.
"""
def_label = kwargs.get("default_label",
default_label(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
)
def_comment = kwargs.get("default_comment",
default_comment(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
)
def_label_esc = def_label.replace("'", "&#39;")
def_comment_esc = def_comment.replace("'", "&#39;")
def_entity_url = collection_entity_view_url(coll_id=coll_id, type_id=type_id, entity_id=entity_id)
collection_url = collection_view_url(coll_id).rstrip("/")
def def_fields(width=12):
fields = layout_classes(width=width)
fields.update(
{ 'coll_id': coll_id
, 'type_id': type_id
, 'entity_id': entity_id
, 'default_label': def_label
, 'default_comment': def_comment
, 'default_label_esc': def_label_esc
, 'default_comment_esc': def_comment_esc
, 'default_entity_url': def_entity_url
, 'collection_url': collection_url
})
if kwargs:
fields.update(kwargs)
return fields
return def_fields
|
1670f95a8e95f84ca5f08aab8ee0a8effb1e6f76
| 3,640,787
|
import traceback
def predict_using_broadcasts(feature1, feature2, feature3, feature4):
"""
Scale the feature values and use the model to predict
:return: 1 if normal, -1 if abnormal 0 if something went wrong
"""
prediction = 0
x_test = [[feature1, feature2, feature3, feature4]]
try:
x_test = SCL.value.transform(x_test)
prediction = CLF.value.predict(x_test)[0]
except ValueError:
traceback.print_exc()
print('Cannot predict:', x_test)
return int(prediction)
|
4f766002e11fbe34f3479769db752c0df08b2df5
| 3,640,788
|
import torch
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
device = tensor.get_device()
buf_name = f'range_buf_{device}'
if not hasattr(make_positions, buf_name):
setattr(make_positions, buf_name, tensor.new())
setattr(make_positions, buf_name, getattr(make_positions, buf_name).type_as(tensor))
if getattr(make_positions, buf_name).numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=getattr(make_positions, buf_name))
mask = tensor.ne(padding_idx)
positions = getattr(make_positions, buf_name)[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
new_tensor = tensor.clone()
return new_tensor.masked_scatter_(mask, positions[mask]).long()
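# Tiny illustration (assuming a CUDA tensor, since the function calls
# tensor.get_device()): with padding_idx=1 and right padding, non-pad symbols
# get positions 2, 3, 4, ... while pad slots keep the padding index.
# t = torch.tensor([[5, 6, 7, 1]], device='cuda')   # 1 is the pad symbol
# make_positions(t, padding_idx=1, left_pad=False)  # -> [[2, 3, 4, 1]]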
|
8e65c68daae2e40710c777d6e74f048b8b0ad547
| 3,640,789
|
def teq(state, *column_values):
"""Tag-Equals filter. Expects, that a first row contains tags and/or metadata
Tag row is ignored in comparison, but prepended to the result (in order to maintain the first row in the results).
Accepts one or more column-value pairs. Keep only rows where value in the column equals specified value.
Example: teq-column1-1
"""
df = state.get()
tags = df.iloc[:1, :]
df = df.iloc[1:, :]
assert state.type_identifier == "dataframe"
for i in range(0, len(column_values), 2):
c = column_values[i]
v = column_values[i + 1]
state.log_info(f"Equals: {c} == {v}")
index = np.array([x == v for x in df[c]], bool)
try:
if int(v) == float(v):
index = index | (df[c] == int(v))
else:
index = index | (df[c] == float(v))
except:
pass
df = df.loc[index, :]
df = tags.append(df, ignore_index=True)
return state.with_data(df)
|
7fd2786dcbe8705b48081c6bc96dcdc7452e35d3
| 3,640,790
|
def adjust_labels(data_y, dataset, pred_type='actions'):
"""
Transforms original labels into the range [0, nb_labels-1]
:param data_y: numpy integer array
Sensor labels
:param pred_type: string, ['gestures', 'locomotion', 'actions', 'tasks']
Type of activities to be recognized
:return: numpy integer array
Modified sensor labels
"""
data_y[data_y == "null_class"] = 0
if dataset == 'wetlab':
if pred_type == 'tasks': # Labels for tasks are adjusted
data_y[data_y == "1solvent"] = 1
data_y[data_y == "2catalysator"] = 2
data_y[data_y == "3cutting"] = 3
data_y[data_y == "4mixing"] = 4
data_y[data_y == "5catalysator"] = 5
data_y[data_y == "6waterbath"] = 6
data_y[data_y == "7solvent"] = 7
data_y[data_y == "8catalysator"] = 8
data_y[data_y == "9cutting"] = 9
data_y[data_y == "10mixing"] = 10
data_y[data_y == "11catalysator"] = 11
data_y[data_y == "12waterbath"] = 12
data_y[data_y == "13waterbath"] = 13
data_y[data_y == "14catalysator"] = 14
data_y[data_y == "15pestling"] = 15
data_y[data_y == "16filtrate"] = 16
data_y[data_y == "17catalysator"] = 17
data_y[data_y == "18pouring"] = 18
data_y[data_y == "19detect"] = 19
data_y[data_y == "20waterbath"] = 20
data_y[data_y == "21catalysator"] = 21
data_y[data_y == "22pestling"] = 22
data_y[data_y == "23filtrate"] = 23
data_y[data_y == "24catalysator"] = 24
data_y[data_y == "25pouring"] = 25
data_y[data_y == "26detect"] = 26
data_y[data_y == "27end"] = 27
elif pred_type == 'actions': # Labels for actions are adjusted
data_y[data_y == "cutting"] = 1
data_y[data_y == "inverting"] = 2
data_y[data_y == "peeling"] = 3
data_y[data_y == "pestling"] = 4
data_y[data_y == "pipetting"] = 5
data_y[data_y == "pouring"] = 6
data_y[data_y == "pour catalysator"] = 6
data_y[data_y == "stirring"] = 7
data_y[data_y == "transfer"] = 8
elif dataset == 'sbhar':
data_y[data_y == 'walking'] = 1
data_y[data_y == 'walking_upstairs'] = 2
data_y[data_y == 'walking_downstairs'] = 3
data_y[data_y == 'sitting'] = 4
data_y[data_y == 'standing'] = 5
data_y[data_y == 'lying'] = 6
data_y[data_y == 'stand-to-sit'] = 7
data_y[data_y == 'sit-to-stand'] = 8
data_y[data_y == 'sit-to-lie'] = 9
data_y[data_y == 'lie-to-sit'] = 10
data_y[data_y == 'stand-to-lie'] = 11
data_y[data_y == 'lie-to-stand'] = 12
elif dataset == 'rwhar' or dataset == 'rwhar_3sbjs':
data_y[data_y == 'climbing_down'] = 0
data_y[data_y == 'climbing_up'] = 1
data_y[data_y == 'jumping'] = 2
data_y[data_y == 'lying'] = 3
data_y[data_y == 'running'] = 4
data_y[data_y == 'sitting'] = 5
data_y[data_y == 'standing'] = 6
data_y[data_y == 'walking'] = 7
elif dataset == 'hhar':
data_y[data_y == 'bike'] = 1
data_y[data_y == 'sit'] = 2
data_y[data_y == 'stand'] = 3
data_y[data_y == 'walk'] = 4
data_y[data_y == 'stairsup'] = 5
data_y[data_y == 'stairsdown'] = 6
elif dataset in ('opportunity', 'opportunity_ordonez'):
if pred_type == 'locomotion':
data_y[data_y == "stand"] = 1
data_y[data_y == "walk"] = 2
data_y[data_y == "sit"] = 3
data_y[data_y == "lie"] = 4
elif pred_type == 'gestures':
data_y[data_y == 'open_door_1'] = 1
data_y[data_y == 'open_door_2'] = 2
data_y[data_y == 'close_door_1'] = 3
data_y[data_y == 'close_door_2'] = 4
data_y[data_y == 'open_fridge'] = 5
data_y[data_y == 'close_fridge'] = 6
data_y[data_y == 'open_dishwasher'] = 7
data_y[data_y == 'close_dishwasher'] = 8
data_y[data_y == 'open_drawer_1'] = 9
data_y[data_y == 'close_drawer_1'] = 10
data_y[data_y == 'open_drawer_2'] = 11
data_y[data_y == 'close_drawer_2'] = 12
data_y[data_y == 'open_drawer_3'] = 13
data_y[data_y == 'close_drawer_3'] = 14
data_y[data_y == 'clean_table'] = 15
data_y[data_y == 'drink_from_cup'] = 16
data_y[data_y == 'toggle_switch'] = 17
return data_y
|
1d201a20a8865cd505c0ee6b5385622a0ae28817
| 3,640,791
|
def is_str_or_bytes(x):
""" True if x is str or bytes.
This doesn't use rpartial to avoid infinite recursion.
"""
return isinstance(x, (str, bytes, bytearray))
|
ff4bf19177ffe62f24713e077824e48ec45f8587
| 3,640,792
|
def _type_convert(new_type, obj):
"""
Convert `obj` to `new_type`.
"""
return new_type(obj)
|
fc47c100508d41caa7ffc786746b58e3d6f684e2
| 3,640,793
|
def create_tokenizer(corpus_file, vocab_size):
"""Create a tokenizer from a corpus file
Args:
corpus_file (Pathlib path): File containng corpus i.e. all unique words for
vocab_size (int): Vocabulary size of the tokenizer
Returns:
hugging_face tokenizer: Byte pair tokenizer used to tokenize text
"""
tokenizer = Tokenizer(BPE())
trainer = BpeTrainer(
special_tokens=["<pad>", "<s>", "</s>", "<unk>"], vocab_size=vocab_size
)
tokenizer.pre_tokenizer = Whitespace()
files = [str(corpus_file)]
tokenizer.train(trainer, files)
tokenizer.post_processor = TemplateProcessing(
single="<s> $A </s>",
special_tokens=[
("<s>", tokenizer.token_to_id("<s>")),
("</s>", tokenizer.token_to_id("</s>")),
],
)
tokenizer.enable_padding(
pad_token="<pad>",
pad_id=tokenizer.token_to_id("<pad>"),
)
return tokenizer
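# Hedged usage sketch; the imports below are the ones the function body
# relies on (huggingface `tokenizers`, older API where Tokenizer.train takes
# the trainer first), and the corpus path is hypothetical.
from pathlib import Path
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing
from tokenizers.trainers import BpeTrainer

# tokenizer = create_tokenizer(Path("corpus.txt"), vocab_size=8000)
# ids = tokenizer.encode("hello world").ids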
|
6de30b057d920d650f065af0f9083130fbb6df77
| 3,640,794
|
def get_readings(tag):
"""Get sensor readings and collate them in a dictionary."""
try:
enable_sensors(tag)
readings = {}
# IR sensor
readings["ir_temp"], readings["ir"] = tag.IRtemperature.read()
# humidity sensor
readings["humidity_temp"], readings["humidity"] = tag.humidity.read()
# barometer
readings["baro_temp"], readings["pressure"] = tag.barometer.read()
# luxmeter
readings["light"] = tag.lightmeter.read()
# battery
readings["battery"] = tag.battery.read()
disable_sensors(tag)
# round to 2 decimal places for all readings
readings = {key: round(value, 2) for key, value in readings.items()}
return readings
except BTLEException as error:
print("Unable to take sensor readings. {}".format(error))
return {}
|
481aae840d9ab41995086e3ef98c500abf4ec82e
| 3,640,795
|
import binascii
def digita_gw(request):
"""
Digita GW endpoint implementation
"""
identifier = request.data['DevEUI_uplink']['DevEUI']
apsen = core.models.apartment_sensor_models.ApartmentSensor.objects.get_or_create(identifier=identifier)[0]
payload = binascii.unhexlify(request.data['DevEUI_uplink']['payload_hex'])
decoded_payload = decode_elsys_payload(payload)
mapping = settings.DIGITA_GW_PAYLOAD_TO_ATTRIBUTES # type: dict
new_values = []
for key, value in decoded_payload.items():
uri = mapping.get(key, '')
if uri:
attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(uri=uri, defaults={'description': key})[0]
else:
attr = core.models.sensor_models.SensorAttribute.objects.get_or_create(description=key)[0]
apsen_attr = apsen.attributes.get_or_create(attribute=attr)[0]
new_values.append(apsen_attr.values.create(value=value))
models.Subscription.handle_new_values(new_values)
return Response({"message": "Updated successfully"})
|
e6bf45c92ea61278dd47e52ed945e91aa514d21b
| 3,640,796
|
def resize_img(_img, maxdims=(1000, 700)):
"""
Resize a given image. Image can be either a Pillow Image, or a NumPy array. Resizing is done automatically such
that the entire image fits inside the given maxdims box, keeping aspect ratio intact
:param _img:
:param maxdims:
:return:
"""
try:
# If NumPy array, create Pillow Image
img = Image.fromarray(_img)
except TypeError:
# Else image must already be a Pillow Image
img = _img
ratio = max(img.size[1] / maxdims[0], img.size[0] / maxdims[1])
image = img.resize((int(img.size[0] / ratio), int(img.size[1] / ratio)), Image.ANTIALIAS)
return image
|
54716e4ce030a675a0655b06d7121d4c38bd7c43
| 3,640,797
|
def natural_key(s):
"""Converts string ``s`` into a tuple that will sort "naturally"
(i.e., ``name5`` will come before ``name10`` and ``1`` will come
before ``A``). This function is designed to be used as the ``key``
argument to sorting functions.
:param s: the str/unicode string to convert.
:rtype: tuple
"""
# Use _nkre to split the input string into a sequence of
# digit runs and non-digit runs. Then use _nkconv() to convert
# the digit runs into ints and the non-digit runs to lowercase.
return tuple(_nkconv(m) for m in _nkre.findall(s))
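# A hedged sketch of the two module-level helpers assumed above: `_nkre`
# splits a string into alternating digit and non-digit runs, and `_nkconv`
# turns digit runs into ints and lowercases the rest. The originals may
# differ in detail.
import re

_nkre = re.compile(r'\d+|\D+')

def _nkconv(s):
    return int(s) if s.isdigit() else s.lower()

# sorted(['name10', 'name5'], key=natural_key) -> ['name5', 'name10']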
|
d0eb51bdd3e7c6caa5b13c38269bc9c07e3834d2
| 3,640,798
|
def SetDocTimestampFrequency(doc:NexDoc, freq:float):
"""Sets the document timestamp frequency"""
return NexRun("SetDocTimestampFrequency", locals())
|
dceb766792b6ca34d5f5759b0079acc5b70da5a9
| 3,640,800
|
import numpy as np
def rsqrt(x: np.ndarray):
"""Computes reciprocal of square root of x element-wise.
Args:
x: input tensor
Returns:
output tensor
Examples:
>>> x = np.array([2., 0., -2.])
>>> rsqrt(x)
array([0.70710678, inf, nan])
"""
return 1/np.sqrt(x)
|
f219ae71b5136bc1b34a2bb06ec76dcdb7ee20bb
| 3,640,801
|
def is_card(obj):
"""Return true if the object is a card."""
return obj in CARDS_SET
|
21f155feadde94d652e120224a2712f2470a9926
| 3,640,802
|
def plot_lines(
y: tuple,
x: np.ndarray = None,
points: bool = True,
x_axis_label: str = 'Index',
y_axis_label: str = 'Value',
plot_width: int = 1000,
plot_height: int = 500,
color: tuple = None,
legend: tuple = None,
title: str = 'Graph lines',
show_graph: bool = True
) -> figure:
"""
Plot lines from y tuple. Number of lines equal len(y)
with plot params
"""
if x is None:
x = np.arange(len(y[0]))
if legend is None:
legend = [f'Line {i}' for i in range(len(y))]
if color is None:
color = COLORS
fig = figure(title=title, x_axis_label=x_axis_label,
y_axis_label=y_axis_label, plot_width=plot_width,
plot_height=plot_height)
for i in range(len(y)):
fig.line(
y=y[i],
x=x,
color=color[i],
legend=legend[i]
)
if points:
fig.circle(
y=y[i],
x=x,
fill_color=color[i]
)
if show_graph:
show(fig)
return fig
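# Usage sketch (COLORS is assumed to be a module-level palette; explicit
# colors are passed here to keep the example self-contained):
# t = np.arange(50)
# fig = plot_lines((np.sin(t / 5), np.cos(t / 5)), x=t,
#                  color=('navy', 'firebrick'), legend=('sin', 'cos'),
#                  show_graph=False)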
|
b938151b90005bc23bb9ed2f795dbf4620b26251
| 3,640,803
|
import re
def obtain_csrf(session):
"""
Obtain the CSRF token from the login page.
"""
resp = session.get(FLOW_LOGIN_GET_URL)
contents = str(resp.content)
match = re.search(r'csrfToken" value="([a-z0-9\-]+)"', contents)
return match.group(1)
|
a091ca33b6b0a43608261e46c54c7ae164a9d3af
| 3,640,804
|
def get_distance_curve(
kernel,
lambda_values,
N,
M=None,
):
""" Given number of elements per class, full kernel (with first N rows corr.
to mixture and the last M rows corr. to component, and set of lambda values
compute $\hat d(\lambda)$ for those values of lambda"""
d_lambda = []
if M is None:
M = kernel.shape[0] - N
prev_soln = None
for lambda_value in lambda_values:
u_lambda = lambda_value / N * np.concatenate((np.ones((N, 1)),
np.zeros((M, 1)))) + (1 - lambda_value) / M \
* np.concatenate((np.zeros((N, 1)), np.ones((M, 1))))
(solution, distance_sqd) = \
find_nearest_valid_distribution(u_lambda, kernel, initial=prev_soln)
prev_soln = solution
d_lambda.append(sqrt(distance_sqd))
d_lambda = np.array(d_lambda)
return d_lambda
|
e085ea6b2122b052625df1c7b60115552112ffab
| 3,640,805
|
def _process_labels(labels, label_smoothing):
"""Pre-process a binary label tensor, maybe applying smoothing.
Parameters
----------
labels : tensor-like
Tensor of 0's and 1's.
label_smoothing : float or None
Float in [0, 1]. When 0, no smoothing occurs. When positive, the binary
ground truth labels `y_true` are squeezed toward 0.5, with larger values
of `label_smoothing` leading to label values closer to 0.5.
Returns
-------
torch.Tensor
The processed labels.
"""
assert label_smoothing is not None
labels = (1 - label_smoothing) * labels + label_smoothing * 0.5
return labels
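# Quick numeric illustration of the smoothing formula above: with
# label_smoothing = 0.2, hard labels 0 and 1 are squeezed to 0.1 and 0.9.
import torch

smoothed = _process_labels(torch.tensor([0.0, 1.0]), label_smoothing=0.2)
# smoothed -> tensor([0.1000, 0.9000])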
|
5a71ded8ac9d3ef4b389542814a170f35ef18fdd
| 3,640,806
|
def guess_digit(image, avgs):
"""Return the digit whose average darkness in the training data is
closest to the darkness of ``image``. Note that ``avgs`` is
assumed to be a defaultdict whose keys are 0...9, and whose values
are the corresponding average darknesses across the training data."""
darkness = sum(image)
distances = {k: abs(v-darkness) for k, v in avgs.items()}
return min(distances, key=distances.get)
|
055a0d31f85ce6f5786d6bd6dfaed75bdb3ff5d6
| 3,640,807
|
from time import time
def multiple_writes(self,
Y_splits,
Z_splits,
X_splits,
out_dir,
mem,
filename_prefix="bigbrain",
extension="nii",
nThreads=1,
benchmark=False):
"""
Split the input image into several splits,
all share with the same shape
For now only support .nii extension
:param Y_splits: How many splits in Y-axis
:param Z_splits: How many splits in Z-axis
:param X_splits: How many splits in X-axis
:param out_dir: Output Splits dir
:param mem: memory load each round
:param filename_prefix: each split's prefix filename
:param extension: extension of each split
:param nThreads: number of threads to trigger in each writing process
:param benchmark: If set to true the function will return
a dictionary containing benchmark information.
:return:
"""
def threaded_multiple():
'''# Using multi-threading to send data to hdfs in parallel,
# which will parallelize writing process.
# nThreads: number of threads that are working on writing
# data at the same time.
print("start {} threads to write data...".format(nThreads))
# separate all the splits' metadata to several pieces,
# each piece contains #nThreads splits' metadata.
caches = _split_arr(one_round_split_metadata.items(), nThreads)
st1 = time()
for thread_round in caches:
tds = []
# one split's metadata triggers one thread
for i in thread_round:
ix = i[1]
data = data_in_range[ix[0]: ix[1],
ix[2]: ix[3],
ix[4]: ix[5]]
td = threading.Thread(target=write_array_to_file,
args=(data, i[0], 0, benchmark))
td.start()
tds.append(td)
del data
for t in tds:
t.join()'''
pass
def compute_sizes(Y_splits, Z_splits, X_splits):
'''Compute per-split sizes, full-image sizes and bytes per voxel.'''
# calculate remainder based on the original image file
Y_size, Z_size, X_size = self.header.get_data_shape()
bytes_per_voxel = self.header['bitpix'] / 8
if (X_size % X_splits != 0
or Z_size % Z_splits != 0
or Y_size % Y_splits != 0):
raise Exception("There is remainder after splitting, \
please reset the y,z,x splits")
x_size = X_size / X_splits
z_size = Z_size / Z_splits
y_size = Y_size / Y_splits
return ((x_size, z_size, y_size),
(X_size, Z_size, Y_size),
bytes_per_voxel)
def file_manipulation_multiple(sizes, Sizes, filename_prefix):
'''Generate split names, the legend file, split headers and the split index map.'''
x_size, z_size, y_size = sizes
X_size, Z_size, Y_size = Sizes
# get all split_names and write them to the legend file
split_names = generate_splits_name(y_size, z_size, x_size, Y_size,
Z_size, X_size, out_dir,
filename_prefix,
extension)
generate_legend_file(split_names, "legend.txt", out_dir)
# generate all the headers for each split
# in order to reduce overhead when reading headers of splits
# from hdfs, create a header cache in the local environment
print("create split meta data dictionary...")
split_meta_cache = generate_headers_of_splits(split_names,
y_size,
z_size,
x_size,
self.header
.get_data_dtype())
print("Get split indexes...")
split_indexes = get_indexes_of_all_splits(split_names,
split_meta_cache,
Y_size, Z_size)
return split_indexes, split_names, split_meta_cache
def get_metadata_multiple(split_indexes,
split_names,
split_meta_cache,
from_x_index):
'''Build per-split write metadata for the current read range.'''
# create split metadata for all splits(position, write_range, etc.)
one_round_split_metadata = {}
for split_name in split_names:
if check_in_range(next_read_index, split_indexes[split_name]):
split = split_meta_cache[split_name]
(X_index_min, X_index_max,
x_index_min, x_index_max) = \
extract_slices_range(split,
next_read_index, Y_size,
Z_size)
y_index_min = int(split.split_pos[-3])
z_index_min = int(split.split_pos[-2])
y_index_max = y_index_min + split.split_y
z_index_max = z_index_min + split.split_z
one_round_split_metadata[split_name] = \
(y_index_min, y_index_max, z_index_min, z_index_max,
X_index_min - from_x_index,
X_index_max - from_x_index + 1)
return one_round_split_metadata
def loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark):
'''Read one memory-sized chunk and write it to all affected splits, updating benchmark totals.'''
# keep accumulating on top of the running totals that were passed in
split_nb_seeks = split_seek_number
X_size, Z_size, Y_size = Sizes
original_img_voxels = X_size * Y_size * Z_size
next_read_offsets = (next_read_index[0] * bytes_per_voxel,
next_read_index[1] * bytes_per_voxel + 1)
print("From {} to {}".format(next_read_offsets[0],
next_read_offsets[1]))
from_x_index = index_to_voxel(next_read_index[0],
Y_size, Z_size)[2]
to_x_index = index_to_voxel(next_read_index[1] + 1,
Y_size, Z_size)[2]
# read
print("Start reading data to memory...")
if benchmark:
t = time()
data_in_range = \
self.proxy.dataobj[..., int(from_x_index): int(to_x_index)]
if benchmark:
read_time = time() -t
print('read time ', read_time)
split_read_time += read_time
split_nb_seeks += 1
one_round_split_metadata = get_metadata_multiple(split_indexes,
split_names,
split_meta_cache,
from_x_index)
caches = _split_arr(one_round_split_metadata.items(), nThreads)
threaded_multiple()
for round in caches:
for i in round:
ix = i[1]
ix = list(map(lambda x: int(x), ix))
data = data_in_range[ix[0]:ix[1], ix[2]:ix[3], ix[4]:ix[5]]
if benchmark:
seek_time, write_time, seek_number = \
write_array_to_file(data, i[0], 0, benchmark)
split_write_time += write_time
split_seek_time += seek_time
split_nb_seeks += seek_number
print("writing data takes ", write_time)
else:
write_array_to_file(data, i[0], 0, benchmark)
next_read_index = (next_read_index[1] + 1,
next_read_index[1] + voxels)
# last write, write no more than image size
if next_read_index[1] >= original_img_voxels:
next_read_index = (next_read_index[0], original_img_voxels - 1)
del caches
del one_round_split_metadata
del data_in_range
if benchmark:
return (next_read_index,
split_read_time,
split_write_time,
split_seek_time,
split_nb_seeks)
else:
return next_read_index
# begin algorithm
split_read_time = 0
split_seek_time = 0
split_write_time = 0
split_seek_number = 0
# preparation
sizes, Sizes, bytes_per_voxel = compute_sizes(Y_splits,
Z_splits,
X_splits)
X_size, Z_size, Y_size = Sizes
original_img_voxels = X_size * Y_size * Z_size
(split_indexes,
split_names,
split_meta_cache) = \
file_manipulation_multiple(sizes,
Sizes,
filename_prefix)
# drop the remainder which is less than one slice
# if mem is less than one slice, then set mem to one slice
mem = mem - mem % (Y_size * Z_size * bytes_per_voxel) \
if mem >= Y_size * Z_size * bytes_per_voxel \
else Y_size * Z_size * bytes_per_voxel
voxels = mem // bytes_per_voxel # get how many voxels per round
next_read_index = (0, voxels - 1)
while True:
if benchmark:
(next_read_index,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number) = (loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark))
else:
next_read_index = loop_multiple(next_read_index,
bytes_per_voxel,
Sizes,
split_indexes,
split_names,
split_meta_cache,
split_read_time,
split_write_time,
split_seek_time,
split_seek_number,
benchmark)
# if write range is larger than img size, we are done
if next_read_index[0] >= original_img_voxels:
break
if benchmark:
return {'split_read_time': split_read_time,
'split_write_time': split_write_time,
'split_seek_time': split_seek_time,
'split_nb_seeks': split_seek_number}
else:
return
|
b2a7048628c54bf8976f9b3182fe4cecc18468e7
| 3,640,808
|
def new_parameter_value(data, parameter_key: str):
"""Return the new parameter value and if necessary, remove any obsolete multiple choice values."""
new_value = dict(bottle.request.json)[parameter_key]
source_parameter = data.datamodel["sources"][data.source["type"]]["parameters"][parameter_key]
if source_parameter["type"] == "multiple_choice":
new_value = [value for value in new_value if value in source_parameter["values"]]
return new_value
|
41160804aba582ce0c588762bb1a96ea53e258df
| 3,640,809
|
from typing import Sequence
from typing import Optional
import numpy as np
from pybullet_utils import bullet_client
def rotate_to_base_frame(
pybullet_client: bullet_client.BulletClient,
urdf_id: int,
vector: Sequence[float],
init_orientation_inv_quat: Optional[Sequence[float]] = (0, 0, 0, 1)
) -> np.ndarray:
"""Rotates the input vector to the base coordinate systems.
Note: This is different from world frame to base frame transformation, as we
do not apply any translation here.
Args:
pybullet_client: The bullet client.
urdf_id: The unique id returned after loading URDF.
vector: Input vector in the world frame.
init_orientation_inv_quat:
Returns:
A rotated vector in the base frame.
"""
_, base_orientation_quat = (
pybullet_client.getBasePositionAndOrientation(urdf_id))
_, base_orientation_quat_from_init = pybullet_client.multiplyTransforms(
positionA=(0, 0, 0),
orientationA=init_orientation_inv_quat,
positionB=(0, 0, 0),
orientationB=base_orientation_quat)
_, inverse_base_orientation = pybullet_client.invertTransform(
[0, 0, 0], base_orientation_quat_from_init)
# PyBullet transforms requires simple list/tuple or it may crash.
if isinstance(vector, np.ndarray):
vector_list = vector.tolist()
else:
vector_list = vector
local_vector, _ = pybullet_client.multiplyTransforms(
positionA=(0, 0, 0),
orientationA=inverse_base_orientation,
positionB=vector_list,
orientationB=(0, 0, 0, 1),
)
return np.array(local_vector)
|
5651e0183cd61555f90fe6af1e5c5dc2bec6e8b5
| 3,640,810
|
def show_page_map(label):
"""Renders the base page map code."""
return render('page_map.html', {
'map_label': label.replace('_', ' '),
})
|
623d47c4de57c1810c07475a70e501d55ee5e9ae
| 3,640,811
|
def create_clf_unicycle_position_controller(linear_velocity_gain=0.8, angular_velocity_gain=3):
"""Creates a unicycle model pose controller. Drives the unicycle model to a given position
and orientation. (($u: \mathbf{R}^{3 \times N} \times \mathbf{R}^{2 \times N} \to \mathbf{R}^{2 \times N}$)
linear_velocity_gain - the gain impacting the produced unicycle linear velocity
angular_velocity_gain - the gain impacting the produced unicycle angular velocity
-> function
"""
#Check user input types
assert isinstance(linear_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be an integer or float. Received type %r." % type(linear_velocity_gain).__name__
assert isinstance(angular_velocity_gain, (int, float)), "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be an integer or float. Received type %r." % type(angular_velocity_gain).__name__
#Check user input ranges/sizes
assert linear_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the linear velocity gain (linear_velocity_gain) must be greater than or equal to zero. Received %r." % linear_velocity_gain
assert angular_velocity_gain >= 0, "In the function create_clf_unicycle_position_controller, the angular velocity gain (angular_velocity_gain) must be greater than or equal to zero. Received %r." % angular_velocity_gain
def position_uni_clf_controller(states, positions):
""" A position controller for unicycle models. This utilized a control lyapunov function
(CLF) to drive a unicycle system to a desired position. This function operates on unicycle
states and desired positions to return a unicycle velocity command vector.
states: 3xN numpy array (of unicycle states, [x;y;theta])
positions: 2xN numpy array (of desired positions, [x_goal;y_goal])
-> 2xN numpy array (of unicycle control inputs)
"""
#Check user input types
assert isinstance(states, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the unicycle robot states (states) must be a numpy array. Received type %r." % type(states).__name__
assert isinstance(positions, np.ndarray), "In the function created by the create_clf_unicycle_position_controller function, the robot goal points (positions) must be a numpy array. Received type %r." % type(positions).__name__
#Check user input ranges/sizes
assert states.shape[0] == 3, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the unicycle robot states (states) must be 3 ([x;y;theta]). Received dimension %r." % states.shape[0]
assert positions.shape[0] == 2, "In the function created by the create_clf_unicycle_position_controller function, the dimension of the robot goal positions (positions) must be 2 ([x_goal;y_goal]). Received dimension %r." % positions.shape[0]
assert states.shape[1] == positions.shape[1], "In the function created by the create_clf_unicycle_position_controller function, the number of unicycle robot states (states) must be equal to the number of robot goal positions (positions). Received a current robot pose input array (states) of size %r x %r and desired position array (positions) of size %r x %r." % (states.shape[0], states.shape[1], positions.shape[0], positions.shape[1])
_,N = np.shape(states)
dxu = np.zeros((2, N))
pos_error = positions - states[:2][:]
rot_error = np.arctan2(pos_error[1][:],pos_error[0][:])
dist = np.linalg.norm(pos_error, axis=0)
dxu[0][:]=linear_velocity_gain*dist*np.cos(rot_error-states[2][:])
dxu[1][:]=angular_velocity_gain*dist*np.sin(rot_error-states[2][:])
return dxu
return position_uni_clf_controller
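# Usage sketch: build the controller once, then call it each control step
# with the current unicycle states and goal positions as numpy arrays.
import numpy as np

controller = create_clf_unicycle_position_controller()
states = np.array([[0.0], [0.0], [0.0]])   # x, y, theta for a single robot
goals = np.array([[1.0], [1.0]])           # goal x, y
# dxu = controller(states, goals)          # 2x1 [linear; angular] velocity command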
|
4d75c85079ca5350473c058019ae6f4763fdd97b
| 3,640,812
|
from typing import Tuple
def stft_reassign_from_sig(sig_wf: np.ndarray,
frequency_sample_rate_hz: float,
band_order_Nth: float) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray,
np.ndarray]:
"""
Librosa STFT is complex FFT grid, not power
Reassigned frequencies are not the same as the standard mesh frequencies
:param sig_wf: array with input signal
:param frequency_sample_rate_hz: sample rate of frequency in Hz
:param band_order_Nth: Nth order of constant Q bands
:return: six numpy ndarrays with STFT, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s,
frequency_stft_rsg_hz
"""
sig_duration_s = len(sig_wf)/frequency_sample_rate_hz
_, min_frequency_hz = scales.from_duration(band_order_Nth, sig_duration_s)
order_Nth, cycles_M, quality_Q, \
frequency_center, frequency_start, frequency_end = \
scales.frequency_bands_g2f1(scale_order_input=band_order_Nth,
frequency_low_input=min_frequency_hz,
frequency_sample_rate_input=frequency_sample_rate_hz)
# Choose the spectral resolution as the key parameter
frequency_resolution_min_hz = np.min(frequency_end - frequency_start)
frequency_resolution_max_hz = np.max(frequency_end - frequency_start)
frequency_resolution_hz_geo = np.sqrt(frequency_resolution_min_hz*frequency_resolution_max_hz)
stft_time_duration_s = 1/frequency_resolution_hz_geo
stft_points_per_seg = int(frequency_sample_rate_hz*stft_time_duration_s)
# From CQT
stft_points_hop, _, _, _, _ = \
scales.cqt_frequency_bands_g2f1(band_order_Nth,
min_frequency_hz,
frequency_sample_rate_hz,
is_power_2=False)
print('Reassigned STFT Duration, NFFT, HOP:', len(sig_wf), stft_points_per_seg, stft_points_hop)
STFT_Scaling = 2*np.sqrt(np.pi)/stft_points_per_seg
# Reassigned frequencies require a 'best fit' solution.
frequency_stft_rsg_hz, time_stft_rsg_s, STFT_mag = \
librosa.reassigned_spectrogram(sig_wf, sr=frequency_sample_rate_hz,
n_fft=stft_points_per_seg,
hop_length=stft_points_hop, win_length=None,
window='hann', center=False, pad_mode='reflect')
# Must be scaled to match scipy psd
STFT_mag *= STFT_Scaling
STFT_bits = utils.log2epsilon(STFT_mag)
# Standard mesh times and frequencies for plotting - nice to have both
time_stft_s = librosa.times_like(STFT_mag, sr=frequency_sample_rate_hz,
hop_length=stft_points_hop)
frequency_stft_hz = librosa.core.fft_frequencies(sr=frequency_sample_rate_hz,
n_fft=stft_points_per_seg)
# Reassigned frequencies are not the same as the standard mesh frequencies
return STFT_mag, STFT_bits, time_stft_s, frequency_stft_hz, time_stft_rsg_s, frequency_stft_rsg_hz
|
90aa2b019ace90500c38feb5e643a8ca3c02360a
| 3,640,814
|
from typing import List
def download(*urls, zip: str=None, unzip: bool=False, **kwargs) -> List[File]:
"""
Download multiple zippyshare urls
Parameters
-----------
*urls
Zippyshare urls.
zip: :class:`str`
Zip all downloaded files once finished.
Zip filename will be taken from ``zip`` parameter,
default to ``None``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
unzip: :class:`bool`
Unzip all downloaded files once finished
(if given file is zip format extract it, otherwise ignore it),
default to ``False``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
**kwargs
These parameters will be passed to :meth:`File.download()`,
except for parameter ``filename``.
Returns
-------
List[:class:`File`]
a list of Zippyshare files
"""
if unzip and zip:
raise ValueError("unzip and zip paramaters cannot be set together")
downloaded_files = {}
files = []
for url in urls:
info = get_info(url)
file = File(info)
files.append(file)
if kwargs.get('filename') is not None:
kwargs.pop('filename')
file_path = file.download(**kwargs)
downloaded_files[file] = file_path
if unzip:
extract_archived_file(str(file_path))
if zip:
log.info(build_pretty_list_log(downloaded_files, 'Zipping all downloaded files to "%s"' % zip))
archive_zip(downloaded_files, zip)
log.info(build_pretty_list_log(downloaded_files, 'Successfully zip all downloaded files to "%s"' % zip))
return files
|
a1197d264fa3305fb545a60e5963be2fc326aa5d
| 3,640,815
|
def pop_arg(args_list, expected_size_after=0, msg="Missing argument"):
"""helper function to get and check command line arguments"""
try:
value = args_list.pop(0)
except IndexError:
raise BadCommandUsage(msg)
if expected_size_after is not None and len(args_list) > expected_size_after:
raise BadCommandUsage('too many arguments')
return value
|
90b1f1ae596a9257d15cc189e87223b166252c9a
| 3,640,816
|
def d4s(data):
"""
Beam parameter calculation according to the ISO standard D4sigma integrals
input: 2D array of intensity values (pixels)
output:
xx, yy: x and y centres
dx, dy: 4 sigma widths for x and y
angle: inferred rotation angle, radians
"""
gg = data
dimy, dimx = np.shape(data)
X, Y = np.mgrid[0:dimx,0:dimy]
X = X.T
Y = Y.T
P = np.sum(data)
xx = np.sum(data * X) / P
yy = np.sum(data * Y) / P
xx2 = np.sum(data * (X - xx)**2)/P
yy2 = np.sum(data * (Y - yy)**2)/P
xy = np.sum(data * (X - xx) * (Y - yy)) / P
gamm = np.sign(xx2 - yy2)
angle = 0.5 * np.arctan(2*xy / (xx2 - yy2))
try:
dx = 2 * np.sqrt(2) * (xx2 + yy2 + gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
dy = 2 * np.sqrt(2) * (xx2 + yy2 - gamm * ( (xx2 - yy2)**2 + 4*xy**2)**0.5)**(0.5)
    except Exception:
        # In case of error, just make the size very large
        print("Fitting error")
        dx, dy = data.shape
return xx, yy, dx, dy, angle
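A quick self-check sketch (assuming numpy is imported as np, as the function body implies): the D4sigma widths of a synthetic Gaussian spot of width sigma should come out close to 4*sigma pixels.
import numpy as np

y, x = np.mgrid[0:200, 0:200]
sigma = 10.0
spot = np.exp(-((x - 120.0) ** 2 + (y - 80.0) ** 2) / (2 * sigma ** 2))
xx, yy, dx, dy, angle = d4s(spot)
print(round(xx), round(yy), round(dx, 1), round(dy, 1))   # roughly 120 80 40.0 40.0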
|
f10c0792a2e200c980ccd6ffb286bdfabc90bb32
| 3,640,817
|
from astropy.utils import iers
import warnings
import six
def checkWarnings(func, func_args=[], func_kwargs={},
category=UserWarning,
nwarnings=1, message=None, known_warning=None):
"""Function to check expected warnings."""
if (not isinstance(category, list) or len(category) == 1) and nwarnings > 1:
if isinstance(category, list):
category = category * nwarnings
else:
category = [category] * nwarnings
if (not isinstance(message, list) or len(message) == 1) and nwarnings > 1:
if isinstance(message, list):
message = message * nwarnings
else:
message = [message] * nwarnings
if known_warning == 'miriad':
# The default warnings for known telescopes when reading miriad files
category = [UserWarning]
message = ['Altitude is not present in Miriad file, using known '
'location values for PAPER.']
nwarnings = 1
elif known_warning == 'paper_uvfits':
# The default warnings for known telescopes when reading uvfits files
category = [UserWarning] * 2
message = ['Required Antenna frame keyword', 'telescope_location is not set']
nwarnings = 2
elif known_warning == 'fhd':
category = [UserWarning]
message = ['Telescope location derived from obs']
nwarnings = 1
category = uvutils._get_iterable(category)
message = uvutils._get_iterable(message)
clearWarnings()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always") # All warnings triggered
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# filter iers warnings if iers.conf.auto_max_age is set to None, as we do in testing if the iers url is down
if iers.conf.auto_max_age is None:
warnings.filterwarnings("ignore", message="failed to download")
warnings.filterwarnings("ignore", message="time is out of IERS range")
if isinstance(message, six.string_types):
test_message = [message.startswith("LST values stored in ")]
else:
test_message = []
for m in message:
if m is None:
test_message.append(False)
else:
test_message.append(m.startswith("LST values stored in "))
if not any(test_message):
warnings.filterwarnings("ignore", message="LST values stored in ")
retval = func(*func_args, **func_kwargs) # Run function
# Verify
if len(w) != nwarnings:
print('wrong number of warnings. Expected number was {nexp}, '
'actual number was {nact}.'.format(nexp=nwarnings, nact=len(w)))
for idx, wi in enumerate(w):
print('warning {i} is: {w}'.format(i=idx, w=wi))
assert(False)
else:
for i, w_i in enumerate(w):
if w_i.category is not category[i]:
print('expected category ' + str(i) + ' was: ', category[i])
print('category ' + str(i) + ' was: ', str(w_i.category))
assert(False)
if message[i] is not None:
if message[i] not in str(w_i.message):
print('expected message ' + str(i) + ' was: ', message[i])
print('message ' + str(i) + ' was: ', str(w_i.message))
assert(False)
return retval
|
58a40594f48b1f47350e9b6a1ca1d956dfd63d04
| 3,640,818
|
from omas.omas_utils import list_structures
from omas.omas_utils import load_structure
def extract_times(imas_version=omas_rcparams['default_imas_version']):
"""
return list of strings with .time across all structures
:param imas_version: imas version
:return: list with times
"""
times = []
for structure in list_structures(imas_version=imas_version):
tmp = load_structure(structure, imas_version)[0]
for item in tmp:
if not item.endswith('.time') or 'data_type' not in tmp[item] or tmp[item]['data_type'] == 'STRUCTURE':
continue
times.append(item)
return sorted(times)
|
2b13361dc713d90a946554383b556a8ced24ac55
| 3,640,819
|
import json
def load_appdata():
"""load application data from json file
"""
try:
_in = open(FNAME)
except FileNotFoundError:
return
with _in:
appdata = json.load(_in)
return appdata
|
afb3a69a5abf72cd14a8ae0c8c99ccc3350899a1
| 3,640,820
|
def compute_couplings(models_a, models_b):
"""
Given logistic models for two multiple sequence alignments, calculate all
intermolecular coupling strengths between residues.
The coupling strength between positions i and j is calculated as the 2-norm
of the concatenation of the coefficient submatrices that describe the
relationships between the two positions.
----------------------------------------------------------------------------
Reference:
Ovchinnikov, Sergey, Hetunandan Kamisetty, and David Baker.
"Robust and accurate prediction of residue–residue interactions across
protein interfaces using evolutionary information." Elife 3 (2014): e02030
----------------------------------------------------------------------------
Arguments
---------
models_a: list of SGDClassifier objects, one for each analyzed column in
MSA A
models_b: list of SGDClassifier objects, one for each analyzed column in
MSA B
Returns
-------
couplings: dict, contains intermolecular coupling strengths in the format
{"Ai:Bj":float,...}
contact_mtx: array, 2D matrix of dimensions (models_a, models_b); contains
the value of the coupling strength for each pair of positions
"""
# Dictionary to store couplings between residues
couplings = {}
# To keep track of the submatrix we need to take from the matrix of
# coefficients from protein B
# Iterate over models / columns of MSA A
# Variable to keep track of the submatrix we need to take from the matrix
# of coefficients of models of B
offset_a = 0
contact_mtx = np.zeros((len(models_a), len(models_b)))
for i, model_a in enumerate(models_a):
# Variable to keep track of the submatrix we need to take from the
# matrix of coefficients from protein A
end_point_a = 0
for j, model_b in enumerate(models_b):
# Select the relevant submatrices of coefficients, this is,
# the columns in A that indicate coupling to B and vice versa
# Taking the 2-norm of a vector and a matrix is equivalent. In case
# of mismatching dimensions, flatten the matrices into vectors and
# concatenate them
sel_coefs_a, end_point_a = select_coefs(model_a.coef_, end_point_a)
sel_coefs_a = sel_coefs_a.flatten()
sel_coefs_b, _ = select_coefs(model_b.coef_, offset_a)
sel_coefs_b = sel_coefs_b.flatten()
coef_vector = np.concatenate((sel_coefs_a, sel_coefs_b))
# Calculate coupling strength (as the 2-norm of the vector of
# coefficients) and store the value in the output
coupling = np.linalg.norm(coef_vector)
coupling_name = ''.join(['A', str(i), ':', 'B', str(j)])
couplings[coupling_name] = coupling
contact_mtx[i][j] = coupling
offset_a += 20
return couplings, contact_mtx
|
761c1987a7e230f70e123ce8d1746881b1b26cae
| 3,640,821
|
def update_checkout_line(request, checkout, variant_id):
"""Update the line quantities."""
if not request.is_ajax():
return redirect("checkout:index")
checkout_line = get_object_or_404(checkout.lines, variant_id=variant_id)
discounts = request.discounts
status = None
form = ReplaceCheckoutLineForm(
request.POST,
checkout=checkout,
variant=checkout_line.variant,
discounts=discounts,
)
manager = request.extensions
if form.is_valid():
form.save()
checkout.refresh_from_db()
# Refresh obj from db and confirm that checkout still has this line
checkout_line = checkout.lines.filter(variant_id=variant_id).first()
line_total = zero_taxed_money(currency=settings.DEFAULT_CURRENCY)
if checkout_line:
line_total = manager.calculate_checkout_line_total(checkout_line, discounts)
subtotal = get_display_price(line_total)
response = {
"variantId": variant_id,
"subtotal": format_money(subtotal),
"total": 0,
"checkout": {"numItems": checkout.quantity, "numLines": len(checkout)},
}
checkout_total = manager.calculate_checkout_subtotal(checkout, discounts)
checkout_total = get_display_price(checkout_total)
response["total"] = format_money(checkout_total)
local_checkout_total = to_local_currency(checkout_total, request.currency)
if local_checkout_total is not None:
response["localTotal"] = format_money(local_checkout_total)
status = 200
elif request.POST is not None:
response = {"error": form.errors}
status = 400
return JsonResponse(response, status=status)
|
9394699c50bc3724ac253f288e23cc77eac05a3a
| 3,640,822
|
from typing import Optional
def merge_df(
df: Optional[pd.DataFrame], new_df: Optional[pd.DataFrame], how="left"
):
"""
join two dataframes. Assumes the dataframes are indexed on datetime
Args:
df: optional dataframe
new_df: optional dataframe
Returns:
The merged dataframe
"""
if df is None:
result_df = new_df
elif new_df is None:
result_df = df
else:
try:
result_df = pd.merge_ordered(
df.reset_index(),
new_df.reset_index().drop_duplicates()
)
result_df.set_index("datetime", inplace=True)
result_df.sort_index(inplace=True)
if len(result_df.index.unique()) != len(result_df.index):
LOG.error("Merging did not result in unique indexes. Killing"
" to avoid missing data")
raise ValueError("Issue merging")
except Exception as e:
LOG.error("failed joining dataframes.")
raise e
return result_df
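A small sketch of the happy path, assuming pandas is imported as pd as the function requires; the index name must be "datetime" for the set_index call to succeed:
import pandas as pd

idx = pd.date_range("2021-01-01", periods=3, freq="D", name="datetime")
left = pd.DataFrame({"x": [1, 2, 3]}, index=idx)
right = pd.DataFrame({"y": [10, 20]}, index=idx[1:])
merged = merge_df(left, right)
print(merged)   # three rows indexed on datetime, with NaN where 'y' has no data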
|
783111942086a23fbb13b1e96f2d098c7db0f963
| 3,640,823
|
def _is_leaf(tree: DecisionTreeClassifier, node_id: int) -> bool:
"""
Determines if a tree node is a leaf.
:param tree: an `sklearn` decision tree classifier object
:param node_id: an integer identifying a node in the above tree
:return: a boolean `True` if the node is a leaf, `False` otherwise
"""
return tree.tree_.children_left[node_id] == tree.tree_.children_right[node_id]
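A small check against a fitted sklearn tree; in sklearn's array representation both child pointers of a leaf are -1, so they compare equal:
import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = DecisionTreeClassifier(random_state=0).fit(X, y)
print(_is_leaf(clf, 0))                                          # root node: False
print([_is_leaf(clf, i) for i in range(clf.tree_.node_count)])   # [False, True, True]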
|
bdc5affe82c1c7505668e0f7c70dbb548170b6e1
| 3,640,825
|
async def commission_reset(bot, context):
"""Resets a given user's post cooldown manually."""
advertisement_data = await _get_advertisement_data(bot, context.guild)
deleted_persistence = data.get(
bot, __name__, 'recently_deleted', guild_id=context.guild.id, default={})
user_id = context.arguments[0].id
if user_id in advertisement_data:
del advertisement_data[user_id]
if str(user_id) in deleted_persistence:
del deleted_persistence[str(user_id)]
return Response(
"Reset that user's advertisement cooldown. Their last advertisement post "
"will need to be removed manually if necessary.")
|
06666421569b92fdf8a943351058e3f53c7d0777
| 3,640,826
|
def test_sample_problems_auto_1d_maximization(max_iter, max_response, error_lim, model_type, capsys):
"""
solve a sample problem in two different conditions.
test that auto method works for a particular single-covariate (univariate) function
"""
# define data
x_input = [(0.5, 0,
1)] # covariates come as a list of tuples (one per covariate: (<initial_guess>, <min>, <max>))
# define response function
def f(x):
return -(6 * x["covar0"].iloc[0] - 2) ** 2 * np.sin(12 * x["covar0"].iloc[0] - 4)
# initialize class instance
cc = TuneSession(covars=x_input, model=model_type)
# run the auto-method
cc.auto(response_samp_func=f, max_iter=max_iter)
# assert
assert cc.model["covars_sampled_iter"] == max_iter
# assert that max value found
THEORETICAL_MAX_COVAR = 0.75725
assert abs(cc.covars_best_response_value[-1].item() - THEORETICAL_MAX_COVAR) < error_lim
# run current_best method
cc.current_best()
captured = capsys.readouterr()
assert abs(cc.best["covars"].values[0][0] - THEORETICAL_MAX_COVAR) < error_lim
assert abs(cc.best["response"].values[0][0] - max_response) < error_lim
assert cc.best["iteration_when_recorded"] == max_iter
|
3db394c4b1cccb276d3efe80ff7561830fc82b7a
| 3,640,827
|
def heatmap_numeric_w_dependent_variable(df, dependent_variable):
"""
    Takes df and a dependent variable as str
Returns a heatmap of all independent variables' correlations with dependent variable
"""
plt.figure(figsize=(10, 5.5))
figure = sns.heatmap(
df.corr()[[dependent_variable]].sort_values(by=dependent_variable),
annot=True,
cmap="coolwarm",
vmin=-1,
vmax=1,
)
return figure
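A minimal usage sketch with synthetic data, assuming matplotlib.pyplot as plt and seaborn as sns are imported as the function body requires:
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame({"a": rng.random(100), "b": rng.random(100)})
df["target"] = 2 * df["a"] - df["b"] + 0.1 * rng.random(100)
ax = heatmap_numeric_w_dependent_variable(df, "target")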
|
46919deb37ee1f641983761a81ffeb830dac8217
| 3,640,829
|
def numpy2seq(Z, val=-1):
"""Appends the minimal required amount of zeroes at the end of each
array in the jagged array `M`, such that `M` looses its jagedness."""
seq = []
for z in t2n(Z).astype(int):
i = np.where(z==val)[0]
if i.size == 0:
seq += [z.tolist()]
else:
seq += [z[:min(i)].tolist()]
return seq
|
b46f6379a3eba0c5754c1a824dc28a43a10dc742
| 3,640,831
|
def winner(board):
"""Detirmine the game's winner."""
WAYS_TO_WIN = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(0, 3, 6),
(1, 4, 7),
(2, 5, 8),
(0, 4, 8),
(2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
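A quick sketch of two of the possible outcomes, assuming the module-level constants EMPTY and TIE (the values shown here are hypothetical stand-ins):
EMPTY, TIE = " ", "TIE"   # hypothetical definitions of the module constants

x_wins = ["X", "X", "X",
          "O", "O", EMPTY,
          EMPTY, EMPTY, EMPTY]
print(winner(x_wins))     # -> "X"

drawn = ["X", "O", "X",
         "X", "O", "O",
         "O", "X", "X"]
print(winner(drawn))      # -> TIE (no line of three and no empty squares)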
|
6adb31e668c1d7e2723df7d65ab34246748c3249
| 3,640,832
|
def compute_inv_propensity(train_file, A, B):
"""
Compute Inverse propensity values
Values for A/B:
    Wikipedia-500K: 0.5/0.4
Amazon-670K, Amazon-3M: 0.6/2.6
Others: 0.55/1.5
"""
train_labels = data_utils.read_sparse_file(train_file)
inv_propen = xc_metrics.compute_inv_propesity(train_labels, A, B)
return inv_propen
|
df8f45cf48f056cee6f3f9026f546dcea0f9ee75
| 3,640,833
|
def tanh(x):
"""
    Returns the tanh of x.
Args:
x (TensorOp): A tensor.
Returns:
TensorOp: The tanh of x.
"""
return TanhOp(x)
|
bef86675a70714f3e33a6828353e1f71958c3057
| 3,640,834
|
def importBodyCSVDataset(testSplit: float, local_import: bool):
"""Import body dataset as numpy arrays from GitHub if available, or local dataset otherwise.
Args:
        testSplit (float): Percentage of the dataset reserved for testing. Must be between 0.0 and 1.0.
        local_import (bool): If True, read the CSV from the local datasets folder; otherwise download it from GitHub.
"""
assert 0.0 <= testSplit <= 1.0
datasetPath = DATASETS_PATH / "BodyPose_Dataset.csv"
datasetURL = "https://raw.githubusercontent.com/ArthurFDLR/pose-classification-kit/master/pose_classification_kit/datasets/BodyPose_Dataset.csv"
if local_import:
dataset_df = pd.read_csv(datasetPath)
else:
dataset_df = pd.read_csv(datasetURL)
bodyLabels_df = dataset_df.groupby("label")
labels = list(dataset_df.label.unique())
    # Find the minimum number of samples across categories to build uniformly distributed sample sets
total_size_cat = bodyLabels_df.size().min()
test_size_cat = int(total_size_cat * testSplit)
train_size_cat = total_size_cat - test_size_cat
x_train = []
x_test = []
y_train = []
y_test = []
# Iterate over each labeled group
for label, group in bodyLabels_df:
# remove irrelevant columns
group_array = group.drop(["label", "accuracy"], axis=1).to_numpy()
np.random.shuffle(group_array)
group_array_2D = [np.array((x[::2], x[1::2])).T for x in group_array]
x_train.append(group_array_2D[:train_size_cat])
y_train.append([label] * train_size_cat)
x_test.append(group_array_2D[train_size_cat : train_size_cat + test_size_cat])
y_test.append([label] * test_size_cat)
# Concatenate sample sets as numpy arrays
x_train = np.concatenate(x_train, axis=0)
x_test = np.concatenate(x_test, axis=0)
y_train = np.concatenate(y_train, axis=0)
y_test = np.concatenate(y_test, axis=0)
return x_train, x_test, y_train, y_test, labels
|
411d8c1aa3e1d741e2b169f1a4c3065af8f5e82c
| 3,640,835
|
def mvstdtprob(a, b, R, df, ieps=1e-5, quadkwds=None, mvstkwds=None):
"""
Probability of rectangular area of standard t distribution
assumes mean is zero and R is correlation matrix
Notes
-----
This function does not calculate the estimate of the combined error
between the underlying multivariate normal probability calculations
and the integration.
"""
kwds = dict(args=(a, b, R, df), epsabs=1e-4, epsrel=1e-2, limit=150)
    if quadkwds is not None:
kwds.update(quadkwds)
lower, upper = chi.ppf([ieps, 1 - ieps], df)
res, err = integrate.quad(funbgh2, lower, upper, **kwds)
prob = res * bghfactor(df)
return prob
|
2b15e3ce209d01e4790391242cbd87914a79fa5d
| 3,640,836
|
import re
def dropNested(text, openDelim, closeDelim):
"""
A matching function for nested expressions, e.g. namespaces and tables.
"""
openRE = re.compile(openDelim, re.IGNORECASE)
closeRE = re.compile(closeDelim, re.IGNORECASE)
# partition text in separate blocks { } { }
spans = [] # pairs (s, e) for each partition
nest = 0 # nesting level
start = openRE.search(text, 0)
if not start:
return text
end = closeRE.search(text, start.end())
next = start
while end:
next = openRE.search(text, next.end())
if not next: # termination
while nest: # close all pending
nest -= 1
end0 = closeRE.search(text, end.end())
if end0:
end = end0
else:
break
spans.append((start.start(), end.end()))
break
while end.end() < next.start():
# { } {
if nest:
nest -= 1
# try closing more
last = end.end()
end = closeRE.search(text, end.end())
if not end: # unbalanced
if spans:
span = (spans[0][0], last)
else:
span = (start.start(), last)
spans = [span]
break
else:
spans.append((start.start(), end.end()))
# advance start, find next close
start = next
end = closeRE.search(text, next.end())
break # { }
if next != start:
# { { }
nest += 1
# collect text outside partitions
return dropSpans(spans, text)
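A hedged usage sketch: with MediaWiki-style delimiters the call below drops the template block including its nested braces (it assumes the companion dropSpans helper from the same module is in scope):
wiki_text = "Intro text {{Infobox |img={{icon|x.png}} }} body text"
cleaned = dropNested(wiki_text, r'{{', r'}}')
print(cleaned)   # template span removed, leaving the surrounding prose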
|
dd77b86533dd43bcecf2ef944a61b59c4150aaae
| 3,640,839
|
def update_service(
*, db_session: Session = Depends(get_db), service_id: PrimaryKey, service_in: ServiceUpdate
):
"""Update an existing service."""
service = get(db_session=db_session, service_id=service_id)
if not service:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail=[{"msg": "A service with this id does not exist."}],
)
try:
service = update(db_session=db_session, service=service, service_in=service_in)
except IntegrityError:
raise ValidationError(
[ErrorWrapper(ExistsError(msg="A service with this name already exists."), loc="name")],
model=ServiceUpdate,
)
return service
|
0471c4bd496004a9c1cc5af4d806bd8109f62ca7
| 3,640,844
|
def import_flow_by_ref(flow_strref):
"""Return flow class by flow string reference."""
app_label, flow_path = flow_strref.split('/')
return import_string('{}.{}'.format(get_app_package(app_label), flow_path))
|
c2f9fe0b9ccc409b3bd64b6691ee34ca8d430ed6
| 3,640,845
|
def _escape_value(value):
"""Escape a value."""
value = value.replace(b"\\", b"\\\\")
value = value.replace(b"\n", b"\\n")
value = value.replace(b"\t", b"\\t")
value = value.replace(b'"', b'\\"')
return value
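A small check of the escaping rules (backslashes are doubled first, then newlines, tabs and double quotes are escaped):
print(_escape_value(b'a\tb\nc "d" \\'))
# -> b'a\\tb\\nc \\"d\\" \\\\'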
|
b58a3236c0686c7fb6a33859986123dc2b8089cc
| 3,640,846
|
from typing import Iterable
def find(*objects: Iterable[object]):
"""Sometimes you know the inputs and outputs for a procedure, but you don't remember the name.
methodfinder.find tries to find the name.
>>> import methodfinder
>>> import itertools
>>> methodfinder.find([1,2,3]) == 6
sum([1, 2, 3])
>>> methodfinder.find('1 + 1') == 2
eval('1 + 1')
>>> methodfinder.find(0.0) == 1.0
math.cos(0.0)
math.cosh(0.0)
math.erfc(0.0)
math.exp(0.0)
>>> methodfinder.find(0) == 1
0.denominator
math.factorial(0)
>>> import numpy as np
>>> methodfinder.find(np, 3) == np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
numpy.eye(3)
numpy.identity(3)
"""
# Just call the wrapper function so that the == sign can be used to specify
# the desired result
return _Foo(objects)
|
fcfc3c4d0e6d72b6d9f1d7b7bfd46146d8bbf027
| 3,640,847
|
def join_returns(cfg, arg_names, function_ast=None):
"""Joins multiple returns in a CFG into a single block
Given a CFG with multiple return statements, this function will replace the
returns by gotos to a common join block.
"""
join_args = [ir.Argument(function_ast, info=n, name=n) for n in arg_names]
join = ir.Block(function_ast, join_args, info="MERGE RETURNS")
returns = list(of_type[ir.Return](cfg.graph.nodes))
if returns:
cfg += CfgSimple.statement(join)
# Replace returns with gotos to joining block
for ret in returns:
assert len(ret.returns) == len(arg_names), (ret.returns, arg_names)
goto = ir.Goto(ret.ast_node, join, ret.returns)
cfg = cfg.replace(ret, goto)
cfg = cfg + (goto, join)
return cfg, join_args
|
0a89c2c6df39e0693597358f01704619cbd1d0bd
| 3,640,848
|
def get_all():
"""
Returns list of all tweets from this server.
"""
return jsonify([t.to_dict() for t in tweet.get_all()])
|
a8803f46ca4c32ea3a0f607a7a37d23a5d97c316
| 3,640,849
|
from carbonplan_trace.v1.glas_preprocess import select_valid_area # avoid circular import
def proportion_sig_beg_to_start_of_ground(ds):
"""
The total energy from signal beginning to the start of the ground peak,
normalized by total energy of the waveform. Ground peak assumed to be the last peak.
"""
ds = get_dist_metric_value(ds, metric='start_of_ground_peak_dist')
# the processed wf is from sig beg to sig end, select sig beg to ground peak
sig_beg_to_ground = select_valid_area(
bins=ds.rec_wf_sample_dist,
wf=ds.processed_wf,
signal_begin_dist=ds.sig_begin_dist,
signal_end_dist=ds.start_of_ground_peak_dist,
)
# make sure dimensions matches up
dims = ds.processed_wf.dims
sig_beg_to_ground = sig_beg_to_ground.transpose(dims[0], dims[1])
# total energy of the smoothed waveform
total = ds.processed_wf.sum(dim="rec_bin")
return sig_beg_to_ground.sum(dim="rec_bin") / total
|
73fbbd90c8511433bcdae225daea5b7cba9e8297
| 3,640,850
|
import requests
def post_file(url, file_path, username, password):
"""Post an image file to the classifier."""
kwargs = {}
if username:
kwargs['auth'] = requests.auth.HTTPBasicAuth(username, password)
file = {'file': open(file_path, 'rb')}
response = requests.post(
url,
files=file,
**kwargs
)
if response.status_code == HTTP_OK:
return response
return None
|
b615e5a766e6ca5d0427bfcdbd475e1b6cd5b9bb
| 3,640,851
|
def bias_init_with_prob(prior_prob):
""" initialize conv/fc bias value according to giving probablity"""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
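A quick check with the prior probability commonly used for focal-loss classification heads (assuming numpy is imported as np, as the body implies):
import numpy as np

b = bias_init_with_prob(0.01)
print(round(b, 4))   # -4.5951, i.e. sigmoid(b) ~= 0.01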
|
533f777df5e8346ab2eadf5f366a275bab099aec
| 3,640,852
|
def parse_number(s, start_position):
"""
If an integer or float begins at the specified position in the
given string, then return a tuple C{(val, end_position)}
containing the value of the number and the position where it ends.
Otherwise, raise a L{ParseError}.
"""
m = _PARSE_NUMBER_VALUE.match(s, start_position)
if not m or not (m.group(1) or m.group(2)):
raise ParseError('number', start_position)
if m.group(2): return float(m.group()), m.end()
else: return int(m.group()), m.end()
|
854e9290b5853e525ea1ba3f658f59cea37b117c
| 3,640,853
|
def training_data_provider(train_s, train_t):
"""
Concatenates two lists containing adata files
# Parameters
train_s: `~anndata.AnnData`
Annotated data matrix.
train_t: `~anndata.AnnData`
Annotated data matrix.
# Returns
Concatenated Annotated data matrix.
# Example
```python
import scgen
import anndata
train_data = anndata.read("./data/train_kang.h5ad")
test_data = anndata.read("./data/test.h5ad")
whole_data = training_data_provider(train_data, test_data)
```
"""
train_s_X = []
train_s_diet = []
train_s_groups = []
for i in train_s:
train_s_X.append(i.X.A)
train_s_diet.append(i.obs["condition"].tolist())
train_s_groups.append(i.obs["cell_type"].tolist())
train_s_X = np.concatenate(train_s_X)
temp = []
for i in train_s_diet:
temp = temp + i
train_s_diet = temp
temp = []
for i in train_s_groups:
temp = temp + i
train_s_groups = temp
train_t_X = []
train_t_diet = []
train_t_groups = []
for i in train_t:
train_t_X.append(i.X.A)
train_t_diet.append(i.obs["condition"].tolist())
train_t_groups.append(i.obs["cell_type"].tolist())
temp = []
for i in train_t_diet:
temp = temp + i
train_t_diet = temp
temp = []
for i in train_t_groups:
temp = temp + i
train_t_groups = temp
train_t_X = np.concatenate(train_t_X)
train_real = np.concatenate([train_s_X, train_t_X]) # concat all
train_real = anndata.AnnData(train_real)
train_real.obs["condition"] = train_s_diet + train_t_diet
train_real.obs["cell_type"] = train_s_groups + train_t_groups
return train_real
|
35016ecb6f57e2814dacc6e36408882025311bb9
| 3,640,854
|
import warnings
def _build_trees(base_estimator, estimator_params, params, X, y, sample_weight,
tree_state, n_trees, verbose=0, class_weight=None,
bootstrap=False):
""" Fit a single tree in parallel """
tree = _make_estimator(
_get_value(base_estimator), estimator_params,
params=params, random_state=tree_state
)
if bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree_state, n_samples)
sample_counts = np.bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
|
43259d71a5d666e371c90e15c1ca61241fbee8e0
| 3,640,855
|
def select_privilege():
"""Provide a select Privilege model for testing."""
priv = Privilege(
database_object=DatabaseObject(name="one_table", type=DatabaseObjectType.TABLE),
action=Action.SELECT,
)
return priv
|
721f8edd0b6777a082682e377a80c73f8dc2bb00
| 3,640,856
|
def getLinkToSong(res):
"""
    getLinkToSong(res): link to the first song in the playlist
    :param res: information about the playlist -> getResponse(pl_id)
    :returns: Spotify URL of the first track in the playlist
"""
return res['items'][0]['track']['external_urls']['spotify']
|
e59fe598ed900a90dcf5376d265eedfc51d8e0a7
| 3,640,858
|
def entropy_sampling(classifier, X, n_instances=1):
"""Entropy sampling query strategy, uses entropy of all probabilities as score.
This strategy selects the samples with the highest entropy in their prediction
probabilities.
Args:
classifier: The classifier for which the labels are to be queried.
X: The pool of samples to query from.
n_instances: Number of samples to be queried.
Returns:
The indices of the instances from X chosen to be labelled;
the instances from X chosen to be labelled.
"""
classwise_uncertainty = _get_probability_classes(classifier, X)
entropies = np.transpose(entropy(np.transpose(classwise_uncertainty)))
index = np.flip(np.argsort(entropies))[:n_instances]
return index, entropies[index]
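The scoring step can be sketched in isolation (using scipy's entropy as above); treating _get_probability_classes as a plain predict_proba wrapper is an assumption about the surrounding module:
import numpy as np
from scipy.stats import entropy

proba = np.array([[0.9, 0.1],    # confident
                  [0.5, 0.5],    # maximally uncertain
                  [0.7, 0.3]])
entropies = np.transpose(entropy(np.transpose(proba)))
index = np.flip(np.argsort(entropies))[:1]
print(index, entropies[index])   # [1] [0.69314718]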
|
ffc465a3e8a517e692927f051dea0162d3191cf9
| 3,640,859
|
def browser(browserWsgiAppS):
"""Fixture for testing with zope.testbrowser."""
assert icemac.addressbook.testing.CURRENT_CONNECTION is not None, \
"The `browser` fixture needs a database fixture like `address_book`."
return icemac.ab.calexport.testing.Browser(wsgi_app=browserWsgiAppS)
|
a256b814a08833eec88eb6289b6c5a57f17e7d84
| 3,640,860
|
def parse_playing_now_message(playback):
"""parse_playing_now_message
:param playback: object
:returns str
"""
track = playback.get("item", {}).get("name", False)
artist = playback.get("item", {}).get("artists", [])
artist = map(lambda a: a.get("name", ""), artist)
artist = ", ".join(list(artist))
message = "Playing '%s' from '%s' now!" % (track, artist)
if not track:
message = "Could not get current track!"
return message
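A small sketch against a minimal playback payload shaped like the Spotify API response:
playback = {"item": {"name": "Lateralus", "artists": [{"name": "Tool"}]}}
print(parse_playing_now_message(playback))   # Playing 'Lateralus' from 'Tool' now!

print(parse_playing_now_message({}))         # Could not get current track!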
|
88d7c35257c2aaee44d1bdc1ec06640603c6a286
| 3,640,861
|
import requests
def load_remote_image(image_url):
"""Loads a remotely stored image into memory as an OpenCV/Numpy array
Args:
image_url (str): the URL of the image
Returns:
numpy ndarray: the image in OpenCV format (a [rows, cols, 3] BGR numpy
array)
"""
response = requests.get(image_url, stream=True)
img = Image.open(BytesIO(response.content))
image = cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
return image
|
a760e76df679cc15788332df02e5470ec5b60ec2
| 3,640,863
|
def evlt(inp : str) -> int:
""" Evaluates the passed string and returns the value if
successful, otherwise raises an error """
operand = [] # stack for operands
operator = [] # stack for operators + parentheses
i = 0 # loop variable, cannot do range because have to increment dynamically
if inp.count('(') != inp.count(')'):
raise TooManyBracketsException()
while i < len(inp): # while not EOF
if inp[i].isdigit(): # if character is a digit
num = ""
while i < len(inp) and inp[i].isdigit(): # Logic to fetch an entire number,
num += inp[i]
i += 1
if int(num) >= 2**31 - 1:
raise OverflowError()
operand.append(int(num)) # push operand to stack
elif inp[i] == '(': # if opening brace, push to stack
operator.append(inp[i])
i += 1
elif inp[i] in operators:
try: # if operator, pop all operators having a higher precedence
while len(operator) and precedence(operator[-1]) >= precedence(inp[i]):
b = operand.pop()
a = operand.pop()
op = operator.pop()
operand.append(evlexp(a, b, op)) # evaluate them with the last 2 values in operand stack and append to itself
operator.append(inp[i]) # append operator to operator stack)
i += 1
except:
raise TooManyOperatorsException
elif inp[i] == ')': # if closing brace, evaluate all operators in between
while len(inp) != 0 and operator[-1] != '(': # while not EOF and the last(recent) item is not opening bracket
b = operand.pop()
a = operand.pop()
op = operator.pop()
operand.append(evlexp(a, b, op)) # pop the operator in order and evaluate and push to operand stack
operator.pop() # pop (
i += 1
else:
i += 1
continue
while len(operator) != 0: # while operator is not empty
op = operator.pop()
b = operand.pop()
a = operand.pop()
operand.append(evlexp(a, b, op)) # pop and evaluate operators till its empty and append to operand
# if there are no more elements in top of stack, and only one (possibly the answer)
if len(operand) == 1:
return operand[-1]
# if there's more than one element and no more operators, something wrong!
else:
raise TooManyOperandsException()
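A hedged usage sketch; it assumes the helpers referenced above (the operators set, precedence, evlexp) and the custom exception classes are defined in the same module with conventional arithmetic precedence:
print(evlt("2*(3+4)"))     # -> 14
print(evlt("10+2*6"))      # -> 22, since '*' binds tighter than '+'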
|
2c0ea8781e969f44fa0575c967366d69a19010eb
| 3,640,864
|
def _create_preactivation_hook(activations):
"""
when we add this hook to a model's layer, it is called whenever
it is about to make the forward pass
"""
def _linear_preactivation_hook(module, inputs):
activations.append(inputs[0].cpu())
return _linear_preactivation_hook
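A minimal sketch with PyTorch, assuming the returned hook is registered as a forward pre-hook on a Linear layer:
import torch
import torch.nn as nn

activations = []
layer = nn.Linear(4, 2)
layer.register_forward_pre_hook(_create_preactivation_hook(activations))
layer(torch.randn(3, 4))
print(activations[0].shape)   # torch.Size([3, 4]) -- the inputs captured before the forward pass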
|
7f4cc10f7e051ed8e30556ee054a65c4878f6c0f
| 3,640,866
|
import importlib
def import_by_path(path):
"""
Given a dotted/colon path, like project.module:ClassName.callable,
returns the object at the end of the path.
"""
module_path, object_path = path.split(":", 1)
target = importlib.import_module(module_path)
for bit in object_path.split("."):
target = getattr(target, bit)
return target
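A quick sketch using a standard-library target:
dumps = import_by_path("json:dumps")
print(dumps({"a": 1}))   # {"a": 1}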
|
939b3426f36b3a188f7a48e21551807d42cfa254
| 3,640,867
|