| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
from pathlib import Path
def get_create_data_dir():
"""Get the data directory.
When the directory does not exist it is created.
"""
# Calculate the dataset data dir
data_dir = Path(get_data_dir()).expanduser()
dataset = _dataset_settings['name']
dataset_dir = data_dir / dataset
    # Ensure that the directory exists
dataset_dir.mkdir(parents=True, exist_ok=True)
return dataset_dir
|
8fd7631504ab7b926f1f6b533d0fdabaa8cad592
| 3,639,485
|
def interpolate_bezier(points, steps=100, **kwargs):
"""Generates an array of waypoints which lie on a 2D Bezier curve described by n (x, y) points. The trajectory is
guaranteed to include the start and end points though only on (x, y, z) axes.
The curve generated is of the nth degree, where n = len(points) - 1
1st point is the start point.
2nd point indicates the orientation at the start point.
(n-1)th point indicates the orientation at the end point.
nth point is the end point.
For information about Bezier curve look at:
- http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/Bezier/bezier-der.html
:param points: (n, 2+) array of waypoints
:return: trajectory with interpolated points
"""
n = len(points) - 1
t = np.linspace(0, 1, steps).reshape((steps, 1))
B = np.zeros((steps, 6))
# could be vectorised:
# r = range(0, n+1)
# coefs = sci.misc.comb(n, r)
# t_1_pow = np.power(np.tile(t-1, (1, 6)), np.tile(r, (steps, 1)))
# t_pow = np.power(np.tile(t, (1, 6)), np.tile(r, (steps, 1)))
    for i in range(n+1):
e1 = ((1-t)**(n-i) * t**i).reshape((steps, 1))
e2 = points[i, 0:2].reshape((1, 2))
B[:, 0:2] += sci.misc.comb(n, i) * np.dot(e1, e2)
# coef = sci.misc.comb(n, i)
# B[:, 0] += coef * (1-t)**(n-i) * t**i * points[i, 0]
# B[:, 1] += coef * (1-t)**(n-i) * t**i * points[i, 1]
B[:, 2] = np.linspace(points[0, 2], points[-1, 2], steps)
B[:, 3:5] = 0
# calculate the xy slope at each point of the curve
der_x = np.diff(B[:, 0])
der_y = np.diff(B[:, 1])
B[1:, 5] = np.arctan2(der_y, der_x)
# add the initial point
B[0, :] = points[0]
return B
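# A minimal standalone sketch of the same Bernstein-polynomial evaluation for the
# (x, y) components, shown here with scipy.special.comb in place of the deprecated
# sci.misc.comb; the control points below are illustrative only.
import numpy as np
from scipy.special import comb

def bezier_xy(points, steps=100):
    points = np.asarray(points, dtype=float)
    n = len(points) - 1
    t = np.linspace(0.0, 1.0, steps).reshape(steps, 1)
    curve = np.zeros((steps, 2))
    for i in range(n + 1):
        # Bernstein basis: C(n, i) * (1 - t)^(n - i) * t^i
        curve += comb(n, i) * (1 - t) ** (n - i) * t ** i * points[i, :2]
    return curve

# Example: a cubic Bezier through four made-up control points.
xy = bezier_xy([(0, 0), (1, 2), (3, 2), (4, 0)], steps=50)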
|
403d8f6242947bc240920ea15ae6c0d72ec2d547
| 3,639,486
|
def _EAMS(track, Xmin=0.55, i0=12):
"""
Early-Age Main Sequence. Without this, the low-mass tracks do not
reach an EEP past the ZAMS before 15 Gyr.
"""
i_EAMS = _IorT_AMS(track, Xmin, i0)
return i_EAMS
|
4cde6c1e598366bbaf25ab98d2ec14b9f5a34d86
| 3,639,487
|
def decode(tokenizer, token):
"""decodes the tokens to the answer with a given tokenizer"""
answer_tokens = tokenizer.convert_ids_to_tokens(
token, skip_special_tokens=True)
return tokenizer.convert_tokens_to_string(answer_tokens)
|
4bbb58a6a0ed0d33411f9beee35ad0f2fb43698f
| 3,639,489
|
def davis_jaccard_measure(fg_mask, gt_mask):
""" Compute region similarity as the Jaccard Index.
:param fg_mask: (ndarray): binary segmentation map.
:param gt_mask: (ndarray): binary annotation map.
:return: jaccard (float): region similarity
"""
    gt_mask = gt_mask.astype(bool)
    fg_mask = fg_mask.astype(bool)
if np.isclose(np.sum(gt_mask), 0) and np.isclose(np.sum(fg_mask), 0):
return 1
else:
return np.sum((gt_mask & fg_mask)) / \
np.sum((gt_mask | fg_mask), dtype=np.float32)
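# Hedged usage sketch with made-up 2x2 masks: intersection = 1 pixel, union = 3
# pixels, so the Jaccard index is 1/3.
import numpy as np
fg = np.array([[1, 1], [0, 0]])
gt = np.array([[1, 0], [1, 0]])
iou = davis_jaccard_measure(fg, gt)  # -> 0.333...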
|
96e6c47cd3b8d71206f9cf903b3827840803cf10
| 3,639,490
|
def extract_logits(logits = None, seq_pos = None):
"""
Args
logits: Tensor(batch_size,seq_length,vocab_size) e.g.(8,1024,50257)
seq_pos: list(batch_size)
Return:
        output_logits: Tensor(batch_size,1,vocab_size), the logit extracted for each sample according to the seq_pos list.
"""
batch_size = logits.shape[0]
for i in range(batch_size):
logit = logits[i:i+1:1, seq_pos[i]:seq_pos[i]+1:1, ::]
# print("extract_logits logit shape: {}".format(logit.shape))
if i == 0 :
output_logits = logit
else:
output_logits = P.Concat()((output_logits, logit))
# print("final logits:",output_logits)
return output_logits
|
008931ca8677461de947d7a365521e1e72c53866
| 3,639,491
|
def padRect(rect, padTop, padBottom, padLeft, padRight, bounds, clipExcess = True):
"""
Pads a rectangle by the specified values on each individual side,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
# Unpack the rectangle
x, y, w, h = rect
# Pad by the specified value
x -= padLeft
y -= padTop
w += (padLeft + padRight)
h += (padTop + padBottom)
# Determine if we are clipping overflows/underflows or
# shifting the centre of the rectangle to compensate
if clipExcess == True:
# Clip any underflows
x = max(0, x)
y = max(0, y)
# Clip any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
h -= overflowY
w -= overflowX
else:
# Compensate for any underflows
underflowX = max(0, 0 - x)
underflowY = max(0, 0 - y)
x += underflowX
y += underflowY
# Compensate for any overflows
overflowY = max(0, (y + h) - bounds[0])
overflowX = max(0, (x + w) - bounds[1])
x -= overflowX
w += overflowX
y -= overflowY
h += overflowY
# If there are still overflows or underflows after our
# modifications, we have no choice but to clip them
x, y, w, h = padRect((x,y,w,h), 0, 0, 0, 0, bounds, True)
# Re-pack the padded rect
return (x,y,w,h)
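# Hedged usage sketch with made-up values: pad a (x, y, w, h) rect by 10 px on
# top/bottom and 20 px on left/right inside (height, width) bounds, clipping overflows.
padded = padRect((50, 40, 100, 80), 10, 10, 20, 20, bounds=(480, 640), clipExcess=True)
# padded == (30, 30, 140, 100); the padded rect fits, so nothing is clipped.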
|
032cafd373b59b725b8e2e28ba91e263ccae6e12
| 3,639,493
|
def gcs_csv_to_table(full_table_id: str, remote_csv_path: str) -> Table:
"""
Insert CSV from Google Storage to BigQuery Table.
:param full_table_id: Full ID of a Google BigQuery table.
:type full_table_id: str
:param remote_csv_path: Path to uploaded CSV.
:type remote_csv_path: str
    :returns: Table
"""
try:
gcs_csv_uri = f"gs://{GCP_BUCKET_NAME}/{remote_csv_path}"
job_config = LoadJobConfig(
autodetect=True,
skip_leading_rows=1,
source_format=SourceFormat.CSV,
)
load_job = gbq.load_table_from_uri(
gcs_csv_uri, full_table_id, job_config=job_config
)
LOGGER.info(f"Starting job {load_job.job_id}.")
LOGGER.info(load_job.result()) # Waits for table load to complete.
return gbq.get_table(full_table_id)
except BadRequest as e:
LOGGER.error(f"Invalid GCP request when creating table `{full_table_id}`: {e}")
except Exception as e:
LOGGER.error(f"Unexpected error when creating table `{full_table_id}`: {e}")
|
bb0713848249e2eb4e6b89db652152c6485af0ee
| 3,639,494
|
import math
def turn_xyz_into_llh(x,y,z,system):
"""Convert 3D Cartesian x,y,z into Lat, Long and Height
See http://www.ordnancesurvey.co.uk/gps/docs/convertingcoordinates3D.pdf"""
a = abe_values[system][0]
b = abe_values[system][1]
e2 = abe_values[system][2]
p = math.sqrt(x*x + y*y)
long = math.atan(y/x)
lat_init = math.atan( z / (p * (1.0 - e2)) )
v = a / math.sqrt( 1.0 - e2 * (math.sin(lat_init) * math.sin(lat_init)) )
lat = math.atan( (z + e2*v*math.sin(lat_init)) / p )
height = (p / math.cos(lat)) - v # Ignore if a bit out
# Turn from radians back into degrees
long = long / 2 / math.pi * 360
lat = lat / 2 / math.pi * 360
return [lat,long,height]
|
304facd429083032e611f2f9aca09b298a40a48b
| 3,639,496
|
def _TileGrad(op, grad):
"""Sum reduces grad along the tiled dimensions."""
input_shape = array_ops.shape(op.inputs[0])
# We interleave multiples and input_shape to get split_shape,
# reshape grad to split_shape, and reduce along all even
# dimensions (the tiled dimensions) to get the result
# with shape input_shape. For example
# input_shape = [20, 30, 40]
# multiples = [2, 3, 4]
# split_shape = [2, 20, 3, 30, 4, 40]
# axes = [0, 2, 4]
split_shape = array_ops.reshape(
array_ops.transpose(array_ops.stack([op.inputs[1], input_shape])), [-1])
axes = math_ops.range(0, array_ops.size(split_shape), 2)
# Sum reduces grad along the first dimension for IndexedSlices
if isinstance(grad, ops.IndexedSlices):
grad = math_ops.unsorted_segment_sum(
grad.values,
math_ops.mod(grad.indices, input_shape[0]),
input_shape[0])
split_shape = array_ops.concat([[1], split_shape[1:]], axis=0)
input_grad = math_ops.reduce_sum(array_ops.reshape(grad, split_shape), axes)
# Fix shape inference
if not context.executing_eagerly():
input_grad.set_shape(op.inputs[0].get_shape())
return [input_grad, None]
|
21294667ac3a31082cc2a3d09120330ce3ec1564
| 3,639,497
|
def obj_spatial_error_sum_and_naturalness_jac(s, data):
""" jacobian of error function. It is a combination of analytic solution
for motion primitive model and numerical solution for kinematic error
"""
# Extract relevant parameters from data tuple.
# Note other parameters are used for calling obj_error_sum
gmm = data[0].get_gaussian_mixture_model()
error_scale = data[-1]
quality_scale = data[-2]
logLikelihoods = _estimate_log_gaussian_prob(s, gmm.means_, gmm.precisions_cholesky_, 'full')
logLikelihoods = np.ravel(logLikelihoods)
numerator = 0
n_models = len(gmm.weights_)
for i in range(n_models):
numerator += np.exp(logLikelihoods[i]) * gmm.weights_[i] * np.dot(np.linalg.inv(gmm.covars_[i]), (s - gmm.means_[i]))
denominator = np.exp(gmm.score([s])[0])
logLikelihood_jac = numerator / denominator
kinematic_jac = approx_fprime(s, obj_spatial_error_sum, 1e-7, data[-2:])# ignore the kinematic factor and quality factor
jac = logLikelihood_jac * quality_scale + kinematic_jac * error_scale
return jac
|
e0f57a88e3b490abc8eb9dbc636701c4a06ffc05
| 3,639,498
|
import datetime
def today():
    """Return today's date as a YYYYMMDD string."""
    return datetime.date.today().strftime("%Y%m%d")
|
fdf9c83153667fb3324f31893bf3721566dea4d3
| 3,639,499
|
from .common import OcrResult
def recognize(img, lang, *, hints=None):
"""
    Recognize the text in an image and return an OcrResult.
    :param img: image to recognize, a PIL.Image.Image object
    :param lang: language to recognize, a BCP-47 language tag string
    :param hints: hints for the OCR engine, a list of values defined in OcrHint
:returns: OcrResult
OcrResult = {
lines: Tuple[OcrLine],
        extra: Any  # extra information returned by the engine
}
OcrLine = {
words: Tuple[OcrWord],
extra: Any
}
OcrWord = {
text: str,
rect: Rect,
extra: Any
}
"""
return OcrResult(tuple())
|
ea2ef038122b4953e49d787753ef112a6efe8c1c
| 3,639,500
|
def check_missing_requirements ():
"""This list of missing requirements (mencoder, mplayer, lame, and mkvmerge).
Returns None if all requirements are in the execution path.
"""
missing = []
if which("mencoder") is None:
missing.append("mencoder")
if which("mplayer") is None:
missing.append("mplayer")
if which("lame") is None:
missing.append("lame")
if which("mkvmerge") is None:
missing.append("mkvmerge")
if len(missing)==0:
return None
return missing
|
6351621a8a2ebfb52b06cb1f99fce0e02a263d08
| 3,639,501
|
def getExpMat(xy, shape, start, end, r, repeats=5):
"""
Get the expected interaction contact matrix.
xy is [[x,y]]
shape is () shape from the observed matrix.
r is resolution
"""
mat = []
i = 0
while i < repeats:
a = xy[:, 0]
b = xy[:, 1]
np.random.shuffle(a)
np.random.shuffle(b)
xy[:, 0] = a
xy[:, 1] = b
s = b-a
s = np.where( s > 0)[0]
nxy = xy[s,]
nmat = getObsMat(nxy, start, end, r)
if nmat.shape == shape:
mat.append(nmat)
i += 1
mat = np.array(mat)
return mat.mean(axis=0)
|
1aefed157a961447a562f5ea214ea55cdf6340b8
| 3,639,502
|
from crds.tests import test_table_effects, tstmod
def main():
"""Run module tests, for now just doctests only."""
return tstmod(test_table_effects)
|
c8eaba2e58c8f3b75250e8ca250d123f11670635
| 3,639,504
|
def etminan(C, Cpi, F2x=3.71, scale_F2x=True):
"""Calculate the radiative forcing from CO2, CH4 and N2O.
This function uses the updated formulas of Etminan et al. (2016),
including the overlaps between CO2, methane and nitrous oxide.
Reference: Etminan et al, 2016, JGR, doi: 10.1002/2016GL071930
Inputs:
C: [CO2, CH4, N2O] concentrations, [ppm, ppb, ppb]
Cpi: pre-industrial [CO2, CH4, N2O] concentrations
Keywords:
F2x: radiative forcing from a doubling of CO2.
scale_F2x: boolean. Scale the calculated value to the specified F2x?
Returns:
3-element array of radiative forcing: [F_CO2, F_CH4, F_N2O]
"""
Cbar = 0.5 * (C[0] + Cpi[0])
Mbar = 0.5 * (C[1] + Cpi[1])
Nbar = 0.5 * (C[2] + Cpi[2])
    # Tune the coefficient of CO2 forcing to achieve desired F2x, using
# pre-industrial CO2 and N2O. F2x_etminan ~= 3.801.
scaleCO2 = 1
if scale_F2x:
F2x_etminan = (
-2.4e-7*Cpi[0]**2 + 7.2e-4*Cpi[0] - 2.1e-4*Cpi[2] + 5.36) * np.log(2)
scaleCO2 = F2x/F2x_etminan
F = np.zeros(3)
F[0] = (-2.4e-7*(C[0] - Cpi[0])**2 + 7.2e-4*np.fabs(C[0]-Cpi[0]) - \
2.1e-4 * Nbar + 5.36) * np.log(C[0]/Cpi[0]) * scaleCO2
F[1] = (-1.3e-6*Mbar - 8.2e-6*Nbar + 0.043) * (np.sqrt(C[1]) - \
np.sqrt(Cpi[1]))
F[2] = (-8.0e-6*Cbar + 4.2e-6*Nbar - 4.9e-6*Mbar + 0.117) * \
(np.sqrt(C[2]) - np.sqrt(Cpi[2]))
return F
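# Hedged usage sketch: roughly present-day versus pre-industrial concentrations;
# the values below are illustrative only.
import numpy as np
C = np.array([410.0, 1866.0, 332.0])    # CO2 [ppm], CH4 [ppb], N2O [ppb]
Cpi = np.array([278.0, 722.0, 270.0])   # pre-industrial concentrations
F_co2, F_ch4, F_n2o = etminan(C, Cpi)   # radiative forcings in W m^-2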
|
8f80ecb153c94b806edbeffe3d384722333d2226
| 3,639,506
|
def render_manage_data_store_pages(request, html_file):
"""
Generate management pages for data_stores.
"""
# initialize session
session_maker = app.get_persistent_store_database('main_db',
as_sessionmaker=True)
session = session_maker()
data_stores = session.query(DataStore) \
.filter(DataStore.id > 1) \
.order_by(DataStore.name) \
.all()
context = {
'data_stores': data_stores,
}
table_html = \
render(request,
'streamflow_prediction_tool/{}'.format(html_file),
context)
# in order to close the session, the request needed to be rendered first
session.close()
return table_html
|
ae73c2f1f88566f7a44992c1a8c5063cc099bf93
| 3,639,507
|
def validate_password_form(p1, p2, is_open, btn, sp1, sp2):
"""Validade password form
Returns
Output('password1', 'invalid'),
Output('password2', 'invalid'),
Output('password1', 'title'),
Output('password2', 'title'),
"""
invalid = {'p1':sp1, 'p2':sp2}
title = {'p1':None, 'p2':None}
ctx = dash.callback_context
if ctx.triggered:
btn_id = ctx.triggered[0]['prop_id'].split('.')[0]
if btn_id == 'modal' or btn_id == 'clear':
return False, False, None, None
if p1:
pwd_check = password_check(p1)
if not pwd_check['ok']:
invalid['p1'] = True
if pwd_check['length_error']:
title['p1']= _(
'The password must be at least 8 characters long.'
)
elif pwd_check['digit_error']:
title['p1'] = _('The password must have numbers.')
elif pwd_check['uppercase_error'] or pwd_check['lowercase_error']:
                title['p1'] = _(
                    'The password must have uppercase and lowercase letters.'
                )
elif pwd_check['symbol_error']:
title['p1'] = _('The password must have special symbols.')
else:
invalid['p1'] = False
if p2:
if not p1:
invalid['p2'] = True
title['p2'] = _('Fill password field.')
elif not p1==p2:
invalid['p2'] = True
title['p2'] = _('Passwords don\'t match.')
else:
invalid['p2'] = False
return invalid['p1'], invalid['p2'], title['p1'], title['p2']
|
d94d577951dc11eafa10d3dbe1cf17ce32957e85
| 3,639,508
|
def cardinal_spline(points,tension=0.5):
"""Path instructions for a cardinal spline. The spline interpolates the control points.
Args:
points (list of 2-tuples): The control points for the cardinal spline.
tension (float, optional): Tension of the spline in the range [0,1]. Defaults to 0.5.
Returns:
string: Ipe path instructions
"""
instructions = [ str(points[0][0]), str(points[0][1]), 'm' ] + [ f(p) for p in points[1:] for f in [ lambda p: str(p[0]), lambda p: str(p[1])] ] + [str(tension),'C ']
return ' '.join(instructions)
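# Hedged usage sketch with three made-up control points and the default tension.
path = cardinal_spline([(0, 0), (4, 2), (8, 0)])
# path == '0 0 m 4 2 8 0 0.5 C '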
|
a549b5fcd8df2cb311563a495029901bc1edb1c1
| 3,639,509
|
def extract(x, *keys):
"""
Args:
x (dict or list): dict or list of dicts
Returns:
(tuple): tuple with the elements of the dict or the dicts of the list
"""
if isinstance(x, dict):
return tuple(x[k] for k in keys)
elif isinstance(x, list):
return tuple([xi[k] for xi in x] for k in keys)
else:
raise NotImplementedError
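# Hedged usage sketch with made-up data.
extract({'a': 1, 'b': 2}, 'a', 'b')   # -> (1, 2)
extract([{'a': 1}, {'a': 3}], 'a')    # -> ([1, 3],)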
|
c0730556786586011b0b22ae5003c2fe9ccb2894
| 3,639,510
|
def get_source_config_from_ctx(_ctx,
group_name=None,
hostname=None,
host_config=None,
sources=None):
"""Generate a source config from CTX.
:param _ctx: Either a NodeInstance or a RelationshipInstance ctx.
:param group_name: User's override value, like 'webservers'.
:param hostname: User's override value, like 'web'.
:param host_config: User's override value. Like:
{
'ansible_host': '127.0.0.1',
'ansible_user': 'ubuntu',
}
:param sources: User's sources override value.
:return:
"""
sources = sources or {}
if _ctx.type == NODE_INSTANCE and \
'cloudify.nodes.Compute' not in _ctx.node.type_hierarchy and \
_ctx.instance.runtime_properties.get(SOURCES):
return AnsibleSource(_ctx.instance.runtime_properties[SOURCES]).config
elif _ctx.type == RELATIONSHIP_INSTANCE:
host_config = host_config or \
get_host_config_from_compute_node(_ctx.target)
group_name, hostname = \
get_group_name_and_hostname(
_ctx.target, group_name, hostname)
additional_node_groups = get_additional_node_groups(
_ctx.target.node.name, _ctx.deployment.id)
else:
host_config = host_config or \
get_host_config_from_compute_node(_ctx)
group_name, hostname = \
get_group_name_and_hostname(
_ctx, group_name, hostname)
additional_node_groups = get_additional_node_groups(
_get_node(_ctx).name, _ctx.deployment.id)
if '-o StrictHostKeyChecking=no' not in \
host_config.get('ansible_ssh_common_args', ''):
_ctx.logger.warn(
'This value {0} is not included in Ansible Configuration. '
'This is required for automating host key approval.'.format(
{'ansible_ssh_common_args': '-o StrictHostKeyChecking=no'}))
hosts = {
hostname: host_config
}
sources[group_name] = {
HOSTS: hosts
}
for additional_group in additional_node_groups:
sources[additional_group] = {HOSTS: {hostname: None}}
return AnsibleSource(sources).config
|
40b293ac9e63d96919b43cd60650e7d46ff34d57
| 3,639,511
|
def index(request):
"""Show welcome to the sorting quiz."""
template = loader.get_template("ggpoll/index.html")
context = {}
return HttpResponse(template.render(context, request))
|
68eb4afde5066f1a4a097f0d725ec682e803ace4
| 3,639,512
|
def get_all_random_experiment_histories_from_files(experiment_path_prefix, net_count):
""" Read history-arrays from all specified npz-files with net_number from zero to 'net_count' and return them as
one ExperimentHistories object. """
assert net_count > 0, f"'net_count' needs to be greater than 0, but is {net_count}."
histories = get_random_experiment_histories_from_file(experiment_path_prefix, 0)
for net_number in range(1, net_count):
current_histories = get_random_experiment_histories_from_file(experiment_path_prefix, net_number)
histories = histories.stack_histories(current_histories)
return histories
|
fc36d2234025aea0e6232f15ac505adc489795c5
| 3,639,514
|
def convert_index_to_indices(index_ls, shape):
"""
    Convert a coordinate list in index_ls format into indices_ls format.
"""
assert index_ls.size <= np.prod(shape)
source = np.zeros(shape=shape)
zip_indices = np.where(source >= 0)
indices_ls = convert.zip_type_to_indices(zip_indices=zip_indices)
indices_ls = indices_ls[index_ls]
return indices_ls
|
3f1861820f81d27b7a6b0878bc768dca84fd6b3b
| 3,639,516
|
def _gen_off_list(sindx):
"""
    Given a starting index, return a function that, given a size, returns the
    list of numbered links in that range.
"""
def _gen_link_olist(osize):
return list(range(sindx, sindx + osize))
return _gen_link_olist
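# Hedged usage sketch: the call returns a closure over the starting index, which is
# then called with the number of links wanted.
offsets_from_5 = _gen_off_list(5)
offsets_from_5(3)  # -> [5, 6, 7]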
|
863ccdc08f6a7cadccc3c5ccfd0cb92a223aadda
| 3,639,518
|
def report_operation_log_list(request):
"""
    Return the list of logs for routine operations.
:param request:
:return:
"""
return administrator.report_operation_log_list(request)
|
c2bbeda9a3342e9ce9667f9ec6f232f171a164e0
| 3,639,519
|
def connect_redis(redis_host, redis_port, redis_db):
""" connect to redis """
global _conn
if _conn is None:
print "connect redis %s (%s)" % ("%s:%s" % (redis_host, redis_port),
os.getpid())
_conn = redis.Redis(host=redis_host, port=redis_port,
db=redis_db)
return _conn
|
0b3f51fcbe78e7d8075398675b62ae95f56f06b6
| 3,639,520
|
def loadSV(fname, shape=None, titles=None, aligned=False, byteorder=None,
renamer=None, **kwargs):
"""
Load a delimited text file to a numpy record array.
Basically, this function calls loadSVcols and combines columns returned by
    that function into a numpy ndarray with structured dtype. Also uses and
returns metadata including column names, formats, coloring, &c. if these
items are determined during the loading process.
**Parameters**
**fname** : string or file object
Path (or file object) corresponding to a separated variable
(CSV) text file.
**names** : list of strings
Sets the names of the columns of the resulting tabarray. If
not specified, `names` value is determined first by looking for
metadata in the header of the file, and if that is not found,
are assigned by NumPy's `f0, f1, ... fn` convention. See
**namesinheader** parameter below.
**formats** : string or list of strings
Sets the datatypes of the columns. The value of `formats` can
be a list or comma-delimited string of values describing values
for each column (e.g. "str,str,int,float" or
["str", "str", "int", "float"]), a single value to apply to all
columns, or anything that can be used in numpy.rec.array
constructor.
If the **formats** (or **dtype**) parameter are not specified,
typing is done by inference. See **typer** parameter below.
**dtype** : numpy dtype object
Sets the numpy dtype of the resulting tabarray, combining column
format and column name information. If dtype is set, any
        **names** and **formats** specifications will be overridden. If
the **dtype** (or **formats**) parameter are not specified,
typing is done by inference. See **typer** parameter below.
The **names**, **formats** and **dtype** parameters duplicate
    parameters of the NumPy record array creation interface. Additional
    parameters of the NumPy interface that are passed through are
**shape**, **titles**, **byteorder** and **aligned** (see NumPy
documentation for more information.)
**kwargs**: keyword argument dictionary of variable length
Contains various parameters to be passed down to loadSVcols. These may
include **skiprows**, **comments**, **delimiter**, **lineterminator**,
**uselines**, **usecols**, **excludecols**, **metametadata**,
**namesinheader**,**headerlines**, **valuefixer**, **linefixer**,
**colfixer**, **delimiter_regex**, **inflines**, **typer**,
**missingvalues**, **fillingvalues**, **verbosity**, and various CSV
module parameters like **escapechar**, **quoting**, **quotechar**,
**doublequote**, **skipinitialspace**.
**Returns**
**R** : numpy record array
Record array constructed from data in the SV file
**metadata** : dictionary
Metadata read and constructed during process of reading file.
**See Also:**
:func:`tabular.io.loadSVcols`, :func:`tabular.io.saveSV`,
:func:`tabular.io.DEFAULT_TYPEINFERER`
"""
[columns, metadata] = loadSVcols(fname, **kwargs)
if 'names' in metadata.keys():
names = metadata['names']
else:
names = None
if 'formats' in metadata.keys():
formats = metadata['formats']
else:
formats = None
if 'dtype' in metadata.keys():
dtype = metadata['dtype']
else:
dtype = None
if renamer is not None:
        print('Trying user-given renamer ...')
renamed = renamer(names)
if len(renamed) == len(uniqify(renamed)):
names = renamed
            print('... using renamed names (original names will be in return '
                  'metadata)')
else:
            print('... renamer failed to produce unique names, not using.')
if names and len(names) != len(uniqify(names)):
        print('Names are not unique, reverting to default naming scheme.')
names = None
return [utils.fromarrays(columns, type=np.ndarray, dtype=dtype,
shape=shape, formats=formats, names=names,
titles=titles, aligned=aligned,
byteorder=byteorder), metadata]
|
94ac8943ff50273162066db53040107406f27059
| 3,639,522
|
def rationalize_quotes_from_table(table, rationalizeBase=10000):
"""
Retrieve the data from the given table of the SQLite database
It takes parameters:
table (this is one of the Quote table models: Open, High, Low, or Close)
It returns a tuple of lists
"""
first_row = table.select().limit(1).get()
rationalize_bull_1x_price = rationalizeBase / first_row.bull_1x_price
rationalize_bear_1x_price = rationalizeBase / first_row.bear_1x_price
rationalize_bull_3x_price = rationalizeBase / first_row.bull_3x_price
rationalize_bear_3x_price = rationalizeBase / first_row.bear_3x_price
indices = []
dates = []
bull_1x_prices = []
bear_1x_prices = []
bull_3x_prices = []
bear_3x_prices = []
for row in table.select():
indices.append(row.id)
dates.append(row.date)
bull_1x_prices.append(row.bull_1x_price * rationalize_bull_1x_price)
bear_1x_prices.append(row.bear_1x_price * rationalize_bear_1x_price)
bull_3x_prices.append(row.bull_3x_price * rationalize_bull_3x_price)
bear_3x_prices.append(row.bear_3x_price * rationalize_bear_3x_price)
return indices, dates, bull_1x_prices, bear_1x_prices, bull_3x_prices, bear_3x_prices
|
ee1c8310c12e7e53e9ca2677dd61d7d2525603fd
| 3,639,523
|
def k(func):
"""定义一个装饰器函数"""
def m(*args, **kw):
print('call %s():' % func.__name__)
return func(*args, **kw)
return m
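# Hedged usage sketch: decorate a made-up function with the logging wrapper.
@k
def greet(name):
    return 'hello ' + name

greet('world')  # prints "call greet():" and returns 'hello world'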
|
3cc958033fd66547e523882435494f27ae81b096
| 3,639,524
|
import requests
def get_playlist_object(playlist_url, access_token):
"""
playlist_url : url of spotify playlist
access_token : access token gotten from client credentials authorization
return object containing playlist tracks
"""
playlist_id = playlist_url.split("/")[-1]
playlist_endpoint = f"https://api.spotify.com/v1/playlists/{playlist_id}"
get_header = {
"Authorization" : "Bearer " + access_token
}
# API request
response = requests.get(playlist_endpoint, headers=get_header)
playlist_object = response.json()
return playlist_object
|
8c7ed1a1b9574e2e0870d3091452accf5909f982
| 3,639,525
|
from typing import Tuple
def guess_identifiers(fuzzy_base_name: str) -> Tuple[str, str]:
"""
Given a fuzzy base name, guess the corresponding (item ID, base name)
identifier pair.
:param fuzzy_base_name: The base name to be matched.
:return: The identifier pair with the closest matching base name.
"""
sql = 'SELECT base_name FROM item_info'
choices = _conn.execute(sql).fetchall()
base_name = process.extractOne(fuzzy_base_name, choices)[0][0]
sql2 = 'SELECT item_id FROM item_info WHERE base_name = ?'
item_id = _conn.execute(sql2, (base_name,)).fetchone()[0]
return item_id, base_name
|
6ac609268a92c16408eb414d7944cbd09fedfcc5
| 3,639,526
|
def make_image(center=(.1,-.4),dpi=500,X_cut_min = -.59 -xcut_offset,Y_cut_max = 1.61
+ ycut_offset,X_cut_max = .12-xcut_offset,Y_cut_min = .00 +ycut_offset,bands=23 ):
"""make visual count it by area then have hist values for normalization wih movement data
to be exported and then can be counted
PARAMS
------------
center : tuple
where beacon is
dpi : int
dots per inch - resolution - if changed can mess up pixel count
X_cut,Y_cut : int
        corners of the rectangle, same as used for cutting of rears - floor of arena
bands : int
        number of circles fitting in the rectangle - max is 23
Returns
------------
Histogram and appropriate bins made by the histogram
Used for area estimation later on
"""
fig, ax1 = plt.subplots(1, 1, sharex=True,dpi=dpi,)
fig.patch.set_visible(False)
rectangle = patches.Rectangle((X_cut_min,Y_cut_min), (abs(X_cut_min)+abs(X_cut_max)),abs(Y_cut_min)+abs(Y_cut_max) , color="white")
ax1.add_patch(rectangle)
#plt.plot(center[0],center[1], "ro")
color = np.linspace(0,.99,bands+1)
for i in reversed(range(bands)):
c=color[i]
patch = patches.Circle((center[0],center[1]), radius=.075*i,color=str(c))
ax1.add_patch(patch)
patch.set_clip_path(rectangle)
ax1.axis("equal")
ax1.axis("off")
fig.savefig('norm_graph.png', dpi=dpi, transparent=True)
img= Image.frombytes('RGB',fig.canvas.get_width_height(),fig.canvas.tostring_rgb())
image_array = np.asarray(img)
hist, bins = np.histogram(image_array,bins=bands,range=(0,249))
#plt.show()
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
#plt.bar(center, hist, align='center', width=width)
return hist,bins
|
a90a92318b30e5e4069dc415d5a4693d844b8c18
| 3,639,527
|
def read_answer_patterns(pattern_file_path):
"""load answer patterns into qid2patterns dictionary
"""
qid2patterns = {}
last_qid = None
with open(pattern_file_path) as f:
for line in f :
qid, pattern = line.strip().split("\t")
if qid != last_qid : # start collecting patterns for a new qid
if last_qid != None: # if not the first question
qid2patterns[last_qid] = patterns
last_qid = qid
patterns = [pattern]
else: # collect patterns for the current qid
patterns.append(pattern)
qid2patterns[last_qid] = patterns
return qid2patterns
|
da8f018deb15088b359044f22cfa71f0b8305af7
| 3,639,528
|
def fitness(coords, solution):
"""
Total distance of the current solution path.
"""
N = len(coords)
cur_fit = 0
for i in range(N):
cur_fit += dist(coords[solution[i % N]], coords[solution[(i + 1) % N]])
return cur_fit
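# Hedged usage sketch; `dist` is assumed to be a Euclidean helper and is defined here
# only for illustration (the original module supplies its own).
import math
def dist(a, b):
    return math.hypot(a[0] - b[0], a[1] - b[1])

coords = [(0, 0), (0, 3), (4, 3)]
fitness(coords, [0, 1, 2])  # 3 + 4 + 5 = 12.0 for the closed tour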
|
716dee705be75bbf6f64ec55b39187ab567edfa0
| 3,639,529
|
import numpy as np
def f_of_sigma(sigma,A=0.186,a=1.47,b=2.57,c=1.19):
"""
The prefactor in the mass function parametrized
as in Tinker et al 2008. The default values
of the optional parameters correspond to a mean
halo density of 200 times the background. The
values can be found in table 2 of
Tinker 2008 ApJ 679, 1218
Parameters
----------
sigma: float
Standard deviation of the linear power spectrum
A,a,b,c: float, optional
0.186 by default
Returns
-------
f: float
Value of f(sigma)
"""
f = A*((sigma/b)**(-a)+1)*np.exp(-c/sigma/sigma)
return f
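# Hedged usage sketch: with the default Tinker et al. (2008) coefficients,
# f(sigma = 1) evaluates to roughly 0.28.
f_of_sigma(1.0)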
|
89abe82df8a4384e74eb556172c9d46193b731da
| 3,639,530
|
from typing import Sequence
import time
def create_role(
role_name: str,
base_session: boto3.Session,
region: str,
auto_trust_caller_identity=True,
allowed_services: Sequence[str] = [],
allowed_aws_entities: Sequence[str] = [],
external_id: str = None,
):
"""
Creates a role that lets a list of specified services assume the role.
:return: The newly created role.
"""
iam = base_session.client("iam")
trusted_entities = set(allowed_aws_entities)
if auto_trust_caller_identity:
trusted_entities.add(get_caller_identity(base_session, region))
try:
role = exponential_retry(
iam.create_role,
["AccessDenied", "ServiceFailureException"],
RoleName=role_name,
AssumeRolePolicyDocument=_get_trust_policy(allowed_services, trusted_entities, external_id),
MaxSessionDuration=MAX_ASSUME_ROLE_DURATION,
)
if "role_exists" in iam.waiter_names:
iam.get_waiter("role_exists").wait(RoleName=role_name)
time.sleep(3)
else:
time.sleep(15) # give some time for IAM propagation
module_logger.info(f"Created role {role_name} for new services {allowed_services} and entities {trusted_entities}")
except ClientError as ex:
module_logger.exception("Couldn't create role %s. Exception: %s", role_name, str(ex))
raise
else:
return role
|
7fe2043235f3391af96c0767fa7e79f2ef9d8ce3
| 3,639,531
|
def get_auto_switch_state(conn):
"""Get the current auto switch enabled / disabled state"""
packet = _request(conn, GET_AUTO_SWITCH_STATE)
if not _validate_packet(packet):
raise ChecksumError()
return _decode_toggle(packet)
|
f87f838bafb03e9d9fbea6e9a6285ede56dbb09d
| 3,639,532
|
def SGD(X, y, lmd, gradient, n_epochs, M, opt = "SGD", eta0 = None, eta_type = 'schedule', t0=5, t1=50, momentum = 0., rho = 0.9, b1 = 0.9, b2 = 0.999):
"""Stochastic Gradient Descent Algorithm
Args:
- X (array): design matrix (training data)
- y (array): output dataset (training data)
- gradient (function): function to compute the gradient
- n_epochs (int): number of epochs
- M (int): size of minibatches
- opt (string): "SGD", "ADAGRAD", "RMS", "ADAM" - different optimizers
- eta0 (float): learning rate if 'static' or 'invscaling'
- eta_type = 'static', 'schedule', 'invscaling', 'hessian' - different methods for evaluating the learning rate
        - t0 (float): initial parameter to compute the learning rate in 'schedule'
        - t1 (float): sequential parameter to compute the learning rate in 'schedule'
- momentum, rho, b1, b2 (float): parameters for different optimizers
Returns:
beta/theta-values"""
if opt not in optimizers:
raise ValueError("Optimizer must be defined in "+str(optimizers))
if eta_type not in eta_types:
raise ValueError("Learning rate type must be defined within "+str(eta_types))
theta = np.random.randn(X.shape[1])
m = int(X.shape[0]/M)
v = np.zeros(X.shape[1]) # parameter for velocity (momentum), squared-gradient (adagrad, RMS),
ma = np.zeros(X.shape[1]) # parameter for adam
delta = 1e-1
for epoch in range(n_epochs):
for i in range(m):
random_index = M*np.random.randint(m)
Xi = X[random_index:random_index + M]
yi = y[random_index:random_index + M]
gradients = gradient(Xi, yi, theta, lmd) #* X.shape[0] #2.0 * Xi.T @ ((Xi @ theta)-yi)
            # Evaluate the Hessian matrix; the step size should stay below the inverse of its largest eigenvalue
H = (2.0/X.shape[0])* (X.T @ X)
eigvalues, eigvects = np.linalg.eig(H)
eta_opt = 1.0/np.max(eigvalues)
eta = eta_opt
if not eta0:
eta0=eta
if eta_type == 'static':
eta = eta0
elif eta_type == 'schedule':
eta = learning_schedule(epoch*m+i, t0=t0, t1=t1)
elif eta_type == 'invscaling':
                power_t = 0.25  # one can change it but I don't want to overcrowd the arguments
eta = eta0 / pow(epoch*m+i, power_t)
elif eta_type == 'hessian':
pass
#assert eta > eta_opt, "Learning rate higher than the inverse of the max eigenvalue of the Hessian matrix: SGD will not converge to the minimum. Need to set another learning rate or its paramentes."
if opt == "SDG":
v = momentum * v - eta * gradients
theta = theta + v
elif opt == "ADAGRAD":
v = v + np.multiply(gradients, gradients)
theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients)
elif opt == "RMS":
v = rho * v + (1. - rho) * np.multiply(gradients, gradients)
theta = theta - np.multiply(eta / np.sqrt(v+delta), gradients)
elif opt == "ADAM":
ma = b1 * ma + (1. - b1) * gradients
v = b2 * v + (1. - b2) * np.multiply(gradients, gradients)
ma = ma / (1. - b1)
v = v / (1. - b2)
theta = theta - np.multiply(eta / np.sqrt(v+delta), ma)
return theta
|
916edb97d98757b6f18092a3c83622fb982ddfcb
| 3,639,533
|
def get_last_oplog_entry(client):
"""
gets most recent oplog entry from the given pymongo.MongoClient
"""
oplog = client['local']['oplog.rs']
cursor = oplog.find().sort('$natural', pymongo.DESCENDING).limit(1)
docs = [doc for doc in cursor]
if not docs:
raise ValueError("oplog has no entries!")
return docs[0]
|
069497ffd6eb0354c00858695d065695c617b5e6
| 3,639,534
|
import math
def schmidt_quasi_norm(size):
"""
Returns an array of the Schmidt Quasi-normalised values
Array is symmetrical about the diagonal
"""
schmidt = square_array(size)
for n in range(size):
for m in range(n + 1):
if n == 0:
double = 1
else:
double = double_factorial(2 * n - 1)
schmidt[m][n] = (
math.sqrt(
((2 - kronecker_delta(0, m)) * math.factorial(n - m))
/ math.factorial(n + m)
)
* double
/ math.factorial(n - m)
)
return schmidt
|
b71b6a7733eb2b88f107ca904cf570c8f5841263
| 3,639,535
|
def _merge_jamos(initial, medial, final=None):
"""Merge Jamos into Hangul syllable.
Raises:
AssertionError: If ``initial``, ``medial``, and ``final`` are not in
``INITIAL``, ``MEDIAL``, and ``FINAL`` respectively.
"""
assert initial in INITIALS
assert medial in MEDIALS
final = "∅" if final is None else final
assert final in FINALS
return chr(0xAC00 +
588 * _INITIALS_IDX[initial] +
28 * _MEDIALS_IDX[medial] +
_FINALS_IDX[final])
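# Hedged standalone sketch of the same composition formula, with the jamo indices
# hard-coded for illustration instead of taken from the INITIALS/MEDIALS/FINALS tables:
# initial 'ㅎ' has index 18, medial 'ㅏ' index 0, final 'ㄴ' index 4.
chr(0xAC00 + 588 * 18 + 28 * 0 + 4)  # -> '한'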
|
9ff0e53d8decfc3db74d319fd366595bcac18e5c
| 3,639,536
|
def get_feature_set(eq, features):
"""Get features from their strings
Arguments:
eq {Equity} -- equity to build around
features {string array} -- features and params to use
Returns:
list -- list of ndarray of floats
"""
feature_set = []
for feature in features:
f = get_feature(eq, feature)
for feat in f:
feature_set.append(feat)
return feature_set
|
78b4ad05b98ec776e7f07f16736739c909ce1e64
| 3,639,537
|
from typing import Optional
def class_http_endpoint(methods: METHODS, rule_string: str, side_effect: Optional[HTTP_SIDE_EFFECT] = None, **kwargs):
"""
Creates an HTTP endpoint template. Declare this as a class variable in your webserver subclass to automatically add
the endpoint to all instances. Can be used as a decorator.
Args:
methods: forwarded to MockHTTPEndpoint
rule_string: forwarded to MockHTTPEndpoint
side_effect: forwarded to MockHTTPEndpoint
**kwargs: forwarded to MockHTTPEndpoint
Returns:
A new http endpoint template
"""
def ret(side_effect_method):
return HTTPEndpointTemplate(methods, rule_string, side_effect_method=side_effect_method, **kwargs)
if side_effect is not None:
return ret(side_effect)
return ret
|
62449e088ff66080a7165497d6f2434971818f62
| 3,639,538
|
def value(iterable, key=None, position=1):
"""Generic value getter. Returns containing value."""
if key is None:
if hasattr(iterable, '__iter__'):
return iterable[position]
else:
return iterable
else:
return iterable[key]
|
df49496ab8fa4108d0c3d04035ffa318a9c6a035
| 3,639,539
|
def find_aa_seqs(
aa_seq: str,
var_sites: str,
n_flanking: int = 7
):
"""Grabs the flanking AA sequence around a given location in a protein sequence string.
Args:
aa_seq: Protein sequence string.
        var_sites: Delimited string of integer site locations of interest (1-indexed, not 0-indexed).
n_flanking: The number of flanking AAs to grab around the site of interest.
Returns: AA sequence centered around var_site.
"""
sites = [max(int(v.strip())-1, 0) for v in var_sites.split(var_site_delimiter)]
seqs = []
for var_site in sites:
n = int(var_site)
if len(aa_seq) < n:
return '_'*(1+(n_flanking*2))
left_ = '_'*max((n_flanking - n), 0)
right_ = '_'*max(((n+n_flanking+1) - len(aa_seq)), 0)
aas = aa_seq[max((n-n_flanking), 0):min(len(aa_seq), (n+n_flanking+1))]
seqs.append(left_ + aas + right_)
return var_site_delimiter.join(seqs)
|
f6b75215347eb829d2b023138abeff3a44ab1d36
| 3,639,540
|
def compact_interval_string(value_list):
"""Compact a list of integers into a comma-separated string of intervals.
Args:
value_list: A list of sortable integers such as a list of numbers
Returns:
A compact string representation, such as "1-5,8,12-15"
"""
if not value_list:
return ''
value_list.sort()
# Start by simply building up a list of separate contiguous intervals
interval_list = []
curr = []
for val in value_list:
if curr and (val > curr[-1] + 1):
interval_list.append((curr[0], curr[-1]))
curr = [val]
else:
curr.append(val)
if curr:
interval_list.append((curr[0], curr[-1]))
    # For each interval collapse it down to "first-last" or just "first"
    # if first == last.
return ','.join([
'{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])
for pair in interval_list
])
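# Hedged usage sketch with a made-up list of values.
compact_interval_string([1, 2, 3, 5, 8, 12, 13, 14, 15])  # -> '1-3,5,8,12-15'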
|
b479b45dc68a0bce9628a19be17185437f3edca6
| 3,639,542
|
def _mk_asm() -> str:
"""
Generate assembly to program all allocated translation tables.
"""
string = ""
for n,t in enumerate(table.Table._allocated):
string += _mk_table(n, t)
keys = sorted(list(t.entries.keys()))
while keys:
idx = keys[0]
entry = t.entries[idx]
if type(entry) is Region:
string += _mk_blocks(n, t, idx, entry)
for k in range(idx, idx+entry.num_contig):
keys.remove(k)
else:
string += _mk_next_level_table(n, idx, entry)
keys.remove(idx)
return string
|
a1e6725b20877c10a400d8f13890e914adf8024b
| 3,639,543
|
def getNuitkaModules():
""" Create a list of all modules known to Nuitka.
Notes:
This will be executed at most once: on the first time when a module
is encountered and cannot be found in the recorded calls (JSON array).
Returns:
List of all modules.
"""
mlist = []
for m in getRootModules():
if m not in mlist:
mlist.append(m)
for m in done_modules:
if m not in mlist:
mlist.append(m)
for m in uncompiled_modules:
if m not in mlist:
mlist.append(m)
for m in active_modules:
if m not in mlist:
mlist.append(m)
return mlist
|
7596c18f2b38883f1f8c3201597b57fc6096752b
| 3,639,544
|
def run_model(network):
"""
Runs a model with pre-defined values.
"""
model = network(vocab_size+1, EMBEDDING_SIZE)
model.cuda()
EPOCHS = 20
train_model(model, train, epochs=EPOCHS, echo=False)
return model
|
a10980b6f8dd5ff9e048b07ee64215187acb8467
| 3,639,545
|
def is_prime(num):
""" Assumes num > 3 """
if num % 2 == 0:
return False
    for p in range(3, int(num**0.5)+1, 2):  # Step of 2 to skip even divisors
if num % p == 0:
return False
return True
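# Hedged usage sketch (the function assumes num > 3, so even trial divisors are skipped).
is_prime(29)   # -> True
is_prime(33)   # -> False, divisible by 3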
|
e898026d0362967400cfee4e70a74ac02a64b6f1
| 3,639,546
|
def verify_password(email_or_token, password):
"""
    The email and password are validated by existing methods on the User model; if the
    login credentials are correct, this verification callback returns True.
    The callback stores the authenticated user in Flask's global object g so that view
    functions can access it.
    Note: for anonymous logins this function returns True and assigns the AnonymousUser
    instance provided by Flask-Login to g.current_user.
    :param email_or_token:
    :param password:
    :return:
"""
if email_or_token == '':
        # The API blueprint allows anonymous access; in that case the email field sent by the client must be empty.
        # That is, if the field is empty, assume an anonymous user.
g.current_user = AnoymousUser()
return True
if password == '':
        # If the password is empty, assume email_or_token holds a token and authenticate with the token.
g.current_user = User.verify_auth_token(email_or_token)
g.token_used = True
return g.current_user is not None
    # If both arguments are non-empty, assume regular email/password authentication.
user = User.query.filter_by(email=email_or_token).first()
if not user:
return False
g.current_user = user
g.token_used = False
return user.verify_password(password)
|
09890ad0ae33f7e700148b56098df6e3ecc69d39
| 3,639,547
|
def is_valid_orcid_id(orcid_id: str):
"""adapted from stdnum.iso7064.mod_11_2.checksum()"""
check = 0
for n in orcid_id:
check = (2 * check + int(10 if n == "X" else n)) % 11
return check == 1
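# Hedged usage sketch: the checksum loop expects the 16 ORCID characters without
# hyphens; the value below is the ORCID documentation sample ID 0000-0002-1825-0097.
is_valid_orcid_id("0000000218250097")  # -> True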
|
5866e4465a24f46aa4c7015902eac53684da7b04
| 3,639,548
|
def extract_entity(entities, entity_type=_PERSON):
"""
Extract name from the entity specified in entity_type.
We use the JSON format to extract the entity information:
-
:param entities:
:param entity_type:
:return:
"""
if not entity_type:
raise ValueError('Invalid entity type')
if _ENTITIES not in entities:
raise ValueError('No entities format')
try:
extracted_entities = []
log.info('extract_entity() Searching for %s in %r', entity_type,
entities)
for entity in entities[_ENTITIES]:
# Extract entity (PERSON, ORGANIZATION)
if _TYPE in entity:
if entity[_TYPE] == entity_type:
entity_name = entity[_NAME]
log.info('extract_entity() Extracting %s from entity %s',
entity_type, entity_name)
if entity_name[0].isupper():
if entity[_METADATA]:
log.info('extract_entity() | Insert %s: %s | %s ',
entity_type, entity_name,
entity[_METADATA])
extracted_entities.append(entity[_NAME])
else:
# Filter entity name by discarding dictionary of
# words.
if not set(extract_filter()) & set(
entity_name.lower().split()):
log.info('extract_entity() | Insert %s %s ',
entity_type, entity_name)
extracted_entities.append(entity[_NAME])
return extracted_entities
except KeyError as e:
log.exception(e)
|
c357c0cc5d4365ea59f1b852b5e800fe513dfe3c
| 3,639,549
|
def reconstruct_from_patches(img_arr, org_img_size, stride=None, size=None):
"""[summary]
Args:
img_arr (numpy.ndarray): [description]
org_img_size (tuple): [description]
stride ([type], optional): [description]. Defaults to None.
size ([type], optional): [description]. Defaults to None.
Raises:
ValueError: [description]
Returns:
numpy.ndarray: [description]
"""
#print('Img_Arr : ',img_arr.shape)
#print('Orig_Img_Size : ',org_img_size)
# check parameters
if type(org_img_size) is not tuple:
raise ValueError("org_image_size must be a tuple")
if img_arr.ndim == 3:
img_arr = np.expand_dims(img_arr, axis=0)
if size is None:
size = img_arr.shape[1]
if stride is None:
stride = size
nm_layers = img_arr.shape[3]
i_max = org_img_size[0] // stride
if i_max*stride < org_img_size[0] :
i_max = i_max + 1
j_max = org_img_size[1] // stride
if j_max*stride < org_img_size[1] :
j_max = j_max + 1
#total_nm_images = img_arr.shape[0] // (i_max ** 2)
total_nm_images = img_arr.shape[0] // (i_max * j_max)
nm_images = img_arr.shape[0]
images_list = []
kk = 0
for img_count in range(total_nm_images):
img_r = np.zeros(
(i_max*stride, j_max*stride, nm_layers), dtype=img_arr[0].dtype
)
for i in range(i_max):
for j in range(j_max):
for layer in range(nm_layers):
img_r[
i * stride : i * stride + size,
j * stride : j * stride + size,
layer,
] = img_arr[kk, :, :, layer]
kk += 1
img_bg = np.zeros(
(org_img_size[0], org_img_size[1], nm_layers), dtype=img_arr[0].dtype
)
img_bg = img_r[0:org_img_size[0], 0:org_img_size[1], 0:]
images_list.append(img_bg)
return np.stack(images_list)
|
1ad80afd7d4f09de1ece4e8ba74844477d2d99be
| 3,639,551
|
import requests
def process_highlight(entry, img_width):
"""
Function processing highlights extracted from DOM tree. Downloads image based
on its url and scales it. Prettifies text by inserting newlines and
shortening author lists.
Parameters
----------
entry : dict of str
Dictionary created by extract_highlights function.
img_width : int
Width of image to resize to.
Returns
-------
dict
Highlight dict with downloaded and resized image and prettified text
"""
# 'https:' is missing in page src links
if not entry['img'].startswith("https"):
entry['img'] = "https:" + entry['img']
# fetch the image and resize it to common width
entry['img'] = resize_img_to_x(Image.open(requests.get(entry['img'], stream=True).raw), img_width)
entry['title'] = newline_join(entry['title'], max_letters['title'])
entry['authors'] = shorten_authors(entry['authors'])
entry['comment'] = newline_join(entry['comment'], max_letters['comment'])
return(entry)
|
6a09cb87971725fa3df9eb8e6e98cee315701af4
| 3,639,552
|
from geometry_msgs.msg import Pose, Point, Quaternion
def SE3ToROSPose(oMg):
"""Converts SE3 matrix to ROS geometry_msgs/Pose format"""
xyz_quat = pin.SE3ToXYZQUATtuple(oMg)
return Pose(position=Point(*xyz_quat[:3]), orientation=Quaternion(*xyz_quat[3:]))
|
ebf806bd52a4b60252a2001fe2de8122bd7cd201
| 3,639,554
|
def calib_graph_to_infer_graph(calibration_graph_def, is_dynamic_op=False):
"""Convert an existing calibration graph to inference graph.
Args:
calibration_graph_def: the calibration GraphDef object with calibration data
is_dynamic_op: whether to create dynamic static engines from calibration
Returns:
New GraphDef with TRTEngineOps placed in graph replacing calibration nodes.
Raises:
RuntimeError: if the returned status message is malformed.
"""
def py2string(inp):
return inp
def py3string(inp):
return inp.decode("utf-8")
if _six.PY2:
to_string = py2string
else:
to_string = py3string
is_calib_graph = False
for n in calibration_graph_def.node:
if n.op == "TRTEngineOp":
is_calib_graph = is_calib_graph or not n.attr["calibration_data"].s
if not is_calib_graph:
tf_logging.error(
"Not a calib graph. Doesn't seem to contain any calibration nodes.")
return None
graph_str = calibration_graph_def.SerializeToString()
out = calib_convert(graph_str, is_dynamic_op)
status = to_string(out[0])
output_graph_def_string = out[1]
del graph_str # Save some memory
if len(status) < 2:
raise _impl.UnknownError(None, None, status)
if status[:2] != "OK":
msg = status.split(";")
if len(msg) == 1:
raise RuntimeError("Status message is malformed {}".format(status))
# pylint: disable=protected-access
raise _impl._make_specific_exception(None, None, ";".join(msg[1:]),
int(msg[0]))
# pylint: enable=protected-access
output_graph_def = graph_pb2.GraphDef()
output_graph_def.ParseFromString(output_graph_def_string)
del output_graph_def_string # Save some memory
return output_graph_def
|
a29b556775aff27eed8dad404c3684e452e88c86
| 3,639,555
|
def calc_minimum_angular_variance_1d(var_r, phi_c, var_q):
"""Calculate minimum possible angular variance of a beam achievable with a correction lens.
Args:
var_r (scalar): real space variance.
phi_c (scalar): real-space curvature - see above.
var_q (scalar): angular variance of the beam.
Returns:
var_q_min (scalar): minimum possible angular variance of the beam.
"""
var_q_min = var_q - 4*phi_c**2/var_r
return var_q_min
|
c5e2144f44b532acbf8eb9dfb83c991af3756abf
| 3,639,556
|
def grid_subsampling(points, features=None, labels=None, ins_labels=None, sampleDl=0.1, verbose=0):
"""
CPP wrapper for a grid subsampling (method = barycenter for points and features)
:param points: (N, 3) matrix of input points
:param features: optional (N, d) matrix of features (floating number)
:param labels: optional (N,) matrix of integer labels
:param ins_labels: optional (N,) matrix of integer instance labels
:param sampleDl: parameter defining the size of grid voxels
:param verbose: 1 to display
:return: subsampled points, with features and/or labels depending of the input
"""
if (features is None) and (labels is None):
return cpp_subsampling.subsample(points,
sampleDl=sampleDl,
verbose=verbose)
elif (labels is None):
return cpp_subsampling.subsample(points,
features=features,
sampleDl=sampleDl,
verbose=verbose)
elif (features is None):
return cpp_subsampling.subsample(points,
classes=labels,
ins_classes=ins_labels,
sampleDl=sampleDl,
verbose=verbose)
else:
return cpp_subsampling.subsample(points,
features=features,
classes=labels,
ins_classes=ins_labels,
sampleDl=sampleDl,
verbose=verbose)
|
3aebd24307307344c0ff804b3ee189ff4da98f0d
| 3,639,557
|
def delete_all_collections_from_collection(collection, api_key=None):
"""
Delete *ALL* Collections from a Collection.
:param collection: The Collection to remove *all* Collections from.
:type collection: str
:param api_key: The API key to authorize request against.
:type api_key: str
:return
:rtype
"""
assertions.datatype_str('collection', collection)
url = '/collections/{}/collections'.format(collection)
return utils.request('DELETE', url, api_key=api_key, accept=True)
|
1a039c850f1bbf82f0c6683081874e1971c40255
| 3,639,558
|
import torch
def generate_kbit_random_tensor(size, bitlength=None, **kwargs):
"""Helper function to generate a random k-bit number"""
if bitlength is None:
bitlength = torch.iinfo(torch.long).bits
if bitlength == 64:
return generate_random_ring_element(size, **kwargs)
rand_tensor = torch.randint(0, 2 ** bitlength, size, dtype=torch.long, **kwargs)
if rand_tensor.is_cuda:
return CUDALongTensor(rand_tensor)
return rand_tensor
|
c87fc7f353b15a4dd4b6d980cf2e365f6ac6a4bc
| 3,639,559
|
import pickle
def cache_by_sha(func):
""" only downloads fresh file, if we don't have one or we do and the sha has changed """
@wraps(func)
def cached_func(*args, **kwargs):
cache = {}
list_item = args[1]
dest_dir = kwargs.get('dest_dir')
path_to_file = list_item.get('path', '')
file_out = '{}{}'.format(dest_dir, path_to_file)
p_file_out = '{}{}.pickle'.format(dest_dir, path_to_file)
makedirs(dirname(file_out), exist_ok=True)
if exists(p_file_out) and exists(file_out):
with open(p_file_out, 'rb') as pf:
cache = pickle.load(pf)
cache_sha = cache.get('sha', False)
input_sha = list_item.get('sha', False)
if cache_sha and input_sha and cache_sha == input_sha:
# do nothing as we have the up to date file already
return None
else:
with open(p_file_out, mode='wb+') as pf:
pickle.dump(list_item, pf, pickle.HIGHEST_PROTOCOL)
return func(*args, **kwargs)
return cached_func
|
d95010ba433c9b9f27dcb2f3fe05d3b609cee3fb
| 3,639,561
|
def rewrite_by_assertion(tm):
"""
Rewrite the tm by assertions. Currently we only rewrite the absolute boolean variables.
"""
global atoms
pt = refl(tm)
# boolvars = [v for v in tm.get_vars()] + [v for v in tm.get_consts()]
return pt.on_rhs(*[top_conv(replace_conv(v)) for _, v in atoms.items()]).on_rhs(*[top_conv(replace_conv(v)) for _, v in atoms.items()])
|
51eb546b9b0414091152d018aac6a6eaf2149d39
| 3,639,562
|
import json
def hash_cp_stat(fdpath, follow_symlinks=False, hash_function=hash):
""" Returns hash of file stat that can be used for shallow comparision
default python hash function is used which returns a integer. This
can be used to quickly compare files, for comparing directories
see hash_walk().
"""
stat = cp_stat(fdpath, follow_symlinks)
if stat:
return hash_function(json.dumps(stat, sort_keys=True).encode("utf-8"))
|
c16e2f00fb278d69e9307b8d528a7807d7c404d6
| 3,639,563
|
def multi_label_head(n_classes,
label_name=None,
weight_column_name=None,
enable_centered_bias=False,
head_name=None,
thresholds=None,
metric_class_ids=None,
loss_fn=None):
"""Creates a Head for multi label classification.
Multi-label classification handles the case where each example may have zero
or more associated labels, from a discrete set. This is distinct from
`multi_class_head` which has exactly one label from a discrete set.
This head by default uses sigmoid cross entropy loss, which expects as input
a multi-hot tensor of shape `(batch_size, num_classes)`.
Args:
n_classes: Integer, number of classes, must be >= 2
label_name: String, name of the key in label dict. Can be null if label
is a tensor (single headed models).
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
head_name: name of the head. If provided, predictions, summary and metrics
keys will be suffixed by `"/" + head_name` and the default variable scope
will be `head_name`.
thresholds: thresholds for eval metrics, defaults to [.5]
metric_class_ids: List of class IDs for which we should report per-class
metrics. Must all be in the range `[0, n_classes)`.
loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
parameter and returns a weighted scalar loss. `weights` should be
optional. See `tf.losses`
Returns:
An instance of `Head` for multi label classification.
Raises:
ValueError: If n_classes is < 2
ValueError: If loss_fn does not have expected signature.
"""
if n_classes < 2:
raise ValueError("n_classes must be > 1 for classification.")
if loss_fn:
_verify_loss_fn_args(loss_fn)
return _MultiLabelHead(
n_classes=n_classes,
label_name=label_name,
weight_column_name=weight_column_name,
enable_centered_bias=enable_centered_bias,
head_name=head_name,
thresholds=thresholds,
metric_class_ids=metric_class_ids,
loss_fn=_wrap_custom_loss_fn(loss_fn) if loss_fn else None)
|
b1820584de6c4f9f987100c313793f41de0b73fc
| 3,639,564
|
def render_macros(line, macros):
"""Given a line of non-preprocessed code, and a list of macros, process macro expansions until done.
NOTE: Ignore comments"""
if line.startswith(";"):
return line
else:
while [macro_name for macro_name in macros.keys() if macro_name in line]:
for macro_name, macro_info in macros.items():
macro_body, params = macro_info
if params and macro_name in line:
line = render_parameterised_macro(line, macro_name, macro_body, params)
else:
line = line.replace(macro_name, macro_body)
return line
|
b78892d5708e9ca2d7e596d4f92ca7e8a6d17a64
| 3,639,565
|
def calc_qpos(x, bit = 16):
"""
    Return the maximum Q position (number of fractional bits) that can represent the given value.
:param x: float
:return: int
"""
for q in range(bit):
maxv = (2 ** (q - 1)) - 1
if x > maxv:
continue
return bit - q
return bit
|
b85b458989425bc5698547cae93d8729bd452e76
| 3,639,567
|
def person(request):
"""
Display information on the specified borrower (person)
"""
title = "Find a person"
if 'person_id' in request.GET:
person_id = request.GET['person_id']
try:
person = Person.objects.get(id_number=person_id)
            title = str(person)
checked_out_items = person.item_set.all()
transaction_history = person.transaction_set.all()
except Person.DoesNotExist:
error_message = "No person with id number %s" % person_id
else:
message = "Enter or scan the person's ID number"
people = Person.objects.enrolled() # For clickable list of names
return render_to_response("person.html", locals())
|
53550d5ebefc6fb36601049f86c227b073d715d5
| 3,639,569
|
def _follow_word_from_node(node, word):
"""Follows the link with given word label from given node.
If there is a link from ``node`` with the label ``word``, returns the end
node and the log probabilities and transition IDs of the link. If there are
null links in between, returns the sum of the log probabilities and the
concatenation of the transition IDs.
:type node: Lattice.Node
:param node: node where to start searching
:type word: str
:param word: word to search for
:rtype: tuple of (Lattice.Node, float, float, str)
:returns: the end node of the link with the word label (or ``None`` if the
word is not found), and the total acoustic log probability, LM log
probability, and transition IDs of the path to the word
"""
if word not in node.word_to_link:
return (None, None, None, None)
link = node.word_to_link[word]
if link.word is not None:
return (link.end_node,
link.ac_logprob if link.ac_logprob is not None else 0.0,
link.lm_logprob if link.lm_logprob is not None else 0.0,
link.transitions if link.transitions is not None else "")
end_node, ac_logprob, lm_logprob, transitions = \
_follow_word_from_node(link.end_node, word)
if end_node is None:
return (None, None, None, None)
else:
if link.ac_logprob is not None:
ac_logprob += link.ac_logprob
if link.lm_logprob is not None:
lm_logprob += link.lm_logprob
if link.transitions is not None:
transitions += link.transitions
return (end_node, ac_logprob, lm_logprob, transitions)
|
a21a20ee4ad2d2e90420e30572d41647b3938f4b
| 3,639,570
|
def normalize_code(code):
"""Normalize object codes to avoid duplicates."""
return slugify(code, allow_unicode=False).upper() if code else None
|
27f3b079c4fb5cc9d87e310282838f77c4aed981
| 3,639,571
|
def get_weights_for_all(misfit_windows, stations, snr_threshold, cc_threshold, deltat_threshold, calculate_basic, print_info=True):
"""
get_weights_for_all: calculate weights.
"""
weights_for_all = {}
# * firstly we update the weight of snr,cc,deltat
for net_sta in misfit_windows:
weights_for_all[net_sta] = {}
for category in misfit_windows[net_sta]:
weights_for_all[net_sta][category] = []
for each_misfit_window in misfit_windows[net_sta][category].windows:
wsnr = cal_snr_weight(each_misfit_window,
snr_threshold[0], snr_threshold[1])
wcc = cal_cc_weight(each_misfit_window,
cc_threshold[0], cc_threshold[1])
wdeltat = cal_deltat_weight(each_misfit_window,
deltat_threshold[0], deltat_threshold[1])
weights_for_all[net_sta][category].append(
Weight(wsnr, wcc, wdeltat, None, None))
if(not calculate_basic):
# * get the station list for the geographical weighting (remove all 0 cases)
used_geographical_net_sta_list = []
for net_sta in weights_for_all:
status = False
for category in weights_for_all[net_sta]:
for each_weight in weights_for_all[net_sta][category]:
wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat
if (wsnr_cc_deltat > 0):
status = True
if (status):
used_geographical_net_sta_list.append(net_sta)
# build stations_mapper
stations_mapper = get_stations_mapper(stations)
# get geographical weighting and update
geographical_weight_dict = cal_geographical_weight(
stations_mapper, used_geographical_net_sta_list, list(weights_for_all.keys()))
for net_sta in weights_for_all:
for category in weights_for_all[net_sta]:
for index, each_weight in enumerate(weights_for_all[net_sta][category]):
weights_for_all[net_sta][category][index] = each_weight._replace(
geographical=geographical_weight_dict[net_sta])
# * get the number of items for each category
# firstly we get all the category names
rep_net_sta = list(weights_for_all.keys())[0]
all_categories = list(weights_for_all[rep_net_sta].keys())
# here we should weight based on number of windows but not the number of usable stations.
number_each_category = {}
for each_category in all_categories:
number_each_category[each_category] = 0
for net_sta in weights_for_all:
for each_weight in weights_for_all[net_sta][each_category]:
# if this window is usable or not
wsnr_cc_deltat = each_weight.snr * each_weight.cc * each_weight.deltat
if (wsnr_cc_deltat > 0):
number_each_category[each_category] += 1
# get category weighting and update
# here we should weight based on number of windows but not the number of usable stations.
# * collect all events information
number_each_category_all_events = mpi_collect_category_number(
number_each_category, print_info=print_info)
weight_each_category = {}
for each_category in number_each_category_all_events:
weight_each_category[each_category] = cal_category_weight(
number_each_category_all_events[each_category])
for net_sta in weights_for_all:
for category in weights_for_all[net_sta]:
# * we will not use the category that not existing in this event
for index, each_weight in enumerate(weights_for_all[net_sta][category]):
weights_for_all[net_sta][category][index] = each_weight._replace(
category=weight_each_category[category])
return weights_for_all
|
0f64a968d00391ab18e2e91f2453b1ecc6a2a426
| 3,639,572
|
def tree_feature_importance(tree_model, X_train):
"""
Takes in a tree model and a df of training data and prints out
a ranking of the most important features and a bar graph of the values
Parameters
----------
tree_model: the trained model instance. Must have feature_importances_ and estimators_ attributes
X_train: DataFrame that the model was training on
Returns
-------
    fig: the matplotlib Figure containing the feature-importance bar plot
"""
importances = tree_model.feature_importances_
std = np.std([tree.feature_importances_ for tree in tree_model.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
features = X_train.columns.to_list()
# Print the feature ranking
print("Feature ranking:")
print()
ordered_features = []
for f in range(X_train.shape[1]):
#feature_name = features[indices[f]]
print(f'{f + 1}. {features[indices[f]]}, {importances[indices[f]]}')
ordered_features.append(features[indices[f]])
print()
# Plot the impurity-based feature importances of the forest
fig = plt.figure()
plt.title("Feature importances")
plt.bar(range(X_train.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X_train.shape[1]), ordered_features, rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.show()
return fig
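# A minimal usage sketch (assuming scikit-learn is installed and X_train / y_train
# are an existing pandas DataFrame and label array; the model only needs
# feature_importances_ and estimators_ attributes, e.g. a random forest):
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X_train, y_train)
fig = tree_feature_importance(rf, X_train)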
|
4c347a7bad8d541c3166942f51efa6f18882bde5
| 3,639,573
|
def chhop_microseconds(delta: timedelta) -> timedelta:
"""
    Chop the microseconds component from a timedelta object.
    :param delta: the timedelta to truncate
    :return: the timedelta rounded down to whole seconds
"""
return delta - timedelta(microseconds=delta.microseconds)
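# A minimal usage sketch:
from datetime import timedelta
print(chhop_microseconds(timedelta(seconds=5, microseconds=123456)))  # 0:00:05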
|
fac46d727540f607164029d23324e678a276b296
| 3,639,574
|
def __renumber(dictionary) :
"""Renumber the values of the dictionary from 0 to n
"""
count = 0
ret = dictionary.copy()
new_values = dict([])
    for key in dictionary.keys():
        value = dictionary[key]
        new_value = new_values.get(value, -1)
        if new_value == -1:
new_values[value] = count
new_value = count
count = count + 1
ret[key] = new_value
return ret
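# A minimal usage sketch (called from within the same module; values are relabelled
# 0..n-1 in order of first appearance):
print(__renumber({'a': 5, 'b': 7, 'c': 5}))  # {'a': 0, 'b': 1, 'c': 0}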
|
a4611f04360b2c03ac17f22e349371f58f65ed9b
| 3,639,575
|
def get_user_list():
"""
return user list if the given
authenticated user has admin permission
:return:
"""
if requires_perm() is True:
return jsonify({'user_list': USER_LIST,
'successful': True}), 200
return jsonify({'message': 'You are not '
'permitted to access this resource',
'successful': False}), 403
|
38598ed7d54dd4d93a8b22c6344f22736bc3b805
| 3,639,576
|
def rm_words(user_input, stop_words):
"""Sanitize using intersection and list.remove()"""
# Downsides:
# - Looping over list while removing from it?
# http://stackoverflow.com/questions/1207406/remove-items-from-a-list-while-iterating-in-python
stop_words = set(stop_words)
for sw in stop_words.intersection(user_input):
while sw in user_input:
user_input.remove(sw)
return user_input
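# A minimal usage sketch (the input list is mutated in place and also returned):
print(rm_words(['the', 'quick', 'the', 'brown', 'fox'], ['the', 'a']))  # ['quick', 'brown', 'fox']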
|
aead9c1cd5586bb20611ea8fbf57aa66aa3f5ede
| 3,639,577
|
import struct
def getValueForCoordinate(inputFile, lon, lat, noDataAsNone):
"""
Reads the pixel value of a GeoTIFF for a geographic coordinate
:param inputFile: full path to input GeoTIFF file
:type inputFile: str
:param lon: longitude
:type lon: float
:param lat: latitude
:type lat: float
    :param noDataAsNone: switch to decide whether to return NODATA as None or the value stored in the GeoTIFF.
:type noDataAsNone: bool
:returns: pixel value of coordinate
:rtype: float
"""
inputRaster = gdal.Open(inputFile)
geotransform = inputRaster.GetGeoTransform()
rb = inputRaster.GetRasterBand(1)
noDataVal = rb.GetNoDataValue()
# this converts from map coordinates to raster coordinates
# this will only work for CRS without rotation! If this is needed, we have to do some matrix
# multiplication magic here ;-)
    px = int((lon - geotransform[0]) / geotransform[1])  # (pos - origin) / pixelsize
    py = int((lat - geotransform[3]) / geotransform[5])
structval = rb.ReadRaster(px, py, 1, 1, buf_type=gdal.GDT_Float64)
val = struct.unpack('d', structval) # this unpacks a C data structure into a Python value.
if noDataAsNone and val[0] == noDataVal:
return None
else:
return val[0]
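# A minimal usage sketch (assuming GDAL is available and /tmp/dem.tif is a
# hypothetical single-band GeoTIFF in an unrotated geographic CRS):
value = getValueForCoordinate('/tmp/dem.tif', lon=8.54, lat=47.37, noDataAsNone=True)
print(value)  # pixel value at that coordinate, or None where the raster has NODATA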
|
4bfe9bf3de3dc277a84dad9e1fa523b2213b9b6d
| 3,639,578
|
def convert_xrandr_to_index(xrandr_val: float):
"""
:param xrandr_val: usually comes from the
config value directly, as a string (it's
just the nature of directly retrieving
information from a .ini file)
:return: an index representation
of the current brightness level, useful
for switch functions (where we switch
based on indexes and not string values)
Example: 0.2 is converted to 1
"""
return int(xrandr_val * 10 - 1)
|
eed5f7a6c79f7dcb29c627521d31dc59e5cd430b
| 3,639,579
|
def get_message(name, value):
"""Provides the message for a standard Python exception"""
if hasattr(value, "msg"):
return f"{name}: {value.msg}\n"
return f"{name}: {value}\n"
|
7755c63cc9a16e70ad9b0196d662ef603d82b5f6
| 3,639,580
|
import math
def calculate_page_info(offset, total_students):
"""
Takes care of sanitizing the offset of current page also calculates offsets for next and previous page
and information like total number of pages and current page number.
:param offset: offset for database query
    :return: dict consisting of the page number, query offsets for the next and previous pages, the sanitized offset, and the total number of pages
"""
# validate offset.
if not (isinstance(offset, int) or offset.isdigit()) or int(offset) < 0 or int(offset) >= total_students:
offset = 0
else:
offset = int(offset)
# calculate offsets for next and previous pages.
next_offset = offset + MAX_STUDENTS_PER_PAGE_GRADE_BOOK
previous_offset = offset - MAX_STUDENTS_PER_PAGE_GRADE_BOOK
# calculate current page number.
    page_num = (offset // MAX_STUDENTS_PER_PAGE_GRADE_BOOK) + 1
# calculate total number of pages.
total_pages = int(math.ceil(float(total_students) / MAX_STUDENTS_PER_PAGE_GRADE_BOOK)) or 1
if previous_offset < 0 or offset == 0:
# We are at first page, so there's no previous page.
previous_offset = None
if next_offset >= total_students:
# We've reached the last page, so there's no next page.
next_offset = None
return {
"previous_offset": previous_offset,
"next_offset": next_offset,
"page_num": page_num,
"offset": offset,
"total_pages": total_pages
}
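# A minimal usage sketch (assuming MAX_STUDENTS_PER_PAGE_GRADE_BOOK = 25):
info = calculate_page_info(offset=25, total_students=60)
print(info)
# {'previous_offset': 0, 'next_offset': 50, 'page_num': 2, 'offset': 25, 'total_pages': 3}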
|
e9af8bd4f511f42f30f60685d68fb043a54668de
| 3,639,581
|
def download_office(load=True): # pragma: no cover
"""Download office dataset.
Parameters
----------
load : bool, optional
Load the dataset after downloading it when ``True``. Set this
to ``False`` and only the filename will be returned.
Returns
-------
pyvista.StructuredGrid or str
DataSet or filename depending on ``load``.
Examples
--------
>>> from pyvista import examples
>>> dataset = examples.download_office()
>>> dataset.contour().plot()
See :ref:`clip_with_plane_box_example` for an example using this
dataset.
"""
return _download_and_read('office.binary.vtk', load=load)
|
5dd307bf815e7d7cbaef81b7542728b446b7f2cb
| 3,639,584
|
import time
def test_duplicated_topics(host):
"""
    Check that duplicated topic definitions are rejected without changes
"""
# Given
duplicated_topic_name = get_topic_name()
def get_topic_config():
topic_configuration = topic_defaut_configuration.copy()
topic_configuration.update({
'name': duplicated_topic_name,
'options': {
'retention.ms': 66574936,
'flush.ms': 564939
}
})
return topic_configuration
topic_configuration = {
'topics': [
get_topic_config(),
get_topic_config()
]
}
# When
results = ensure_kafka_topics(
host,
topic_configuration
)
time.sleep(0.3)
# Then
for result in results:
assert not result['changed']
assert 'duplicated topics' in result['msg']
|
68524bf675da4b52f05ee31ae35def7b461572cd
| 3,639,585
|
def register(request):
"""
    Account registration view.
"""
message = ""
if request.session.get('is_login', None):
return redirect('/account/')
if request.method == 'POST':
username = request.POST.get('username')
email = request.POST.get('email')
password1 = request.POST.get('password1')
password2 = request.POST.get('password2')
message = "请检查填写的内容!"
if _makesure_password(password1, password2):
message = _makesure_password(password1, password2)
else:
same_username = User.objects.filter(username=username)
same_email = User.objects.filter(email=email)
if same_username:
message = '用户名已经存在!'
elif same_email:
message = '该邮箱已经被注册了!'
else:
new_user = User()
new_user.username = username
new_user.email = email
new_user.password = _hash_code(password1)
new_user.save()
code = _make_confirm_string(new_user)
_send_email(email, code)
message = '请前往邮箱进行确认!'
return render(request, 'account/login.html', {'message': message})
captcha_form = forms.captchaForm(request.POST)
content = {'captcha_form': captcha_form,
'message': message, 'page_register': True}
return render(request, 'account/register.html', content)
|
2a3963f6549cd20f4d994cbab336f0e0eb91e685
| 3,639,586
|
def cosh(x):
"""Evaluates the hyperbolic cos of an interval"""
np = import_module('numpy')
if isinstance(x, (int, float)):
return interval(np.cosh(x), np.cosh(x))
elif isinstance(x, interval):
#both signs
if x.start < 0 and x.end > 0:
end = max(np.cosh(x.start), np.cosh(x.end))
return interval(1, end, is_valid=x.is_valid)
else:
#Monotonic
start = np.cosh(x.start)
end = np.cosh(x.end)
return interval(start, end, is_valid=x.is_valid)
else:
raise NotImplementedError
|
dd362392cce1aae2d19c589d49559b7b165c9f1e
| 3,639,587
|
def tree_intersection(tree_one, tree_two):
"""Checks for duplicate values between two trees and returns those values as a set."""
first_values = []
second_values = []
table = HashTable()
dupes = set([])
tree_one.pre_order(first_values.append)
tree_two.pre_order(second_values.append)
for value in first_values:
table.set(value, value)
for value in second_values:
if table.get(value):
dupes.add(value)
if len(dupes) == 0:
return 'There are no duplicates.'
return dupes
|
06ab08015cb02747fd3fea4055217a1dbeefc4b8
| 3,639,588
|
def new():
"""Deliver new-question interface."""
return render_template('questionNew.html', question_id='')
|
bca210aa5661d034256c6ef06209f45ea4923aa9
| 3,639,589
|
def merge_dict_recursive(base, other):
"""Merges the *other* dict into the *base* dict. If any value in other is itself a dict and the base also has a dict for the same key, merge these sub-dicts (and so on, recursively).
>>> base = {'a': 1, 'b': {'c': 3}}
>>> other = {'x': 4, 'b': {'y': 5}}
>>> want = {'a': 1, 'x': 4, 'b': {'c': 3, 'y': 5}}
>>> got = merge_dict_recursive(base, other)
>>> got == want
True
>>> base == want
True
"""
for (key, value) in list(other.items()):
if (isinstance(value, dict) and
(key in base) and
(isinstance(base[key], dict))):
base[key] = merge_dict_recursive(base[key], value)
else:
base[key] = value
return base
|
10ea2bbcf7d2ee330c784efff684974339d48b5d
| 3,639,590
|
def two_points_line(feature):
"""Convert a Polyline to a Line composed of only two points."""
features = []
coords = feature['geometry']['coordinates']
for i in range(0, len(coords) - 1):
segment_coords = [coords[i], coords[i+1]]
geom = geojson.LineString(segment_coords)
features.append(geojson.Feature(geometry=geom))
return features
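# A minimal usage sketch (using the geojson package):
import geojson
line = geojson.Feature(geometry=geojson.LineString([(0, 0), (1, 1), (2, 0)]))
segments = two_points_line(line)
print(len(segments))  # 2 -> one two-point LineString feature per original segment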
|
49c0197c6a072c690a2507df4b9a517a95c9919e
| 3,639,591
|
def transform_world_to_camera(poses_set, cams, ncams=4):
"""
Project 3d poses from world coordinate to camera coordinate system
Args
poses_set: dictionary with 3d poses
cams: dictionary with cameras
ncams: number of cameras per subject
Return:
t3d_camera: dictionary with 3d poses in camera coordinate
"""
t3d_camera = {}
for t3dk in sorted(poses_set.keys()):
subj, action, seqname = t3dk
t3d_world = poses_set[t3dk]
        for cam_idx in range(ncams):
            R, T, f, c, k, p, name = cams[(subj, cam_idx + 1)]
camera_coord = world_to_camera_frame(np.reshape(t3d_world, [-1, 3]), R, T)
camera_coord = np.reshape(camera_coord, [-1, len(H36M_NAMES) * 3])
sname = seqname[:-3] + "." + name + ".h5" # e.g.: Waiting 1.58860488.h5
t3d_camera[(subj, action, sname)] = camera_coord
return t3d_camera
|
c67c61e7746fd67ca62a848b3641e27e068348a5
| 3,639,592
|
def div66():
"""
    Returns the divider66 constant
:return: divider66
"""
return divider66
|
fbade8a4b3aa445985686c180f3cbad71832498f
| 3,639,594
|
def extract_square_from_file(image_number=1):
"""Given a number of the image file return a cropped sudoku."""
image_string = '/home/james/Documents/projects/sudoku/img/'
image_string += str(image_number) + '.jpg'
binary = read_binary(image_string)
threshold = get_threshold(binary)
square = get_square_coordinates(threshold)
game = extract_sudoku(square, threshold)
return game
|
def244273d2e329b771a61b5fb4fb980e120831d
| 3,639,595
|
def sequence_extractor(graph, path):
"""
returns the sequence of the path
:param graph: a graph object
:param path: a list of nodes ordered according to the path
:return: sequence of the path
"""
# check if path exists
if len(path) == 1:
return graph.nodes[path[0]].seq
elif not path_checker(graph, path):
return ""
if graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 0):
direction = 0
sequence = reverse_complement(graph.nodes[path[0]].seq)
elif graph.nodes[path[0]].in_direction(graph.nodes[path[1]].id, 1):
direction = 1
sequence = graph.nodes[path[0]].seq
for i in range(len(path) - 1):
current_node = graph.nodes[path[i]]
next_node = graph.nodes[path[i+1]]
if current_node.in_direction(next_node.id, direction):
direction, overlap = next_direction(current_node, next_node, direction)
# if next direction is one this means current node connects to
# next node from 0 so I don't need to take the reverse complement
# Otherwise I need to
if direction == 1:
sequence += next_node.seq[overlap:]
else:
sequence += reverse_complement(next_node.seq)[overlap:]
return sequence
|
1f83fcbf75add7234f9395e801d22d95d55804a9
| 3,639,597
|
from typing import Optional
from typing import List
from typing import Union
from typing import Literal
from typing import Tuple
from typing import Dict
def plot_matplotlib(
tree: CassiopeiaTree,
depth_key: Optional[str] = None,
meta_data: Optional[List[str]] = None,
allele_table: Optional[pd.DataFrame] = None,
indel_colors: Optional[pd.DataFrame] = None,
indel_priors: Optional[pd.DataFrame] = None,
orient: Union[Literal["up", "down", "left", "right"], float] = 90.0,
extend_branches: bool = True,
angled_branches: bool = True,
add_root: bool = False,
figsize: Tuple[float, float] = (7.0, 7.0),
colorstrip_width: Optional[float] = None,
colorstrip_spacing: Optional[float] = None,
clade_colors: Optional[Dict[str, Tuple[float, float, float]]] = None,
internal_node_kwargs: Optional[Dict] = None,
leaf_kwargs: Optional[Dict] = None,
branch_kwargs: Optional[Dict] = None,
colorstrip_kwargs: Optional[Dict] = None,
continuous_cmap: Union[str, mpl.colors.Colormap] = "viridis",
vmin: Optional[float] = None,
vmax: Optional[float] = None,
categorical_cmap: Union[str, mpl.colors.Colormap] = "tab10",
value_mapping: Optional[Dict[str, int]] = None,
ax: Optional[plt.Axes] = None,
random_state: Optional[np.random.RandomState] = None,
) -> Tuple[plt.Figure, plt.Axes]:
"""Generate a static plot of a tree using Matplotlib.
Args:
tree: The CassiopeiaTree to plot.
depth_key: The node attribute to use as the depth of the nodes. If
not provided, the distances from the root is used by calling
`tree.get_distances`.
meta_data: Meta data to plot alongside the tree, which must be columns
in the CassiopeiaTree.cell_meta variable.
allele_table: Allele table to plot alongside the tree.
indel_colors: Color mapping to use for plotting the alleles for each
cell. Only necessary if `allele_table` is specified.
indel_priors: Prior probabilities for each indel. Only useful if an
allele table is to be plotted and `indel_colors` is None.
orient: The orientation of the tree. Valid arguments are `left`, `right`,
`up`, `down` to display a rectangular plot (indicating the direction
of going from root -> leaves) or any number, in which case the
tree is placed in polar coordinates with the provided number used
as an angle offset. Defaults to 90.
extend_branches: Extend branch lengths such that the distance from the
root to every node is the same. If `depth_key` is also provided, then
only the leaf branches are extended to the deepest leaf.
angled_branches: Display branches as angled, instead of as just a
line from the parent to a child.
add_root: Add a root node so that only one branch connects to the
start of the tree. This node will have the name `synthetic_root`.
figsize: Size of the plot. Defaults to (7., 7.,)
colorstrip_width: Width of the colorstrip. Width is defined as the
length in the direction of the leaves. Defaults to 5% of the tree
depth.
colorstrip_spacing: Space between consecutive colorstrips. Defaults to
half of `colorstrip_width`.
clade_colors: Dictionary containing internal node-color mappings. These
colors will be used to color all the paths from this node to the
leaves the provided color.
internal_node_kwargs: Keyword arguments to pass to `plt.scatter` when
plotting internal nodes.
leaf_kwargs: Keyword arguments to pass to `plt.scatter` when
plotting leaf nodes.
branch_kwargs: Keyword arguments to pass to `plt.plot` when plotting
branches.
colorstrip_kwargs: Keyword arguments to pass to `plt.fill` when plotting
colorstrips.
continuous_cmap: Colormap to use for continuous variables. Defaults to
`viridis`.
vmin: Value representing the lower limit of the color scale. Only applied
to continuous variables.
vmax: Value representing the upper limit of the color scale. Only applied
to continuous variables.
categorical_cmap: Colormap to use for categorical variables. Defaults to
`tab10`.
value_mapping: An optional dictionary containing string values to their
integer mappings. These mappings are used to assign colors by
calling the `cmap` with the designated integer mapping. By default,
the values are assigned pseudo-randomly (whatever order the set()
operation returns). Only applied for categorical variables.
ax: Matplotlib axis to place the tree. If not provided, a new figure is
initialized.
random_state: A random state for reproducibility
Returns:
If `ax` is provided, `ax` is returned. Otherwise, a tuple of (fig, ax)
of the newly initialized figure and axis.
"""
is_polar = isinstance(orient, (float, int))
(
node_coords,
branch_coords,
node_colors,
branch_colors,
colorstrips,
) = place_tree_and_annotations(
tree,
depth_key,
meta_data,
allele_table,
indel_colors,
indel_priors,
orient,
extend_branches,
angled_branches,
add_root,
colorstrip_width,
colorstrip_spacing,
clade_colors,
continuous_cmap,
vmin,
vmax,
categorical_cmap,
value_mapping,
random_state,
)
fig = None
if ax is None:
fig, ax = plt.subplots(figsize=figsize, tight_layout=True)
ax.set_axis_off()
# Plot all nodes
_leaf_kwargs = dict(x=[], y=[], s=5, c="black")
_node_kwargs = dict(x=[], y=[], s=0, c="black")
_leaf_kwargs.update(leaf_kwargs or {})
_node_kwargs.update(internal_node_kwargs or {})
for node, (x, y) in node_coords.items():
if node in node_colors:
continue
if is_polar:
x, y = utilities.polar_to_cartesian(x, y)
if tree.is_leaf(node):
_leaf_kwargs["x"].append(x)
_leaf_kwargs["y"].append(y)
else:
_node_kwargs["x"].append(x)
_node_kwargs["y"].append(y)
ax.scatter(**_leaf_kwargs)
ax.scatter(**_node_kwargs)
_leaf_colors = []
_node_colors = []
_leaf_kwargs.update({"x": [], "y": []})
_node_kwargs.update({"x": [], "y": []})
for node, color in node_colors.items():
x, y = node_coords[node]
if is_polar:
x, y = utilities.polar_to_cartesian(x, y)
if tree.is_leaf(node):
_leaf_kwargs["x"].append(x)
_leaf_kwargs["y"].append(y)
_leaf_colors.append(color)
else:
_node_kwargs["x"].append(x)
_node_kwargs["y"].append(y)
_node_colors.append(color)
_leaf_kwargs["c"] = _leaf_colors
_node_kwargs["c"] = _node_colors
ax.scatter(**_leaf_kwargs)
ax.scatter(**_node_kwargs)
# Plot all branches
_branch_kwargs = dict(linewidth=1, c="black")
_branch_kwargs.update(branch_kwargs or {})
for branch, (xs, ys) in branch_coords.items():
if branch in branch_colors:
continue
if is_polar:
xs, ys = utilities.polars_to_cartesians(xs, ys)
ax.plot(xs, ys, **_branch_kwargs)
for branch, color in branch_colors.items():
_branch_kwargs["c"] = color
xs, ys = branch_coords[branch]
if is_polar:
xs, ys = utilities.polars_to_cartesians(xs, ys)
ax.plot(xs, ys, **_branch_kwargs)
# Colorstrips
_colorstrip_kwargs = dict(linewidth=0)
_colorstrip_kwargs.update(colorstrip_kwargs or {})
for colorstrip in colorstrips:
# Last element is text, but this can not be shown in static plotting.
for xs, ys, c, _ in colorstrip.values():
_colorstrip_kwargs["c"] = c
if is_polar:
xs, ys = utilities.polars_to_cartesians(xs, ys)
ax.fill(xs, ys, **_colorstrip_kwargs)
return (fig, ax) if fig is not None else ax
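# A minimal usage sketch (assuming `tree` is an existing CassiopeiaTree and that
# "cell_state" is a hypothetical column in tree.cell_meta):
fig, ax = plot_matplotlib(tree, meta_data=["cell_state"], orient="down")
fig.savefig("tree_plot.png", dpi=150)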
|
a26a8146f4f5fb3bc3564367774740645e04caf3
| 3,639,598
|
import jinja2
from datetime import datetime
import time
import hashlib
def print_order(order: Order, user_id: int = 0):
"""
    Print an order receipt.
    :param order: the Order to print
    :param user_id: id of the operator; an order log entry is created when it is non-negative
    :return: (success, message) tuple
"""
shop_id = order.shop.id
shop = get_shop_by_shop_id(shop_id)
receipt_config = get_receipt_by_shop_id(shop_id)
printer = ylyPrinter()
template = jinja2.Template(ORDER_TPL_58)
body = template.render(
order=order,
print_time=make_aware(datetime.datetime.now()).strftime("%Y-%m-%d %H:%M:%S"),
shop=shop,
receipt_config=receipt_config,
)
printer_config = get_printer_by_shop_id(shop_id)
if not printer_config:
return False, "请先添加打印机"
partner = "1693" # 用户ID
apikey = "664466347d04d1089a3d373ac3b6d985af65d78e" # API密钥
timenow = str(int(time.time())) # 当前时间戳
machine_code = printer_config.code # 打印机终端号 520
mkey = printer_config.key # 打印机密钥 110110
if machine_code and mkey:
sign = "{}machine_code{}partner{}time{}{}".format(
apikey, machine_code, partner, timenow, mkey
)
sign = hashlib.md5(sign.encode("utf-8")).hexdigest().upper()
else:
return False, "打印机配置错误"
data = {
"partner": partner,
"machine_code": machine_code,
"content": body,
"time": timenow,
"sign": sign,
}
success, msg = printer.send_request(data, receipt_config.copies)
if success and user_id >= 0:
log_info = {
"order_num": order.order_num,
"shop_id": order.shop.id,
"operator_id": user_id,
"operate_type": OrderLogType.PRINT,
}
create_order_log(log_info)
return success, msg
|
bdfdbe51b854093172f2ab2c4b9d1abd15856847
| 3,639,599
|
def get_IoU_from_matches(match_pred2gt, matched_classes, ovelaps):
"""
    If given an image, calculate the IoU of the segments in the image
:param match_pred2gt: maps index of predicted segment to index of ground truth segment
:param matched_classes: maps index of predicted segment to class number
:param ovelaps: maps [predicted segment index, gt segment index] to the IoU value of the segments
:return:
1. IoUs - IoU for all segments
2. IoUs_classes - mean IoU per class
"""
IoUs = [ [] for _ in range(5) ]
match_pred2gt = match_pred2gt.astype(np.int32)
for pred, gt in enumerate(match_pred2gt):
if gt < 0:
continue
IoUs[matched_classes[pred]].append(ovelaps[pred, gt])
    # mean segment IoU per class
IoUs_classes = np.zeros((5, 1))
for class_idx, lst in enumerate(IoUs):
if not lst:
continue
arr = np.array(lst)
IoUs_classes[class_idx] = (np.mean(arr))
return IoUs, IoUs_classes
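# A minimal usage sketch with toy values (two predicted segments, each matched to a
# ground-truth segment; classes 1 and 3 are arbitrary example labels):
import numpy as np
match_pred2gt = np.array([0, 1])    # pred 0 -> gt 0, pred 1 -> gt 1
matched_classes = np.array([1, 3])  # class label of each predicted segment
overlaps = np.array([[0.8, 0.1],
                     [0.2, 0.6]])   # IoU matrix indexed by [pred, gt]
IoUs, IoUs_classes = get_IoU_from_matches(match_pred2gt, matched_classes, overlaps)
print(IoUs_classes.ravel())  # [0.  0.8 0.  0.6 0. ]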
|
2488c590d86a639898fc1e84c6a6d24afb7c2df4
| 3,639,600
|
def id_queue(obs_list, prediction_url='http://plants.deep.ifca.es/api', shuffle=False):
"""
Returns generator of identifications via buffer.
    Therefore we perform the identification query for the next observation
    while the user is still observing the current information.
    """
    print("Generating the identification buffer ...")
    if shuffle:
        indices = np.arange(len(obs_list))
        np.random.shuffle(indices)
        obs_list = [obs_list[i] for i in indices]
def gen(obs_list):
for obs in obs_list:
yield make_prediction(obs, prediction_url)
return buffered_gen_threaded(gen(obs_list))
|
37773fc9d688b000a1d02b083f89e0b4996a52ea
| 3,639,601
|
def periodogram(x, nfft=None, fs=1):
"""Compute the periodogram of the given signal, with the given fft size.
Parameters
----------
x : array-like
input signal
nfft : int
size of the fft to compute the periodogram. If None (default), the
length of the signal is used. if nfft > n, the signal is 0 padded.
fs : float
Sampling rate. By default, is 1 (normalized frequency. e.g. 0.5 is the
Nyquist limit).
Returns
-------
pxx : array-like
The psd estimate.
fgrid : array-like
Frequency grid over which the periodogram was estimated.
Examples
--------
Generate a signal with two sinusoids, and compute its periodogram:
>>> fs = 1000
    >>> x = np.sin(2 * np.pi * 0.1 * fs * np.linspace(0, 0.5, int(0.5 * fs)))
    >>> x += np.sin(2 * np.pi * 0.2 * fs * np.linspace(0, 0.5, int(0.5 * fs)))
>>> px, fx = periodogram(x, 512, fs)
Notes
-----
Only real signals supported for now.
Returns the one-sided version of the periodogram.
    Discrepancy with matlab: matlab computes the psd in units of power / radian /
    sample, and we compute the psd in units of power / sample: to get the same
    result as matlab, just multiply the result from talkbox by 2pi"""
x = np.atleast_1d(x)
n = x.size
if x.ndim > 1:
raise ValueError("Only rank 1 input supported for now.")
if not np.isrealobj(x):
raise ValueError("Only real input supported for now.")
if not nfft:
nfft = n
if nfft < n:
raise ValueError("nfft < signal size not supported yet")
pxx = np.abs(fft(x, nfft)) ** 2
if nfft % 2 == 0:
        pn = nfft // 2 + 1
    else:
        pn = (nfft + 1) // 2
fgrid = np.linspace(0, fs * 0.5, pn)
return pxx[:pn] / (n * fs), fgrid
|
899cacc316cf80e79871d01b0c0b3a84deda8042
| 3,639,602
|
def handle_429(e):
"""Renders full error page for too many site queries"""
html = render.html("429")
client_addr = get_ipaddr()
count_ratelimit.labels(e, client_addr).inc()
logger.error(f"Error: {e}, Source: {client_addr}")
return html, 429
|
b7a27e55f753dc254e19d1b51ddb169c8e683a2c
| 3,639,603
|