content
stringlengths 35
762k
| sha1
stringlengths 40
40
| id
int64 0
3.66M
|
|---|---|---|
async def async_setup_entry(hass, config_entry):
    """Set up the integration from a config entry.

    Forwards the entry to the ``sensor`` platform and reports success.
    """
    # Lazy %-formatting: the entry is only stringified if INFO is enabled
    # (the previous str() + concatenation always paid the cost).
    _LOGGER.info("async_setup_entry %s", config_entry)
    # NOTE(review): async_create_task + async_forward_entry_setup is the
    # legacy Home Assistant pattern; newer cores expect
    # `await hass.config_entries.async_forward_entry_setups(entry, ["sensor"])`
    # -- confirm the targeted HA version before migrating.
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
    )
    return True
|
0bbef08a544cccede0efd18fbbf9a0b7dddfbec9
| 3,646,597
|
def analyse_branching(geom,ordering_system,conversionFactor,voxelSize):
    """ Does a branching analysis on the tree defined by 'geom'
    Inputs:
       - geom: A geometry structure consisting of element list, node location and radii/lengths
       - ordering_system: the ordering system to be used in analysis (e.g. 'strahler', 'horsfield')
       - conversionFactor: scale factor passed through to find_branch_angles -- TODO confirm units
       - voxelSize: voxel size passed through to find_branch_angles -- TODO confirm units
    Returns: Prints to screen a table of branching properties (one per generation, one per order)
       and overall summary statistics; also returns the (possibly updated) 'geom' structure.
    """
    # Upstream/downstream connectivity of the 1D element tree.
    elem_cnct = pg_utilities.element_connectivity_1D(geom['nodes'], geom['elems'])
    # Per-element branch orders (includes the key named by `ordering_system`
    # plus 'generation').
    orders = evaluate_orders(geom['nodes'], geom['elems'])
    # Find Results
    branchGeom = arrange_by_branches(geom, elem_cnct['elem_up'], orders[ordering_system],orders['generation'])
    [geom, branchGeom] = find_branch_angles(geom, orders, elem_cnct, branchGeom, voxelSize, conversionFactor)
    major_minor_results=major_minor(geom, elem_cnct['elem_down']) #major/minor child stuff
    # tabulate data
    generation_summary_statistics(geom, orders, major_minor_results)
    # NOTE(review): 'strahler' is hard-coded here rather than using
    # `ordering_system` -- confirm this is intentional.
    summary_statistics(branchGeom, geom, orders, major_minor_results,'strahler')
    return geom
|
25c72b51094c59317e167cca6662c5ccfa8805b0
| 3,646,598
|
def remove_start(s: str) -> str:
    """Return ``s`` without a single leading ``'-'`` character, if present.

    :param s: input string
    :return: ``s`` minus one leading ``'-'``; unchanged otherwise
    """
    if s.startswith('-'):
        return s[1:]
    return s
|
03504a3094798f6582bcae40233f7215e8d4d780
| 3,646,599
|
async def get_user_requests(user, groups):
    """Get requests relevant to a user.

    A user sees requests they have made as well as requests where they are a
    secondary approver.

    :param user: username string
    :param groups: list of group names the user belongs to
    :return: list of request dicts from DynamoDB
    """
    dynamo_handler = UserDynamoHandler(user)
    all_requests = await dynamo_handler.get_all_requests()
    # Query for groups where this user (or one of their groups) is listed as a
    # secondary approver.
    query = {
        "domains": config.get("dynamo.get_user_requests.domains", []),
        "filters": [
            {
                "field": "extendedattributes.attributeName",
                "values": ["secondary_approvers"],
                "operator": "EQUALS",
            },
            {
                "field": "extendedattributes.attributeValue",
                "values": groups + [user],
                "operator": "EQUALS",
            },
        ],
        "size": 500,
    }
    approver_groups = await auth.query_cached_groups(query=query)
    # Build the membership set once instead of re-creating the
    # `approver_groups + [user]` list (and doing an O(n) scan of it) on
    # every request in the loop.
    relevant = {g["name"] for g in approver_groups}
    relevant.add(user)
    requests = []
    for req in all_requests:
        # Requests the user made themselves.
        if user == req.get("username", ""):
            requests.append(req)
            continue
        # Requests for a group the user can approve.
        group = req.get("group")
        if group is not None and group in relevant:
            requests.append(req)
    return requests
|
ca7a9f8baa433a2ed1828d1f0c8c3dd791d6ea12
| 3,646,600
|
def normalize_column(df_column, center_at_zero=False):
    """Convert an unnormalized dataframe column into a normalized 1D numpy array.

    Default: scales values into [0, 1].
    With ``center_at_zero=True``: scales values into [-1, 1].
    """
    values = np.array(df_column, dtype="float64")
    hi = np.max(values)
    lo = np.min(values)
    # Shift to zero, then apply the range-dependent scale (and offset when
    # centering at zero).
    scale = (2.0 if center_at_zero else 1.0) / (hi - lo)
    offset = 1.0 if center_at_zero else 0.0
    return (values - lo) * scale - offset
|
4da61a899097812fc22bae1f93addbbcd861f786
| 3,646,604
|
def apply_mask(image, mask):
    """Blur the regions of ``image`` selected by ``mask``.

    Where ``mask == 1`` each of the three channels is replaced by a 40x40
    box-blurred copy of itself; everywhere else the original pixels are kept.
    """
    image = np.array(image.astype(np.uint8))
    # Box blur is applied per channel independently, so blurring the whole
    # image once and indexing channels matches blurring channel by channel.
    blurred = cv2.blur(image, (40, 40))
    for channel in range(3):
        image[:, :, channel] = np.where(mask == 1,
                                        blurred[:, :, channel],
                                        image[:, :, channel])
    return image
|
d14516edadd60bf5ba56d0ea77a8c6582a847e8e
| 3,646,606
|
from numpy import zeros, sqrt, where, pi, mean, arange, histogram
def pairCorrelationFunction_3D(x, y, z, S, rMax, dr):
    """Compute the three-dimensional pair correlation function for a set of
    spherical particles contained in a cube with side length S. This simple
    function finds reference particles such that a sphere of radius rMax drawn
    around the particle will fit entirely within the cube, eliminating the need
    to compensate for edge effects. If no such particles exist, an error is
    returned. Try a smaller rMax...or write some code to handle edge effects! ;)
    Arguments:
        x               an array of x positions of centers of particles
        y               an array of y positions of centers of particles
        z               an array of z positions of centers of particles
        S               length of each side of the cube in space
        rMax            outer diameter of largest spherical shell
        dr              increment for increasing radius of spherical shell
    Returns a tuple: (g, radii, interior_indices)
        g(r)            a numpy array containing the correlation function g(r)
        radii           a numpy array containing the radii of the
                        spherical shells used to compute g(r)
        reference_indices   indices of reference particles
    Raises:
        RuntimeError    if no particle is at least rMax away from every face.
    """
    # Find particles which are close enough to the cube center that a sphere of
    # radius rMax will not cross any face of the cube.
    bools1 = x > (-S / 2 + rMax)
    bools2 = x < (S / 2 - rMax)
    bools3 = y > (-S / 2 + rMax)
    bools4 = y < (S / 2 - rMax)
    bools5 = z > (-S / 2 + rMax)
    bools6 = z < (S / 2 - rMax)
    interior_indices, = where(bools1 * bools2 * bools3 * bools4 * bools5 * bools6)
    num_interior_particles = len(interior_indices)
    if num_interior_particles < 1:
        raise RuntimeError("No particles found for which a sphere of radius rMax\
                will lie entirely within a cube of side length S.  Decrease rMax\
                or increase the size of the cube.")
    edges = arange(0., rMax + 1.1 * dr, dr)
    num_increments = len(edges) - 1
    g = zeros([num_interior_particles, num_increments])
    radii = zeros(num_increments)
    numberDensity = len(x) / S**3
    # Compute pairwise correlation for each interior particle.
    for p in range(num_interior_particles):
        index = interior_indices[p]
        d = sqrt((x[index] - x)**2 + (y[index] - y)**2 + (z[index] - z)**2)
        # Push the self-distance outside the histogram range so the particle
        # does not count itself.
        d[index] = 2 * rMax
        # Fix: the `normed` keyword was deprecated long ago and removed from
        # numpy.histogram in NumPy 1.24; plain counts (the default) are what
        # this code always used.
        (result, bins) = histogram(d, bins=edges)
        g[p, :] = result / numberDensity
    # Average g(r) for all interior particles and compute radii.
    g_average = zeros(num_increments)
    for i in range(num_increments):
        radii[i] = (edges[i] + edges[i + 1]) / 2.
        rOuter = edges[i + 1]
        rInner = edges[i]
        # Normalize by the shell volume: 4/3*pi*(r_outer**3 - r_inner**3).
        g_average[i] = mean(g[:, i]) / (4.0 / 3.0 * pi * (rOuter**3 - rInner**3))
    return (g_average, radii, interior_indices)
    # Number of particles in shell/total number of particles/volume of shell/number density
    # shell volume = 4/3*pi(r_outer**3-r_inner**3)
|
cec343757af93b6a49b6ecc856e31f311518a109
| 3,646,607
|
import numpy
import copy
def _find_endpoints_of_skeleton(binary_image_matrix):
    """Finds endpoints of skeleton.
    :param binary_image_matrix: M-by-N numpy array of integers in 0...1. If
    binary_image_matrix[i, j] = 1, grid cell [i, j] is part of the skeleton.
    :return: binary_endpoint_matrix: M-by-N numpy array of integers in 0...1.
    If binary_endpoint_matrix[i, j] = 1, grid cell [i, j] is an endpoint of
    the skeleton.
    """
    # A single-pixel skeleton is its own endpoint.
    if numpy.sum(binary_image_matrix) == 1:
        return copy.deepcopy(binary_image_matrix)
    # Pad so the filter kernel never reads outside the image.
    filtered_image_matrix = numpy.pad(
        binary_image_matrix, pad_width=2, mode='constant', constant_values=0)
    # Convolve with the module-level endpoint-detection kernel
    # (KERNEL_MATRIX_FOR_ENDPOINT_FILTER, defined elsewhere in this module).
    filtered_image_matrix = cv2.filter2D(
        filtered_image_matrix.astype(numpy.uint8), -1,
        KERNEL_MATRIX_FOR_ENDPOINT_FILTER)
    # Undo the padding.
    filtered_image_matrix = filtered_image_matrix[2:-2, 2:-2]
    endpoint_flag_matrix = numpy.full(binary_image_matrix.shape, 0, dtype=int)
    # Cells whose filter response equals the sentinel FILTERED_VALUE_AT_ENDPOINT
    # (module-level constant) are flagged as endpoints.
    endpoint_flag_matrix[
        filtered_image_matrix == FILTERED_VALUE_AT_ENDPOINT] = 1
    return endpoint_flag_matrix
|
d6ea3aac54c95e658753f7e8a2a40762ffc32706
| 3,646,609
|
def get_today_timestamp():
    """Return today's date formatted as day + abbreviated month + year,
    e.g. ``01Jan2024``.
    """
    # A single strftime with the combined format equals concatenating the
    # three individual %d / %b / %Y calls.
    return dt.datetime.today().strftime("%d%b%Y")
|
8fab2ad826c89b66acb083a1a1f07d340cf3ed9b
| 3,646,610
|
import copy
def massAvg(massList, method='weighted', weights=None):
    """
    Compute the average mass of massList according to method.

    If method=weighted but weights were not properly defined,
    switch method to harmonic.
    If massList contains a zero mass, switch method to mean.

    :parameter massList: list of mass structures; each entry is a nested
                         [branch][vertex] arrangement of masses in GeV
                         (unum-style quantities divisible by GeV) -- all
                         entries must share the same shape
    :parameter method: possible values: harmonic, mean, weighted
    :parameter weights: weights of elements (only for weighted average)
    :return: averaged mass structure with the same shape as massList[0]
    :raises SModelSError: if entries of massList differ in shape
    """
    # Trivial cases: empty input, or all entries identical.
    if not massList:
        return massList
    if massList.count(massList[0]) == len(massList):
        return massList[0]
    # Fall back to harmonic mean when weights are missing or mismatched.
    if method == 'weighted' and (not weights or len(weights) != len(massList)):
        method = 'harmonic'
    flatList = [ mass / GeV for mass in _flattenList(massList)]
    # Harmonic mean is undefined with a zero mass; fall back to arithmetic mean.
    if method == 'harmonic' and 0. in flatList:
        method = 'mean'
    # Validate that every entry has the same two-branch shape as the first.
    for mass in massList:
        if len(mass) != len(massList[0]) \
                or len(mass[0]) != len(massList[0][0]) \
                or len(mass[1]) != len(massList[0][1]):
            logger.error('Mass shape mismatch in mass list:\n' + str(mass) +
                         ' and ' + str(massList[0]))
            raise SModelSError()
    # Average position-by-position into a copy of the first entry.
    avgmass = copy.deepcopy(massList[0])
    for ib, branch in enumerate(massList[0]):
        for ival in enumerate(branch):
            # ival is an (index, value) pair from enumerate; ival[0] is the
            # vertex index within the branch.
            vals = [ float(mass[ib][ival[0]] / GeV) for mass in massList]
            if method == 'mean':
                avg = np.mean(vals)
            elif method == 'harmonic':
                avg = stats.hmean(vals)
            elif method == 'weighted':
                weights = [ float(weight) for weight in weights ]
                avg = np.average(vals,weights=weights)
            avgmass[ib][ival[0]] = float(avg)*GeV
    return avgmass
|
a64fd8a0ae8d0c8c6c6402741b6080a17e86c19f
| 3,646,611
|
def listListenerPortsOnServer(nodeName, serverName):
    """List all of the Listener Ports on the specified Node/Server.

    Queries the WebSphere AdminControl MBean server for all ListenerPort
    objects in the current cell scoped to the given node and server process,
    and returns the query result split into a list of lines.
    """
    m = "listListenerPortsOnServer:"  # log prefix for sop()
    sop(m,"nodeName = %s, serverName = %s" % (nodeName, serverName))
    cellName = getCellName() # e.g. 'xxxxCell01'
    lPorts = _splitlines(AdminControl.queryNames("type=ListenerPort,cell=%s,node=%s,process=%s,*" % (cellName, nodeName, serverName)))
    sop(m,"returning %s" % (lPorts))
    return lPorts
|
f2fdadcf06d47e55edaab44ca9e040cf8e116ae0
| 3,646,612
|
def forward_propagation_with_dropout(X, parameters, keep_prob=0.5):
    """
    Implements forward propagation with (inverted) dropout.
    LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
        X - input dataset of shape (2, number of examples)
        parameters - python dict with keys "W1", "b1", "W2", "b2", "W3", "b3":
            W1 - weight matrix of shape (20, 2)
            b1 - bias vector of shape (20, 1)
            W2 - weight matrix of shape (3, 20)
            b2 - bias vector of shape (3, 1)
            W3 - weight matrix of shape (1, 3)
            b3 - bias vector of shape (1, 1)
        keep_prob - probability of keeping a unit active (entries of the random
            mask below keep_prob are kept; the rest are zeroed)

    Returns:
        A3 - last activation value, shape (1, 1), the forward-propagation output
        cache - tuple of values stored for computing the backward propagation
    """
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]
    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = reg_utils.relu(Z1)
    # Steps 1-4 below implement inverted dropout on A1.
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  # Step 1: initialize mask D1 = np.random.rand(..., ...)
    D1 = D1 < keep_prob  # Step 2: convert entries of D1 to 0/1 (using keep_prob as threshold)
    A1 = A1 * D1  # Step 3: shut down some neurons of A1 (set them to 0/False)
    A1 = A1 / keep_prob  # Step 4: scale surviving neurons so the expectation is unchanged
    """
    #不理解的同学运行一下下面代码就知道了。
    import numpy as np
    np.random.seed(1)
    A1 = np.random.randn(1,3)
    D1 = np.random.rand(A1.shape[0],A1.shape[1])
    keep_prob=0.5
    D1 = D1 < keep_prob
    print(D1)
    A1 = 0.01
    A1 = A1 * D1
    A1 = A1 / keep_prob
    print(A1)
    结果是
    [[ True False True]]
    [[0.02 0. 0.02]]
    """
    Z2 = np.dot(W2, A1) + b2
    A2 = reg_utils.relu(Z2)
    # Steps 1-4 below implement inverted dropout on A2.
    D2 = np.random.rand(A2.shape[0], A2.shape[1])  # Step 1: initialize mask D2 = np.random.rand(..., ...)
    D2 = D2 < keep_prob  # Step 2: convert entries of D2 to 0/1 (using keep_prob as threshold)
    A2 = A2 * D2  # Step 3: shut down some neurons of A2 (set them to 0/False)
    A2 = A2 / keep_prob  # Step 4: scale surviving neurons so the expectation is unchanged
    Z3 = np.dot(W3, A2) + b3
    A3 = reg_utils.sigmoid(Z3)
    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)
    return A3, cache
|
530d65644647efe4dc0f499cfc60869d412cfdf2
| 3,646,613
|
import warnings
def getPotInstance(pot_name):
    """ Try to get an instance of a given pot_name.

    Return
    ----------
    pot_module: module/class object of pot_name; None if pot_name refers to a
        combined (list) potential or the class cannot be instantiated
    pot_instance: potential instance; None if it is not 3D, lacks C support,
        or is not available
    """
    pot_module = None
    pot_instance = None
    if (pot_name in dir(galpy.potential)) and ('Potential' in pot_name):
        pot_module = galpy.potential.__getattribute__(pot_name)
        if type(pot_module) is list:
            # A combined potential is already a usable list of instances.
            pot_instance = pot_module
            pot_module = None
        elif type(pot_module) is type:
            # A class: try to construct a default instance, silencing
            # deprecation/initialization warnings.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                try:
                    pot_instance = pot_module()
                except (ValueError, TypeError, AttributeError, RuntimeWarning):
                    pot_module = None
                    pot_instance = None
        else:
            # Already an instance; recover its class for the caller.
            pot_instance = pot_module
            pot_module = type(pot_module)
        # Fix: use identity comparison with None. `!= None` invokes the
        # potential's (possibly overloaded) __ne__, which is unreliable here.
        if pot_instance is not None:
            # remove 2D models
            if galpy.potential._dim(pot_instance) != 3:
                pot_instance = None
            # remove potential without c support
            if not _check_c(pot_instance):
                pot_instance = None
    return pot_module, pot_instance
|
998de5b58e93832499f33852fc4fb3970491de4f
| 3,646,614
|
from typing import Dict
def upload_processed_files(job_context: Dict) -> Dict:
    """Uploads the processed files and removes the temp dir for the job.

    If job_context contains a "files_to_upload" key then only those
    files will be uploaded. Otherwise all files will be uploaded.
    If job_context contains a "job_dir_prefix" key then that will be
    passed through to the file methods as the `dir_name` parameter.
    """
    if "files_to_upload" in job_context:
        files = job_context["files_to_upload"]
    else:
        files = File.objects.filter(batch__in=job_context["batches"])
    job_dir_prefix = job_context.get("job_dir_prefix", None)
    # Fix: with an empty file list, `files[0]` in the except/finally blocks
    # below would raise IndexError; nothing to upload or clean up.
    if not files:
        return job_context
    try:
        for file in files:
            file.upload_processed_file(job_dir_prefix)
    except Exception:
        logger.exception("Exception caught while uploading processed file %s",
                         batch=files[0].batch.id,
                         processor_job=job_context["job_id"])
        job_context["job"].failure_reason = "Exception caught while uploading processed file."
        job_context["success"] = False
        return job_context
    finally:
        # Whether or not uploading was successful, the job is over so
        # clean up the temp directory.
        files[0].remove_temp_directory(job_dir_prefix)
    return job_context
|
cb43f13e47ab825e1376a3421063fe0a21ff9345
| 3,646,615
|
def update_hook(branch, from_rev, to_rev, installdir):
    """ Function to be called from the update hook.

    Runs the configured hooks and the commit-message check against the pushed
    changeset; both must pass.
    """
    # A null "from" revision means the ref is new; diff from the start commit.
    start_rev = gitinfo.START_COMMIT if from_rev == gitinfo.NULL_COMMIT else from_rev
    changeset_info = githook.UpdateGitInfo(branch, start_rev, to_rev)
    # Evaluate both checks unconditionally so every problem gets reported.
    hooks_ok = run_hooks(changeset_info, installdir)
    messages_ok = message_check.check_messages(changeset_info.commit_messages())
    return hooks_ok and messages_ok
|
4526ad1316ef823615bb41f80e09f7f0f256ba07
| 3,646,616
|
def definition():
    """
    Most recent student numbers and fees by set
    (i.e. by year, cost centre and set category),
    aggregated by fee, aos code, session and fee category.

    Returns the SQL query text as a string; it is not executed here.
    """
    sql = """
    select s.set_id, s.acad_year, s.costc,
    s.set_cat_id,
    fsc.description as set_cat_description,
    fs.fee_cat_id as fee_cat,
    cc.default_aos_code,
    n.aos_code, n.session,
    o.description as origin_description, o.origin_id,
    SUM(n.student_count) as student_count,
    a.fee_scheme_id,
    SUM(f.gross_fee-f.waiver) as individual_fee,
    SUM(n.student_count*(f.gross_fee-f.waiver)) as net_fee
    FROM s_number n
    INNER JOIN v_s_instance_mri i ON i.instance_Id = n.instance_id
    INNER JOIN f_set s ON s.set_id = i.set_id
    INNER JOIN fs_cost_centre cc ON cc.costc = s.costc
    INNER JOIN f_set_cat fsc ON fsc.set_cat_id = s.set_cat_id
    INNER JOIN s_fee_status fs ON fs.fee_status_id = n.fee_status_id
    INNER JOIN c_aos_code a ON a.aos_code = n.aos_code
    INNER JOIN s_fee f ON f.acad_year = s.acad_year
    AND f.fee_cat_id = fs.fee_cat_id
    AND f.fee_scheme_id = a.fee_scheme_id
    AND f.session = n.session
    INNER JOIN s_origin o ON o.origin_id = n.origin_id
    GROUP BY s.acad_year, s.costc, s.set_cat_id,
    fs.fee_cat_id,
    n.aos_code, n.session,
    a.fee_scheme_id, fsc.description,
    o.description, s.set_id,
    cc.default_aos_code,
    o.origin_id
    """
    return sql
|
18ded7340bf8786a531faf76c702e682bb44e0f3
| 3,646,617
|
def get_geo_distance(p1, p2, signed=False):
    """Return the distance in meters between two (lat, lon) points.

    :param p1: (lat, lon) of the first point
    :param p2: (lat, lon) of the second point
    :param signed: if True, negate the distance when p2 is south (lower lat)
        or east (higher lon) of p1
    """
    # Fix: geopy.distance.vincenty was removed in geopy 2.0; geodesic is the
    # recommended, more robust replacement.
    d = geopy.distance.geodesic(p1, p2).m  # .m for meters
    if signed:
        return -d if (p2[0] < p1[0] or p2[1] > p1[1]) else d
    # Fix: the `signed` flag was previously ignored and the sign was always
    # applied, contradicting the default signed=False.
    return d
|
162682ba9e45d2caad532d04e70cdeb9d6c02901
| 3,646,619
|
def in_yelling(channel):
    """
    checks that channel is #yelling
    exists for test mocking
    """
    resolved = bot.channels.get(channel)
    # Preserve the original truthiness contract: an unknown channel returns
    # the falsy lookup result itself.
    if not resolved:
        return resolved
    return resolved.name == "yelling"
|
3fafb60d81944badfd27136820569a25b2d0b7b9
| 3,646,621
|
def draw_adjacency_list():
    """Solution to exercise R-14.4.
    Draw an adjacency list representation of the undirected graph shown in
    Figure 14.1.
    ---------------------------------------------------------------------------
    Solution:
    ---------------------------------------------------------------------------
    I will re-use the edge labels from Exercise R-14.3:
    Snoeyink --- Goodrich       a
    Garg --- Goodrich           b
    Garg --- Tamassia           c
    Goldwasser --- Goodrich     d
    Goldwasser --- Tamassia     e
    Goodrich --- Tamassia       f
    Goodrich --- Vitter         g
    Goodrich --- Chiang         h
    Tamassia --- Tollis         i
    Tamassia --- Vitter         j
    Tamassia --- Preparata      k
    Tamassia --- Chiang         l
    Tollis --- Vitter           m
    Vitter --- Preparata        n
    Preparata --- Chiang        o
    The adjacency list V is a list of vertices v that each point to a
    collection I(v) that contains the incident edges of v.
    Snoeyink   --> {a}
    Garg       --> {b, c}
    Goldwasser --> {d, e}
    Goodrich   --> {a, b, d, f, g, h}
    Tamassia   --> {c, e, f, i, j, k, l}
    Vitter     --> {g, j, m, n}
    Chiang     --> {h, l, o}
    Tollis     --> {i, m}
    Preparata  --> {k, n, o}
    Note that each edge appears twice in the adjacency list, for a total of
    2*m = 2*15 = 30 edges.
    """
    # Written exercise: the docstring above is the answer; nothing to compute.
    return True
|
95c02ad974f2d964596cf770708ed11aa061ea49
| 3,646,622
|
def get_user_categories(user_id, public=False):
    """Get a user's categories.

    Arguments: user_id as int, Boolean 'public' (optional).
    Returns list of Category objects, all private or all public depending on
    the flag.
    """
    # Chained .filter() calls AND the conditions together, equivalent to the
    # single (cond1) & (cond2) expression.
    query = db_session.query(Category)
    query = query.filter(Category.user_id == user_id)
    query = query.filter(Category.public == public)
    return query.all()
|
fcc4ce90233771bb149120bc898ed52a12e46888
| 3,646,624
|
def dict_to_one(dp_dict):
    """Input a dictionary, return a dictionary that all items are set to one.
    Used for disable dropout, dropconnect layer and so on.
    Parameters
    ----------
    dp_dict : dictionary
        The dictionary contains key and number, e.g. keeping probabilities.
    Examples
    --------
    >>> dp_dict = dict_to_one( network.all_drop )
    >>> dp_dict = dict_to_one( network.all_drop )
    >>> feed_dict.update(dp_dict)
    """
    # dict.fromkeys maps every existing key to the constant 1.
    return dict.fromkeys(dp_dict, 1)
|
9d18b027a0458ca6e769a932f00705a32edcb3e7
| 3,646,625
|
def file_io_read_img_slice(path, slicing, axis, is_label, normalize_spacing=True, normalize_intensities=True, squeeze_image=True,adaptive_padding=4):
    """
    Read an image file and extract a single 2D slice from the 3D volume.

    :param path: file path
    :param slicing: int, index of the slice to extract along ``axis``
    :param axis: int in {1, 2, 3}, the (1-based) axis to slice along
    :param is_label: True if the img is a label map (disables intensity normalization)
    :param normalize_spacing: use the normalized spacing returned by the reader
    :param normalize_intensities: normalize the img intensities (forced off for labels)
    :param squeeze_image: squeeze singleton dimensions when reading
    :param adaptive_padding: padding the img to favored size (divided by certain number, here is 4), here using default 4, favored by cuda fft
    :return: (slice_2d, info) where info holds the 2D 'spacing' and 'img_size'
    :raises ValueError: if axis is outside 1-3
    """
    # Labels must never be intensity-normalized.
    normalize_intensities = False if is_label else normalize_intensities
    im, hdr, spacing, normalized_spacing = fileio.ImageIO().read(path, normalize_intensities, squeeze_image,adaptive_padding)
    if normalize_spacing:
        spacing = normalized_spacing
    # `img_slice` (renamed from `slice`, which shadowed the builtin) and the
    # corresponding 2D spacing for the remaining axes.
    if axis == 1:
        img_slice = im[slicing]
        slicing_spacing = spacing[1:]
    elif axis == 2:
        img_slice = im[:, slicing, :]
        slicing_spacing = np.asarray([spacing[0], spacing[2]])
    elif axis == 3:
        img_slice = im[:, :, slicing]
        slicing_spacing = spacing[:2]
    else:
        raise ValueError("slicing axis exceed, should be 1-3")
    info = {'spacing': slicing_spacing, 'img_size': img_slice.shape}
    return img_slice, info
|
a55bb95bf152e144b7260716c60262f90f1650f4
| 3,646,626
|
def is_replaced_image(url):
    """
    >>> is_replaced_image('https://rss.anyant.com/123.jpg?rssant=1')
    True
    """
    # Preserve the original contract: a falsy url is returned as-is.
    if not url:
        return url
    return RSSANT_IMAGE_TAG in url
|
292e04bd8dceb2dcc8b2c58dc24142158848c221
| 3,646,628
|
def get_raw_segment(fast5_fn, start_base_idx, end_base_idx, basecall_group='Basecall_1D_000',
                    basecall_subgroup='BaseCalled_template'):
    """
    Get the raw signal segment given the start and end snp_id of the sequence.
    fast5_fn: input fast5 file name.
    start_base_idx: start snp_id of the sequence (0-based)
    end_base_idx: end snp_id of the sequence (the snp_id is included)
    basecall_group: group name to search for base information.
    basecall_subgroup: sub group name to search for base information.
        NOTE(review): this parameter is accepted but not used below --
        'BaseCalled_template' is hard-coded in the HDF5 path; confirm intent.
    e.g.
    get_raw_segment('test.fast5', 0, 10)
    Will return the signal corresponded to the 0-10 bases(The 0th and 10th base are both included.)
    """
    with h5py.File(fast5_fn, 'r') as root:
        base = root['Analyses/{}/BaseCalled_template'.format(basecall_group)]
        # NOTE(review): Dataset.value was removed in h5py 3.x (use [()]);
        # this code requires h5py < 3 -- confirm pinned version.
        # Token [2] of the Fastq record is taken as the sequence --
        # presumably the read id contains whitespace; verify against producer.
        fastq = base['Fastq'].value.split()[2]
        seg = fastq[start_base_idx:end_base_idx]
        event_h = base['Events']
        events = event_h.value
        raw_h = list(root['/Raw/Reads'].values())
        raw = raw_h[0]['Signal']
        start_time = None
        # Float event starts mean times in seconds (need start_time to convert
        # to sample indices); integer starts are already raw sample indices.
        if (type(events[0][1]) is np.float64) or (type(events[0][1]) is np.float32):
            start_time = event_h.attrs['start_time']
        # Cumulative base position after each event; events[i][5] is assumed
        # to be the per-event move count -- TODO confirm column layout.
        pos = list()
        pos_idx = 0
        for event in events:
            pos_idx += event[5]
            pos.append(pos_idx)
        # First event covering start_base_idx and last event before the base
        # position passes end_base_idx.
        start_idx = next(x[0] for x in enumerate(pos) if x[1] >= start_base_idx)
        end_idx = next(x[0] - 1 for x in enumerate(pos) if x[1] > end_base_idx)
        if start_time is None:
            raw_start = events[start_idx][1]
            raw_end = events[end_idx][1]
        else:
            # 0.00025 s = 1/4000 Hz sampling interval -- TODO confirm rate.
            raw_start = int((events[start_idx][1] - start_time) / 0.00025)
            raw_end = int((events[end_idx][1] - start_time) / 0.00025)
        seg_raw = raw[raw_start:raw_end]
        return seg_raw, seg
|
3f5ed060bacfa6feeb25dce02a3829d2b7496ee9
| 3,646,629
|
def center_distance(gt_box: EvalBox, pred_box: EvalBox) -> float:
    """
    L2 distance between the box centers (xy only).
    :param gt_box: GT annotation sample.
    :param pred_box: Predicted sample.
    :return: L2 distance.
    """
    gt_xy = np.array(gt_box.translation[:2])
    pred_xy = np.array(pred_box.translation[:2])
    return np.linalg.norm(pred_xy - gt_xy)
|
7b5106dca604d6c1c18e6f4c3d913a78c37577bb
| 3,646,630
|
def Dc(z, unit, cosmo):
    """Comoving distance at redshift ``z``.

    Input:
     z: redshift
     unit: distance unit in kpc, Mpc, ...
     cosmo: cosmology object providing ``comoving_distance``
    Output:
     comoving distance expressed in ``unit``
    """
    # Quantity -> plain number in the requested unit.  (A *cosmo.h factor was
    # deliberately left out in the original; kept that way.)
    return cosmo.comoving_distance(z).to_value(unit)
|
02985b75bd24b2a18b07f7f3e158f3c6217fdf18
| 3,646,631
|
def _get_all_answer_ids(
    column_ids,
    row_ids,
    questions,
):
    """Maps lists of questions with answer coordinates to token indexes.

    Returns a 0/1 vector over tokens (1 marks a token inside an answer cell)
    and the count of answer coordinates for which no token was located.
    """
    answer_ids = [0] * len(column_ids)
    all_answers = set()
    found_answers = set()
    for question in questions:
        for answer in question.answer.answer_coordinates:
            coordinate = (answer.column_index, answer.row_index)
            all_answers.add(coordinate)
            # A coordinate counts as found only if at least one token maps
            # to its cell.
            for index in _get_cell_token_indexes(column_ids, row_ids,
                                                 answer.column_index,
                                                 answer.row_index):
                found_answers.add(coordinate)
                answer_ids[index] = 1
    missing_count = len(all_answers) - len(found_answers)
    return answer_ids, missing_count
|
260ae3e32d6c56b63d801e75e22622b9356cb44a
| 3,646,632
|
def transduce(source, transducer) -> ObservableBase:
    """Execute a transducer to transform the observable sequence.
    Keyword arguments:
    :param Transducer transducer: A transducer to execute.
    :returns: An Observable sequence containing the results from the
        transducer.
    :rtype: Observable
    """
    def subscribe(observer, scheduler=None):
        # Wrap the downstream observer so the transducer can drive it.
        xform = transducer(Observing(observer))
        def on_next(value):
            try:
                xform.step(observer, value)
            except Exception as exn:
                # Route transducer failures to the observer instead of raising.
                observer.on_error(exn)
        def on_completed():
            # Let the transducer flush any pending state on completion.
            xform.complete(observer)
        return source.subscribe_(on_next, observer.on_error, on_completed)
    return AnonymousObservable(subscribe)
|
8d0a3c12b16cd4984e9e49d66a459947d5f1315f
| 3,646,633
|
def get_phonopy_gibbs(
    energies,
    volumes,
    force_constants,
    structure,
    t_min,
    t_step,
    t_max,
    mesh,
    eos,
    pressure=0,
):
    """
    Compute QHA gibbs free energy using the phonopy interface.
    Args:
        energies (list):
        volumes (list):
        force_constants (list):
        structure (Structure):
        t_min (float): min temperature
        t_step (float): temperature step
        t_max (float): max temperature
        mesh (list/tuple): reciprocal space density
        eos (str): equation of state used for fitting the energies and the volumes.
            options supported by phonopy: vinet, murnaghan, birch_murnaghan
        pressure (float): in GPa, optional.
    Returns:
        (numpy.ndarray, numpy.ndarray): Gibbs free energy, Temperature
    """
    # quasi-harmonic approx
    phonopy_qha = get_phonopy_qha(
        energies,
        volumes,
        force_constants,
        structure,
        t_min,
        t_step,
        t_max,
        mesh,
        eos,
        pressure=pressure,
    )
    # gibbs free energy and temperature
    # NOTE(review): reaches into phonopy's private attributes (_qha._len,
    # _qha._temperatures), which is fragile across phonopy versions --
    # confirm the pinned version exposes them.
    max_t_index = phonopy_qha._qha._len
    G = phonopy_qha.get_gibbs_temperature()[:max_t_index]
    T = phonopy_qha._qha._temperatures[:max_t_index]
    return G, T
|
df162a9cfc95417ba684caadba776243c2eb867d
| 3,646,635
|
def forgot():
    """
    Allows an administrator to state they forgot their password,
    triggering a email for further instructions on how to reset their password.
    """
    # Logged-in users don't need the reset flow; send them home.
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RequestResetPasswordForm()
    if form.validate_on_submit():
        user = Administrator.query.filter_by(email=form.email.data).first()
        if user:
            send_password_reset_email(user)
        # The same message is flashed whether or not the address matched, so
        # this form does not reveal which emails have accounts.
        flash('Check your email for the instructions to reset your password')
        return redirect(url_for('auth.login'))
    # GET request or failed validation: (re-)render the form.
    return render_template('forgot_password.html', title='Reset Password', form=form)
|
7e91014fbbc57c5a0e96a306865d1ff13dd1ddbe
| 3,646,636
|
def diffractionAngle(inc):
    """Return the diffraction angle for the UV yaw system.

    Input: graze angle in degrees.
    Output: diffraction graze angle in degrees.
    """
    # Direction sine of the incident ray (graze angle measured from surface).
    sin_incident = np.sin((90.-inc)*np.pi/180)
    # Grating equation shift: wavelength 266 nm over period 160 nm.
    sin_diffracted = sin_incident - 266e-9/160e-9
    return 90 - np.arcsin(np.abs(sin_diffracted))*180/np.pi
|
334dfb79c1d7ea39ed2b89ebb0592dbaf166144a
| 3,646,637
|
def get_subject(email):
    """
    Takes an email Message object and returns the Subject as a string,
    decoding RFC 2047 encoded-word (e.g. base64) subject lines as necessary.
    """
    subject = email.get('Subject', '')
    parts = []
    for text, charset in decode_header(subject):
        if isinstance(text, bytes):
            # Fix: decode with the charset declared in the encoded word.
            # The old code used only the FIRST fragment and decoded bytes via
            # 'unicode_escape', which mangles any non-latin charset.
            parts.append(text.decode(charset or 'ascii', errors='replace'))
        else:
            parts.append(text)
    return ''.join(parts)
|
c0e34d63532688827050d427479dd79f43cda48f
| 3,646,639
|
def distort(dist_mat, mat):
    """Apply distortion matrix to lattice vectors or sites.

    Coordinates are assumed to be Cartesian.

    :param dist_mat: 3x3 distortion matrix D
    :param mat: N x 3 array of row vectors
    :return: N x 3 float array whose i-th row is D @ mat[i]
    """
    dist = np.asarray(dist_mat, dtype=float)
    rows = np.asarray(mat, dtype=float)
    # Row-wise D @ v for every row v is exactly mat @ D.T — one BLAS call
    # instead of a Python loop.  Computing in float also fixes the silent
    # truncation the old code hit when `mat` had an integer dtype.
    return rows @ dist.T
|
caa28cef38a7ac827b67100fe17479a48dfb72fd
| 3,646,640
|
from ._sample import sample_
from typing import Union
import typing
from typing import Any
from typing import Optional
import abc
from typing import Callable
def sample(
    sampler: Union[typing.RelativeTime, Observable[Any]],
    scheduler: Optional[abc.SchedulerBase] = None,
) -> Callable[[Observable[_T]], Observable[_T]]:
    """Samples the observable sequence at each interval.
    .. marble::
        :alt: sample
        ---1-2-3-4------|
        [    sample(4)  ]
        ----1---3---4---|
    Examples:
        >>> res = sample(sample_observable) # Sampler tick sequence
        >>> res = sample(5.0) # 5 seconds
    Args:
        sampler: Observable used to sample the source observable **or** time
            interval at which to sample (specified as a float denoting
            seconds or an instance of timedelta).
        scheduler: Scheduler to use only when a time interval is given.
    Returns:
        An operator function that takes an observable source and
        returns a sampled observable sequence.
    """
    # Thin public wrapper: all logic lives in the internal ._sample.sample_.
    return sample_(sampler, scheduler)
|
a127926dbffa7035f0da67edf138403a5df66d59
| 3,646,641
|
def get_genome_dir(infra_id, genver=None, annver=None, key=None):
    """Return the genome directory name from infra_id and optional arguments.

    The name is dot-joined: ``<infra_id>[.gnm<genver>][.ann<annver>][.<key>]``.
    """
    parts = [f"{infra_id}"]
    if genver is not None:
        parts.append(f"gnm{genver}")
    if annver is not None:
        parts.append(f"ann{annver}")
    if key is not None:
        parts.append(f"{key}")
    return ".".join(parts)
|
ab033772575ae30ae346f96aed840c48fb01c556
| 3,646,642
|
from typing import Callable
from typing import Any
def stream_with_context(func: Callable) -> Callable:
    """Share the current request context with a generator.
    This allows the request context to be accessed within a streaming
    generator, for example,
    .. code-block:: python
        @app.route('/')
        def index() -> AsyncGenerator[bytes, None]:
            @stream_with_context
            async def generator() -> bytes:
                yield request.method.encode()
                yield b' '
                yield request.path.encode()
            return generator()
    """
    # Snapshot the active request context now, so the (possibly later-run)
    # generator can re-enter it even after the original request has returned.
    request_context = _request_ctx_stack.top.copy()
    @wraps(func)
    async def generator(*args: Any, **kwargs: Any) -> Any:
        # Re-enter the copied context for the lifetime of the iteration.
        async with request_context:
            async for data in func(*args, **kwargs):
                yield data
    return generator
|
ebfe2fc660fb68803ddc0517a3a5708748fe88ea
| 3,646,643
|
def silence_removal(signal, sampling_rate, st_win, st_step, smooth_window=0.5, weight=0.5, plot=False):
    """
    Event Detection (silence removal)
    ARGUMENTS:
         - signal:                the input audio signal
         - sampling_rate:         sampling freq
         - st_win, st_step:       window size and step in seconds
         - smooth_window:         (optional) smooth window (in seconds)
         - weight:                (optional) weight factor (0 < weight < 1)
                                  the higher, the more strict
         - plot:                  (optional) accepted for API compatibility;
                                  not used in this implementation
    RETURNS:
         - seg_limits:    list of segment limits in seconds (e.g [[0.1, 0.9],
                          [1.4, 3.0]] means that
                          the resulting segments are (0.1 - 0.9) seconds
                          and (1.4, 3.0) seconds
    """
    # Clamp weight into the open interval (0, 1).
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    # Step 1: feature extraction
    # signal = audioBasicIO.stereo_to_mono(signal)
    st_feats, _ = feature_extraction(signal, sampling_rate,
                                     st_win * sampling_rate,
                                     st_step * sampling_rate)
    # Step 2: train binary svm classifier of low vs high energy frames
    # keep only the energy short-term sequence (2nd feature)
    st_energy = st_feats[1, :]
    en = np.sort(st_energy)
    # number of 10% of the total short-term windows
    st_windows_fraction = int(len(en) / 10)
    # compute "lower" 10% energy threshold (epsilon avoids a zero threshold)
    low_threshold = np.mean(en[0:st_windows_fraction]) + 1e-15
    # compute "higher" 10% energy threshold
    high_threshold = np.mean(en[-st_windows_fraction:-1]) + 1e-15
    # get all features that correspond to low energy
    low_energy = st_feats[:, np.where(st_energy <= low_threshold)[0]]
    # get all features that correspond to high energy
    high_energy = st_feats[:, np.where(st_energy >= high_threshold)[0]]
    # form the binary classification task and ...
    features = [low_energy.T, high_energy.T]
    # normalize and train the respective svm probabilistic model
    # (ONSET vs SILENCE)
    features_norm, mean, std = normalize_features(features)
    svm = train_svm(features_norm, 1.0)
    # Step 3: compute onset probability based on the trained svm
    prob_on_set = []
    for index in range(st_feats.shape[1]):
        # for each frame: z-normalize with the training statistics
        cur_fv = (st_feats[:, index] - mean) / std
        # get svm probability (that it belongs to the ONSET class)
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1, -1))[0][1])
    prob_on_set = np.array(prob_on_set)
    # smooth probability:
    prob_on_set = smooth_moving_avg(prob_on_set, smooth_window / st_step)
    # Step 4A: detect onset frame indices:
    prog_on_set_sort = np.sort(prob_on_set)
    # find probability Threshold as a weighted average
    # of top 10% and lower 10% of the values
    nt = int(prog_on_set_sort.shape[0] / 10)
    threshold = (np.mean((1 - weight) * prog_on_set_sort[0:nt]) +
                 weight * np.mean(prog_on_set_sort[-nt::]))
    max_indices = np.where(prob_on_set > threshold)[0]
    # get the indices of the frames that satisfy the thresholding
    index = 0
    seg_limits = []
    time_clusters = []
    # Step 4B: group frame indices to onset segments
    while index < len(max_indices):
        # for each of the detected onset indices: grow a cluster of indices
        # that are at most 2 frames apart
        cur_cluster = [max_indices[index]]
        if index == len(max_indices)-1:
            break
        while max_indices[index+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_indices[index+1])
            index += 1
            if index == len(max_indices)-1:
                break
        index += 1
        time_clusters.append(cur_cluster)
        # convert the frame cluster to [start, end] times in seconds
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Step 5: Post process: remove very small segments:
    min_duration = 0.2
    seg_limits_2 = []
    for s_lim in seg_limits:
        if s_lim[1] - s_lim[0] > min_duration:
            seg_limits_2.append(s_lim)
    return seg_limits_2
|
0b6f3efab0c04a52af2667a461b9f2db7a256a1b
| 3,646,644
|
import signal
def extrema(x):
    """
    Gets the local minima points from a time series, including endpoints
    when they qualify. Note that the indices will start counting from 1 to
    match MatLab.

    Args:
        x: time series vector
    Returns:
        imin: 1-based indices of the local minima, sorted by increasing value
    """
    x = np.asarray(x)
    # Strict local minima (smaller than both neighbours).
    imin = signal.argrelextrema(x, np.less)[0]
    # Fix: endpoint checks need at least two samples, otherwise x[-2]/x[1]
    # raise IndexError on a length-1 series.
    if x.size > 1:
        if x[-1] < x[-2]:  # Check last point
            imin = np.append(imin, len(x) - 1)
        if x[0] < x[1]:  # Check first point
            imin = np.insert(imin, 0, 0)
    # Order the minima by their value (ascending).
    xmin = x[imin]
    minorder = np.argsort(xmin)
    imin = imin[minorder]
    return imin + 1
|
b4464398acb502741a48fe713438b2ebdc3d3063
| 3,646,646
|
import array
def d2X_dt2_Vanderpol(X, t=0):
    """Return the Jacobian matrix of the Van der Pol system evaluated at X.

    X is the state vector (position, velocity); t is unused but kept for
    ODE-solver call conventions. Relies on module-level parameters ``r`` and
    ``w``. NOTE(review): ``array`` must be numpy's constructor — the stdlib
    ``array`` module is not callable; confirm the module's imports.
    """
    position, velocity = X[0], X[1]
    return array([[0, 1],
                  [-2 * r * velocity * position - w ** 2, r * (1 - position ** 2)]])
|
47fff4981e130ac0140884553ff083fef501e479
| 3,646,647
|
def domain(domain):
    """Locate the given domain in our database and render an info page
    for it; respond 404 when it is unknown.
    """
    current_app.logger.info('domain [%s]' % domain)
    g.domain = current_app.iwn.domain(domain)
    # unknown domain -> empty 404 body; otherwise render from the template
    if g.domain is None:
        return Response('', 404)
    return render_template('domain.jinja')
|
0f987c538835aa2b7eb6bb95643465a990246b39
| 3,646,648
|
def gaussian_filter(img, kernel_size, sigma=0):
    """Blur the image with a Gaussian kernel: each output pixel is a
    distance-weighted average of its neighbourhood around the center pixel.
    """
    blurred = cv2.GaussianBlur(img, ksize=kernel_size, sigmaX=sigma, sigmaY=sigma)
    return blurred
|
ef2c22528f88ccfa1bdae28dd1b814a01cae0261
| 3,646,649
|
def extractBillAdoptedLinks(soup):
    """Extract list of links for Adopted Bill Texts (HTML & PDF Versions)

    Returns a dict {'label', 'pdf', 'html'} for the matching row, or an
    empty dict when no Adopted/Ratified row is found.
    """
    tables = soup.find_all("table")
    # last table whose text starts with the summaries banner is the content table
    content_table = [t for t in tables if t.text.strip().startswith("View Available Bill Summaries")][-1]
    adopted_links = {}
    for row in content_table.find_all('tr'):
        cols = row.find_all('td')
        if len(cols) > 1:
            # normalize the label: drop the "[HTML]" suffix and non-breaking spaces
            label = cols[0].text.strip('[HTML]').strip().encode('utf8').replace(b'\xc2\xa0', b' ')
            if label in [b'Adopted', b'Ratified']:
                links = cols[0].find_all('a')
                # NOTE(review): links[0] is assumed to be the PDF and links[1]
                # the HTML version — confirm against the page layout. Also,
                # adopted_links is overwritten on each matching row, so only
                # the last Adopted/Ratified row survives — confirm intended.
                pdf_link = links[0]['href']
                html_link = links[1]['href']
                adopted_links = {'label' : label, 'pdf' : pdf_link, 'html' : html_link}
    return(adopted_links)
|
58089f525beece643a617bd9a208a653e2b7e5b8
| 3,646,650
|
def uniq(string):
    """Remove duplicate words from a string (keeping only the first
    occurrence of each word). The order of the words is preserved.

    Parameters
    ----------
    string : str
        Whitespace-separated words.

    Returns
    -------
    str
        The words joined by single spaces, duplicates dropped.
    """
    words = string.split()
    # dict.fromkeys preserves insertion order (Python 3.7+) and deduplicates
    # in O(n); the previous sorted(set(...), key=words.index) was O(n^2).
    return ' '.join(dict.fromkeys(words))
|
2e5b6c51bc90f3a2bd7a4c3e845f7ae330390a76
| 3,646,651
|
def l2_regularizer(
    params: kfac_jax.utils.Params,
    haiku_exclude_batch_norm: bool,
    haiku_exclude_biases: bool,
) -> chex.Array:
    """Computes 0.5 * ||params||^2 over the selected subset of parameters."""
    selected = params
    if haiku_exclude_batch_norm:
        # drop every parameter that lives inside a batch-norm module
        selected = hk.data_structures.filter(
            lambda module, name, value: "batchnorm" not in module, selected)
    if haiku_exclude_biases:
        # drop bias vectors (haiku names them "b")
        selected = hk.data_structures.filter(
            lambda module, name, value: name != "b", selected)
    return 0.5 * kfac_jax.utils.inner_product(selected, selected)
|
a13966229968aa40f27a1c5c1eb3bc8d3f578aca
| 3,646,652
|
def _normalize(log_weights):
    """Normalize log-weights into weights; also return the log-likelihood increment.

    Uses the max-subtraction trick for numerical stability. The returned
    weights sum to 1; the increment is log(mean(exp(log_weights))).
    """
    n = log_weights.shape[0]
    log_max = jnp.max(log_weights)
    unnormalized = jnp.exp(log_weights - log_max)
    mean_weight = unnormalized.mean()
    log_likelihood_increment = jnp.log(mean_weight) + log_max
    normalized = unnormalized / (n * mean_weight)
    return normalized, log_likelihood_increment
|
1abaf1dd818e5012db8ec8ad5de1e718f934af54
| 3,646,653
|
import multiprocessing
def pq_compute_multi_core(matched_annotations_list,
                          gt_folder,
                          pred_folder,
                          categories,
                          file_client=None,
                          nproc=32):
    """Evaluate the metrics of Panoptic Segmentation with multithreading.
    Same as the function with the same name in `panopticapi`.
    Args:
        matched_annotations_list (list): The matched annotation list. Each
            element is a tuple of annotations of the same image with the
            format (gt_anns, pred_anns).
        gt_folder (str): The path of the ground truth images.
        pred_folder (str): The path of the prediction images.
        categories (str): The categories of the dataset.
        file_client (object): The file client of the dataset. If None,
            the backend will be set to `disk`.
        nproc (int): Number of processes for panoptic quality computing.
            Defaults to 32. When `nproc` exceeds the number of cpu cores,
            the number of cpu cores is used.
    Returns:
        PQStat: accumulated panoptic-quality statistics over all workers.
    Raises:
        RuntimeError: if `panopticapi` is not installed.
    """
    if PQStat is None:
        raise RuntimeError(
            'panopticapi is not installed, please install it by: '
            'pip install git+https://github.com/cocodataset/'
            'panopticapi.git.')
    if file_client is None:
        file_client_args = dict(backend='disk')
        file_client = mmcv.FileClient(**file_client_args)
    # cap the worker count at the available cores
    cpu_num = min(nproc, multiprocessing.cpu_count())
    # one annotation shard per worker
    annotations_split = np.array_split(matched_annotations_list, cpu_num)
    print('Number of cores: {}, images per core: {}'.format(
        cpu_num, len(annotations_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, annotation_set in enumerate(annotations_split):
        p = workers.apply_async(pq_compute_single_core,
                                (proc_id, annotation_set, gt_folder,
                                 pred_folder, categories, file_client))
        processes.append(p)
    # Close the process pool, otherwise it will lead to memory
    # leaking problems.
    workers.close()
    workers.join()
    # merge per-worker PQStat accumulators into one
    pq_stat = PQStat()
    for p in processes:
        pq_stat += p.get()
    return pq_stat
|
354a4c6133d8d2030694e71716e7beb487f845a0
| 3,646,654
|
def logout():
    """
    Function that handles logout of user
    ---
    POST:
        description: remove curent user in the session
        responses:
            200:
                description:
                    Successfuly log out user from the session.
    """
    logout_user()  # flask logout library
    # NOTE(review): redirect() with code=200 is unusual — browsers only follow
    # 3xx redirects; confirm whether a 302 (or a plain 200 response) is intended.
    return redirect("/", code=200)
|
86fc7d43f2ec13bcf966f2ddc592dacd86f0d158
| 3,646,655
|
import ctypes
def header(data):
    """Create class based on decode of a PCI configuration space header from raw data."""
    # stage the raw bytes in a ctypes buffer so its address can be inspected
    raw = ctypes.create_string_buffer(data, len(data))
    base_addr = ctypes.addressof(raw)
    fields = header_field_list(base_addr)
    header_cls = header_factory(fields)
    return header_cls.from_buffer_copy(data)
|
ca3c4ee8f4f52e2f3b62ee183c858ae64a24e4d5
| 3,646,656
|
def FilterByKeyUsingSideInput(pcoll, lookup_entries, filter_key):
    """Filters a single collection by a single lookup collection, using a common key.
    Given:
    - a `PCollection` (lookup_entries) of `(V)`, as a lookup collection
    - a `PCollection` (pcoll) of `(V)`, as values to be filtered
    - a common key (filter_key)
    A dictionary called `filter_dict` - is created by mapping the value of `filter_key`
    for each entry in `lookup_entries` to True.
    Then, for each item in pcoll, the value associated with `filter_key` checkd against
    `filter_dict`, and if it is found, the entry passes through. Otherwise, the entry is
    discarded.
    Note: `lookup_entries` will be used as a **side input**, so care
    must be taken regarding the size of the `lookup_entries`
    """
    # Materialize {key_value: True} as a dict-shaped side input.
    filter_dict_prepared = beam.pvalue.AsDict(
        lookup_entries | beam.Map(lambda row: (row[filter_key], True))
    )
    def _filter_fn(row, filter_dict):
        # keep the row only when its key value appears in the lookup dict
        return row[filter_key] in filter_dict
    return pcoll | beam.Filter(_filter_fn, filter_dict=filter_dict_prepared)
|
6318c46bf8725e3bcc65329af5dd2feceae208c0
| 3,646,657
|
def sw_maxent_irl(x, xtr, phi, phi_bar, max_path_length, nll_only=False):
    """Maximum Entropy IRL using our exact algorithm
    Returns NLL and NLL gradient of the demonstration data under the proposed reward
    parameters x.
    N.b. the computed NLL here doesn't include the contribution from the MDP dynamics
    for each path - this term is independent of the parameter x, so doesn't affect the
    optimization result.
    Args:
        x (numpy array): Current reward function parameter vector estimate
        xtr (mdp_extras.BaseExtras): Extras object for the MDP being
            optimized
        phi (mdp_extras.FeatureFunction): Feature function to use with linear reward
            parameters. We require len(phi) == len(x).
        phi_bar (numpy array): Feature expectation. N.b. if using a weighted feature
            expectation, it is very important to make sure the weights you used
            sum to 1.0!
        max_path_length (int): Maximum path length
        nll_only (bool): If true, only return NLL
    Returns:
        (float): Negative Log Likelihood of a MaxEnt model with x as the reward
            parameters and the given feature expectation
        (numpy array): Downhill gradient of negative log likelihood at the given point
    """
    # Store current argument guess
    # Wrap the parameter vector as a linear reward function
    r_linear = Linear(x)
    if isinstance(xtr, DiscreteExplicitExtras):
        # Process tabular MDP
        # Explode reward function to indicator arrays
        rs, rsa, rsas = r_linear.structured(xtr, phi)
        # Catch float overflow as an error - reward magnitude is too large for
        # exponentiation with this max path length
        with np.errstate(over="raise"):
            # Compute backward message
            alpha_log = nb_backward_pass_log(
                xtr.p0s,
                max_path_length,
                xtr.t_mat,
                gamma=xtr.gamma,
                rs=rs,
                rsa=rsa,
                rsas=rsas,
            )
            # Compute partition value
            Z_theta_log = log_partition(
                max_path_length, alpha_log, padded=xtr.is_padded
            )
        # Compute NLL
        # NLL = log Z(theta) - theta . phi_bar for a MaxEnt linear model
        nll = Z_theta_log - x @ phi_bar
        if nll_only:
            return nll
        else:
            # Compute gradient
            with np.errstate(over="raise"):
                # Compute forward message
                beta_log = nb_forward_pass_log(
                    max_path_length,
                    xtr.t_mat,
                    gamma=xtr.gamma,
                    rs=rs,
                    rsa=rsa,
                    rsas=rsas,
                )
                # Compute transition marginals
                pts_log, ptsa_log, ptsas_log = nb_marginals_log(
                    max_path_length,
                    xtr.t_mat,
                    alpha_log,
                    beta_log,
                    Z_theta_log,
                    gamma=xtr.gamma,
                    rsa=rsa,
                    rsas=rsas,
                )
            # Compute gradient based on feature type
            # Gradient = E[feature vector under current model] - phi_bar
            if phi.type == Disjoint.Type.OBSERVATION:
                s_counts = np.sum(np.exp(pts_log), axis=-1)
                efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)
                nll_grad = efv_s - phi_bar
            elif phi.type == Disjoint.Type.OBSERVATION_ACTION:
                sa_counts = np.sum(np.exp(ptsa_log), axis=-1)
                efv_sa = np.sum(
                    [
                        sa_counts[s1, a] * phi(s1, a)
                        for s1 in xtr.states
                        for a in xtr.actions
                    ],
                    axis=0,
                )
                nll_grad = efv_sa - phi_bar
            elif phi.type == Disjoint.Type.OBSERVATION_ACTION_OBSERVATION:
                sas_counts = np.sum(np.exp(ptsas_log), axis=-1)
                efv_sas = np.sum(
                    [
                        sas_counts[s1, a, s2] * phi(s1, a, s2)
                        for s1 in xtr.states
                        for a in xtr.actions
                        for s2 in xtr.states
                    ],
                    axis=0,
                )
                nll_grad = efv_sas - phi_bar
            else:
                raise ValueError
            return nll, nll_grad
    elif isinstance(xtr, DiscreteImplicitExtras):
        # Handle Implicit dynamics MDP
        # Only supports state features - otherwise we run out of memory
        assert (
            phi.type == phi.Type.OBSERVATION
        ), "For DiscreteImplicit MPDs only state-based rewards are supported"
        # Only supports deterministic transitions
        assert (
            xtr.is_deterministic
        ), "For DiscreteImplicit MPDs only deterministic dynamics are supported"
        # Per-state reward vector under the current parameters
        rs = np.array([r_linear(phi(s)) for s in xtr.states])
        # Catch float overflow as an error - reward magnitude is too large for
        # exponentiation with this max path length
        with np.errstate(over="raise"):
            # Compute alpha_log
            alpha_log = nb_backward_pass_log_deterministic_stateonly(
                xtr.p0s,
                max_path_length,
                xtr.parents_fixedsize,
                rs,
                gamma=xtr.gamma,
                padded=xtr.is_padded,
            )
            # Compute partition value
            Z_theta_log = log_partition(
                max_path_length, alpha_log, padded=xtr.is_padded
            )
        # Compute NLL
        nll = Z_theta_log - x @ phi_bar
        if nll_only:
            return nll
        else:
            # Compute NLL gradient as well
            with np.errstate(over="raise"):
                # Compute beta_log
                beta_log = nb_forward_pass_log_deterministic_stateonly(
                    max_path_length, xtr.children_fixedsize, rs, gamma=xtr.gamma
                )
                # Compute transition marginals pts_log (not ptsa, ptsas)
                pts_log = nb_marginals_log_deterministic_stateonly(
                    max_path_length,
                    xtr.children_fixedsize,
                    alpha_log,
                    beta_log,
                    Z_theta_log,
                )
            # Compute gradient
            s_counts = np.sum(np.exp(pts_log), axis=-1)
            efv_s = np.sum([s_counts[s] * phi(s) for s in xtr.states], axis=0)
            nll_grad = efv_s - phi_bar
            return nll, nll_grad
    else:
        # Unknown MDP type
        raise ValueError(f"Unknown MDP class {xtr}")
|
fd52750ac8cff60cdce3a6bce09b8bd0b900e08f
| 3,646,658
|
def __multiprocess_point_in_poly(df: pd.DataFrame,
                                 x: str,
                                 y: str,
                                 poly: Polygon):
    """
    Select the rows of *df* whose (x, y) coordinates fall inside *poly*.

    Parameters
    ----------
    df: Pandas.DataFrame
        Data to query
    x: str
        name of x-axis plane
    y: str
        name of y-axis plane
    poly: shapely.geometry.Polygon
        Polygon object to search

    Returns
    --------
    Pandas.DataFrame
        Masked DataFrame containing only those rows that fall within the Polygon
    """
    # boolean mask: True where the row's point lies inside the polygon
    inside = df.apply(lambda row: poly.contains(Point(row[x], row[y])), axis=1)
    return df.loc[inside]
|
dc89af19f17c79fd4b5c74d183e6fb6d89d14bec
| 3,646,659
|
import time
def print_runtime(func, create_global_dict=True):
    """
    Timer decorator that prints how long the wrapped function took to run.

    Parameters
    ----------
    func : callable
        Function to wrap.
    create_global_dict : bool
        Unused; kept for backward compatibility. (The originally advertised
        global timing dict was never implemented.)

    Returns
    -------
    callable
        Wrapper that prints the elapsed wall-clock time and returns the
        wrapped function's result.
    """
    import functools  # local import keeps the decorator self-contained

    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def function_timer(*args, **kwargs):
        """Time a single invocation of the wrapped function."""
        start = time.time()
        value = func(*args, **kwargs)
        runtime = time.time() - start
        # Single-line message: the old backslash line-continuation embedded a
        # long run of spaces inside the printed string.
        print(f"The runtime for {func.__name__} took {round(runtime, 2)} seconds to complete")
        return value

    return function_timer
|
066b57281efad3fe672ba0737b783233dce9bef0
| 3,646,660
|
def generate_astrometry(kop, time_list):
    """
    Simulates observational data.
    :param kop: Keplerian orbit parameters
    :param time_list: List of observation times
    :return: astrometry
    """
    trajectory = generate_complete_trajectory(kop, time_list)
    # transpose so each coordinate axis is a row, then pick the observables:
    # sky-plane positions (x, y) and line-of-sight velocity (vz)
    positions = trajectory['position'].T
    velocities = trajectory['velocity'].T
    return {
        't': time_list,
        'x': positions[0],
        'y': positions[1],
        'vz': velocities[2],
    }
|
9fc52b7cf4fa8b74e19bf3b0e416895fa40f8fee
| 3,646,661
|
import io
def _open_remote(file_ref):
    """Retrieve an open handle to a file.
    """
    # fetch the remote object's contents via gsutil, then expose them
    # through an in-memory text stream
    contents = _run_gsutil(["cat", file_ref]).decode()
    return io.StringIO(contents)
|
ea7571ed04b98e2aca72177a9c980aedf1647d1e
| 3,646,662
|
def fit(image, labels, featurizer="../model/saved_model/UNet_hpa_4c_mean_8.pth"):
    """Train a pixel classifier.
    Parameters
    ----------
    image: np.ndarray
        Image data to be classified; 2-D (w, h) or 3-D (c, w, h).
    labels: np.ndarray
        Sparse classification, where 0 pixels are ingored, and other integer
        values correspond to class membership.
    featurizer: str, optional
        Either the literal string "filter" (filter-bank features) or a path
        to a saved U-Net checkpoint used by ``unet_featurize``.
    Returns
    ----------
    classifier: sklearn.ensemble.RandomForestClassifier
        Object that can perform classifications
    features: np.ndarray
        Per-pixel feature volume computed for ``image``.
    """
    print(featurizer)
    # pad input image
    # Pad width/height up to a multiple of 16 for the featurizer.
    # NOTE(review): when (16 - dim % 16) is odd, the integer halving
    # under-pads by one pixel, so the padded size may not be divisible
    # by 16 — confirm inputs always have even remainders.
    w,h = image.shape[-2:]
    w_padding = int((16-w%16)/2) if w%16 >0 else 0
    h_padding = int((16-h%16)/2) if h%16 >0 else 0
    if len(image.shape) == 3:
        image = np.pad(image, ((0,0),(w_padding, w_padding),(h_padding, h_padding)), 'constant')
    elif len(image.shape) == 2:
        image = np.pad(image, ((w_padding, w_padding),(h_padding, h_padding)), 'constant')
    # make sure image has four dimentions (b,c,w,h)
    while len(image.shape) < 4:
        image = np.expand_dims(image, 0)
    image = np.transpose(image, (1,0,2,3))
    # choose filter or unet featurizer
    if featurizer == "filter":
        features = filter_featurize(image)
    else:
        features = unet_featurize(image, featurizer)
    # crop out paddings
    if w_padding > 0:
        features = features[w_padding:-w_padding]
    if h_padding > 0:
        features = features[:,h_padding:-h_padding]
    # reshape and extract data
    # flatten to (n_pixels, n_features) and keep only labelled pixels
    X = features.reshape([-1, features.shape[-1]])
    y = labels.reshape(-1)
    X = X[y != 0]
    y = y[y != 0]
    # define and fit classifier
    clf = RandomForestClassifier(n_estimators=10)
    # with no labelled pixels the classifier is returned unfitted
    if len(X) > 0:
        clf = clf.fit(X, y)
    return clf, features
|
dbd6e40b0d6ef363a55ad8f15ec2859d16c676e4
| 3,646,663
|
import json
import threading
def himydata_client(args):
    """Returns an instance of Himydata Client or DryRunClient"""
    # dry-run mode never talks to the service
    if args.dry_run:
        return DryRunClient()
    with open(args.config) as config_file:
        config = json.load(config_file)
    # anonymous usage collection is opt-out via "disable_collection"
    if not config.get('disable_collection', True):
        logger.info('Sending version information to stitchdata.com. ' +
                    'To disable sending anonymous usage data, set ' +
                    'the config parameter "disable_collection" to true')
        threading.Thread(target=collect).start()
    missing_fields = []
    if 'client_id' in config:
        client_id = config['client_id']
    else:
        missing_fields.append('client_id')
    himydata_url = config.get('himydata_url', DEFAULT_HIMYDATA_URL)
    if 'api_key' in config:
        api_key = config['api_key']
    else:
        missing_fields.append('api_key')
    if missing_fields:
        raise Exception('Configuration is missing required fields: {}'
                        .format(missing_fields))
    return Client(client_id, api_key, himydata_url=himydata_url,
                  callback_function=write_last_state)
|
21f704868c583d85c4d095da88654e729f28ef28
| 3,646,664
|
def KeywordString():
    """Returns the specified Keyword String
    @note: not used by most modules
    """
    # ST_KEYWORDS is a module-level sequence; element 1 holds the keyword
    # string itself (presumably element 0 is an identifier — confirm against
    # the module that defines ST_KEYWORDS).
    return ST_KEYWORDS[1]
|
f43b32c32c16998d91c2dbf0cf6173f402c89333
| 3,646,665
|
def coo_index_to_data(index):
    """
    Convert a 0-based (row, col) data-table index to the 1-based,
    pixel-centered (x, y) coordinates of the center of that pixel.

    index: (int, int)
        (row, col) index of the pixel in the data table
    """
    row, col = index[0], index[1]
    # x corresponds to the column, y to the row; both shifted to 1-based
    return (col + 1.0, row + 1.0)
|
5cf3ee2cc4ea234aaeb2d9de97e92b41c5daf149
| 3,646,666
|
def prepare_output_well(df, plates, output, rawdata, identifier_features, location_features):
    """ Prepare the output file with plate, row and column information
    Calculate penetrance and p-value
    Args:
        df: Existing combined dictionary
        plates: Plates in this screen
        output: Output filenames
        rawdata: Raw data (unused in this function's visible body)
        identifier_features: List of strain identifiers
        location_features: List of Plate - Row - Column - Filename
    Return:
        final_df_output: Combined outlier detection results
    """
    print('Preparing the output values by well...')
    log_write(output['log'], 'Preparing penetrance results by well...\n')
    # Create new dataframe from dict
    append_list = identifier_features + location_features + ['Is_Inlier']
    final_df = dataframe_from_dict(df, append_list)
    # Decide which column identifies a well: synthesize "Row_Col" when Row/Col
    # columns exist, otherwise fall back to a location feature named "*well*".
    # NOTE(review): if neither exists, well_identifier stays unbound and the
    # code below raises NameError — confirm inputs always provide one.
    if 'Row' in final_df.columns:
        well_identifier = 'Row_Col'
    else:
        for f in location_features:
            if 'well' in f.lower():
                well_identifier = f
    try:
        final_df[well_identifier] = final_df.Row.map(int).map(str) + '_' + final_df.Column.map(int).map(str)
    except AttributeError:
        final_df[well_identifier] = final_df[well_identifier].map(str)
    # Initialize output folder
    final_df_output = pd.DataFrame(columns = identifier_features + location_features +
                                   ['Num_cells', 'Penetrance', 'P-value'])
    this_row = 0
    # Regroup this dataframes by plates then row column info
    # WT counts are the hypergeometric reference population
    WT_cells, WT_cells_outliers = p_value(df)
    plate_column = 'Plate'
    for p in plates:
        final_df_plate = final_df[final_df[plate_column] == p]
        # Regroup this dataframes by Row and Column
        row_col = final_df_plate[well_identifier].unique().tolist()
        for rc in row_col:
            df_rc = final_df_plate[final_df_plate[well_identifier] == rc]
            is_inlier_rc = np.asarray(df_rc['Is_Inlier'])
            num_cells = df_rc.shape[0]
            num_outliers = sum(is_inlier_rc == 0)
            # penetrance = percentage of outlier cells in the well
            pene = float(num_outliers) / num_cells * 100
            # one-sided hypergeometric p-value against the WT population
            pval = 1 - stats.hypergeom.cdf(num_outliers, WT_cells, WT_cells_outliers, num_cells)
            # Append them to corresponding variables
            line = []
            for i in identifier_features + location_features:
                if 'plate' in i.lower():
                    i = 'Plate'
                line.append(df_rc[i].unique()[0])
            line.append(num_cells)
            line.append(pene)
            line.append(pval)
            final_df_output.loc[this_row, ] = line
            this_row += 1
    # Save into a dataframe
    final_df_output = final_df_output.sort_values('Penetrance', ascending=False)
    final_df_output = final_df_output.reset_index(drop=True)
    final_df_output.to_csv(path_or_buf=output['ODresultsWell'], index=False)
    return final_df_output
|
907c4523ca15b52a66947864f12e4375816d047a
| 3,646,667
|
def main(input_file):
    """Solve puzzle and connect part 1 with part 2 if needed."""
    puzzle = read_input(input_file)
    part1, part2 = part_1_and_2(puzzle)
    print(f"Solution to part 1: {part1}")
    print(f"Solution to part 2: {part2}")
    return part1, part2
|
6edd09292e6ba0bccfcb1923e2f3011f47dfb331
| 3,646,668
|
def generateListPermutations(elements, level=0):
    """Generate all possible permutations of the list 'elements'.

    Recursive: each element is chosen as the head in turn, prepended to every
    permutation of the remainder. Duplicate values are removed by first
    occurrence, so duplicate inputs can yield fewer results than n!.
    """
    if not elements:
        return [[]]
    permutations = []
    for head in elements:
        remaining = list(elements)
        remaining.remove(head)  # drop the first occurrence of this value
        for tail in generateListPermutations(remaining, level + 1):
            tail.insert(0, head)
            permutations.append(tail)
    return permutations
|
1894b6726bedaaf634e8c7ac56fc1abd9e204eef
| 3,646,669
|
def has_security_updates(update_list):
    """
    Returns true if there are security updates available.
    """
    # NOTE(review): this returns whatever filter_updates yields (presumably
    # the filtered collection), which is truthy when non-empty rather than a
    # strict bool — callers comparing with `is True` would break; confirm.
    return filter_updates(update_list, 'category', lambda x: x == 'security')
|
57fe7b66b3e678e50a2b44593ea29c26bdee12ac
| 3,646,671
|
def lobby():
    """Return an unchecked place named lobby."""
    # Simple factory (likely a test fixture) producing a fresh UncheckedPlace.
    return UncheckedPlace("Lobby")
|
18bab877773086b9a29f4184c6948629e2162e4f
| 3,646,672
|
import torch
def get_random_sample_indices(
        seq_len, num_samples=100, device=torch.device("cpu")):
    """
    Draw up to `num_samples` distinct indices from [0, seq_len - 1].

    Args:
        seq_len: int, the sampled indices will be in the range [0, seq_len-1]
        num_samples: sample size
        device: torch.device
    Returns:
        1D torch.LongTensor of sorted sample indices (sorting should not
        affect results as we use transformers)
    """
    if num_samples >= seq_len:
        # keep every index when the request covers the whole sequence
        indices = np.arange(seq_len)
    else:
        # sample without replacement, then sort for deterministic ordering
        indices = np.sort(np.random.choice(seq_len, size=num_samples, replace=False))
    return torch.from_numpy(indices).long().to(device)
|
e63eeb3a06687bcdbd9b1b9947db83caa5080b62
| 3,646,673
|
import hashlib
import re
def render_mesh_as_dot(mesh, template=DOT_TEMPLATE):
    """Renders the given mesh in the Graphviz dot format.
    :param Mesh mesh: the mesh to be rendered
    :param str template: alternative template to use
    :returns: textual dot representation of the mesh
    """
    custom_filters = {
        # NOTE(review): hashlib.md5 requires bytes — these filters assume the
        # template passes byte strings (a str input would raise TypeError);
        # confirm the template's inputs.
        # short, dot-safe node ids derived from the name's md5
        'hash': lambda s: "id" + hashlib.md5(s).hexdigest()[:6],
        # alternative hash for provides ports to avoid conflicts with needs ports with same name
        'hash_p': lambda s: "idp" + hashlib.md5(s).hexdigest()[:6],
        # backslash-escape characters that are special in dot labels
        'escape': lambda s: re.sub(r'([{}|"<>])', r'\\\1', s),
    }
    return render(mesh, template, custom_filters=custom_filters)
|
34826db4a1ab16cc07913b0381881876620551de
| 3,646,674
|
def snrest(noisy: np.ndarray, noise: np.ndarray, axis=None):
    """
    Computes SNR [in dB] when you have:
    "noisy" signal+noise time series
    "noise": noise only without signal

    Returns 10*log10 of the sum-of-squares power ratio.
    """
    # NOTE(review): `axis` is forwarded only for the noisy recording; the
    # noise power is computed over the flattened array. If a per-axis SNR is
    # intended, ssq(noise, axis) may be the intended call — confirm.
    Psig = ssq(noisy, axis)
    Pnoise = ssq(noise)
    return 10 * np.log10(Psig / Pnoise)
|
f7fb5c00324dfe81d225cb52d0a937c568222824
| 3,646,675
|
from pathlib import Path
def load(spect_path, spect_format=None):
    """Load a spectrogram and its related arrays from an array file.

    Parameters
    ----------
    spect_path : str, Path
        to an array file.
    spect_format : str
        Valid formats are defined in vak.io.spect.SPECT_FORMAT_LOAD_FUNCTION_MAP.
        Default is None, in which case the extension of the file is used.

    Returns
    -------
    spect_dict : dict-like
        dictionary-like object providing access to the file's arrays via
        keys, e.g. spect_dict['s'] for the spectrogram. See docstring for
        vak.audio.to_spect for default keys for spectrogram array files
        that function creates.
    """
    spect_path = Path(spect_path)
    if spect_format is None:
        # infer the format from the file extension; Path.suffix includes the
        # leading period, which the loader map's keys do not
        spect_format = spect_path.suffix.replace('.', '')
    loader = constants.SPECT_FORMAT_LOAD_FUNCTION_MAP[spect_format]
    return loader(spect_path)
|
2999c8745f7feecb0174f5a230194c6b098c606c
| 3,646,676
|
def PSNR(a, b, max_val=255.0, name=None):
    """Returns the Peak Signal-to-Noise Ratio between a and b.
    Arguments:
      a: first set of images.
      b: second set of images.
      max_val: the dynamic range of the images (i.e., the difference between the
        maximum the and minimum allowed values).
      name: namespace to embed the computation in.
    Returns:
      The scalar PSNR between a and b. The shape of the returned tensor is
      [batch_size, 1].
    """
    # TF1-style three-argument tf.name_scope(name, default, values)
    with tf.name_scope(name, 'PSNR', [a, b]):
        psnr = tf.image.psnr(a, b, max_val=max_val, name=name)
        # attach the shape-compatibility assertions as control dependencies so
        # they run before the PSNR value is returned
        _, _, checks = VerifyCompatibleImageShapes(a, b)
        with tf.control_dependencies(checks):
            return tf.identity(psnr)
|
0a140abc081b5faf0cbd1a9e4c67b4db62794f5d
| 3,646,677
|
import functools
def command(f):
    """ indicate it's a command of naviseccli
    :param f: function that returns the command in list
    :return: command execution result
    """
    @functools.wraps(f)
    def func_wrapper(self, *argv, **kwargs):
        # Pull the optional target ip out of kwargs before building the
        # command (pop replaces the previous `in`-check + `del` dance).
        ip = kwargs.pop('ip', None)
        commands = _get_commands(f, self, *argv, **kwargs)
        return self.execute(commands, ip=ip)
    return func_wrapper
|
71160e4af7d4d64ca2a7ecd059d0c53f9e339308
| 3,646,678
|
def clean_data(file_name):
    """
    Clean the 2016 API (air pollutant index) Excel export.

    file_name: file to be cleaned
    Converts the data types in the original dataframe into more suitable
    types, merges date/time into a Datetime column, and maps station
    locations ("Lokasi"/"Negeri") onto the Area/State names used by the
    other yearly dataframes. Also writes the result to a CSV (template
    path — edit as needed) and returns the cleaned dataframe.
    """
    df_input = pd.read_excel(file_name,sheet_name = "IPU2016")
    #Checking the basic information about the dataframe (optional)
    #print(df_input.info(), df_input.describe())
    #print(df_input["Lokasi"].unique())
    #Change column name for consistency
    #(rename returns a new frame, so no explicit copy is needed first)
    df_output = df_input.rename(columns = {"Tarikh":"Date",
                                           "API":"API_Values",
                                           "Masa":"Time"})
    #Note that there is no dominant pollutant data for this dataset
    #Converting the date into datetime
    df_output["Date"] = df_output["Date"].astype(str)
    df_output["Time"] = df_output["Time"].astype(str)
    df_output["Datetime"] = df_output[["Date","Time"]].agg("-".join, axis = 1)
    df_output["Datetime"] = pd.to_datetime(df_output["Datetime"], format = "%Y%m%d-%I:%M:%S")
    #Creating new columns "Area" based on "Lokasi" for consistency
    #The area and state allocated are based on the categorization of other dataframes
    #the dictionary is organized in the following form: Lokasi: Area
    #Note that there are subtle differences in the input Lokasi values so the directory from previous data cleaning python doc is not applicable
    df_output["Lokasi"] = df_output["Lokasi"].astype(str)
    df_output["Lokasi"] = df_output["Lokasi"].str.rstrip()
    area_directory = {"Sek. Men. Pasir Gudang 2, Pasir Gudang": "Pasir Gudang",
                      "Institut Perguruan Malaysia, Temenggong Ibrahim, Larkin, Johor Bharu": "Lakrin Lama",
                      "Sek. Men. Teknik Muar, Muar, Johor": "Muar",
                      "SMA, Bandar Penawar, Kota Tinggi": "Kota Tinggi",
                      "Sek. Keb. Bakar Arang, Sungai Petani": "Bakar Arang, Sg. Petani",
                      "Komplek Sukan Langkawi, Kedah": "Langkawi",
                      "Sek. Men. Agama Mergong, Alor Setar": "Alor Setar",
                      "Sek. Men. Keb. Tanjung Chat, Kota Bahru": "SMK Tanjung Chat, Kota Bharu",
                      "SMK. Tanah Merah": "Tanah Merah",
                      "Sek. Men. Keb. Bukit Rambai": "Bukit Rambai",
                      "Sek. Men. Tinggi Melaka, Melaka": "Bandaraya Melaka",
                      "Tmn. Semarak (Phase II), Nilai": "Nilai",
                      "Sek. Men. Teknik Tuanku Jaafar, Ampangan, Seremban": "Seremban",
                      "Pusat Sumber Pendidikan N.S. Port Dickson": "Port Dickson",
                      "Pej. Kajicuaca Batu Embun, Jerantut": "Jerantut",
                      "Sek. Keb. Indera Mahkota, Kuantan": "Indera Mahkota, Kuantan",
                      "Sek. Keb. Balok Baru, Kuantan": "Balok Baru, Kuantan",
                      "Sek. Men. Jalan Tasek, Ipoh": "Jalan Tasek, Ipoh",
                      "Sek. Men. Keb. Air Puteh, Taiping": "Kg. Air Putih, Taiping",
                      "Pejabat Pentadbiran Daerah Manjung, Perak": "Seri Manjung",
                      "Universiti Pendidikan Sultan Idris, Tanjung Malim": "Tanjung Malim",
                      "Sek. Men. Pegoh, Ipoh, Perak": "S K Jalan Pegoh, Ipoh",
                      "Institut Latihan Perindustrian (ILP) Kangar": "Kangar",
                      "Sek. Keb. Cederawasih, Taman Inderawasih, Perai": "Perai",
                      "Sek. Keb. Seberang Jaya II, Perai": "Seberang Jaya 2, Perai",
                      "Universiti Sains Malaysia, Pulau Pinang": "USM",
                      "Sek. Men. Keb Putatan, Tg Aru, Kota Kinabalu": "Kota Kinabalu",
                      "Pejabat JKR Tawau, Sabah": "Tawau",
                      "Sek. Men. Keb Gunsanad, Keningau": "Keningau",
                      "Pejabat JKR Sandakan, Sandakan": "Sandakan",
                      "Medical Store, Kuching": "Kuching",
                      "Ibu Pejabat Polis Sibu, Sibu": "Sibu",
                      "Balai Polis Pusat Bintulu": "Bintulu",
                      "Sek. Men. Dato Permaisuri Miri": "Miri",
                      "Balai Polis Pusat Sarikei": "Sarikei",
                      "Dewan Suarah, Limbang": "Limbang",
                      "Pejabat Daerah Samarahan, Kota Samarahan": "Samarahan",
                      "Kompleks Sukan, Sri Aman": "Sri Aman",
                      "Stadium Tertutup, Kapit": "Kapit",
                      "ILP MIRI": "ILP Miri",
                      "Sek. Men. (P) Raja Zarina, Kelang": "Pelabuhan Kelang",
                      "Sek. Keb. Bandar Utama, Petaling Jaya": "Petaling Jaya",
                      "Sek. Keb. TTDI Jaya, Shah Alam": "Shah Alam",
                      "Sekolah Menengah Sains, Kuala Selangor": "Kuala Selangor",
                      "Kolej MARA, Banting": "Banting",
                      "Sek. Ren. Keb. Bukit Kuang, Teluk Kalung, Kemaman": "Kemaman",
                      "Kuarters TNB, Paka-Kertih": "Paka",
                      "Sek. Keb. Chabang Tiga, Kuala Terengganu": "Kuala Terengganu",
                      "Taman Perumahan Majlis Perbandaran Labuan": "Labuan",
                      "Sek. Keb. Putrajaya 8(2), Jln P8/E2, Presint 8, Putrajaya": "Putrajaya",
                      "Sek.Men.Keb.Seri Permaisuri, Cheras": "Cheras,Kuala Lumpur",
                      "Sek. Keb. Batu Muda, Batu Muda, Kuala Lumpur": "Batu Muda,Kuala Lumpur"}
    #Create column "Area"
    df_output["Area"] = df_output["Lokasi"].map(area_directory)
    #Create column "State"
    #Since there is very little tokens, mapping a dictionary will be faster
    state_directory = {"JOHOR": "Johor",
                       "KEDAH": "Kedah",
                       "KELANTAN": "Kelantan",
                       "MELAKA": "Melaka",
                       "N.SEMBILAN": "Negeri Sembilan",
                       "PAHANG": "Pahang",
                       "PERAK": "Perak",
                       "PERLIS": "Perlis",
                       "PULAU PINANG": "Pulau Pinang",
                       "SABAH": "Sabah",
                       "SARAWAK": "Sarawak",
                       "SELANGOR": "Selangor",
                       "TERENGGANU": "Terengganu",
                       "WILAYAH PERSEKUTUAN": "Wilayah Persekutuan"}
    df_output["State"] = df_output["Negeri"].map(state_directory)
    df_output = df_output.drop(columns = ["Date", "Time", "Lokasi", "Negeri"])
    #Checking the basic information about the final dataframe (optional)
    #print(df_output.info())
    #Export output to new csv file (edit path and name as needed)
    #BUGFIX: previously wrote `df_extract`, an undefined name (NameError).
    df_output.to_csv(r"file_path\file_name.csv")
    return df_output
|
cd283c97004f013622e4d2022a9c63424248a7f0
| 3,646,679
|
def get_contact_pages(buffer, domain):
    """
    Returns links to all possible contact pages found on the site index page

    buffer: raw HTML of the index page (parsed with lxml's fromstring)
    domain: site domain, passed to normalize_url to absolutize relative links
    Returns a deduplicated list of normalized URLs.
    """
    # common link texts (English and Russian) and common contact URL paths
    usual_contact_titles = [u'Contact', u'Contacts', u'About', u'Контакты', u'Связаться с нами']
    usual_contact_urls = ['/contact', '/contacts', '/info']
    result = list()
    html = fromstring(buffer)
    for a in html.xpath('//a'):
        title = a.text_content().strip()
        url = a.get('href')
        # anchors without an href cannot be contact links
        if url is None:
            continue
        if title in usual_contact_titles or url in usual_contact_urls:
            result.append(normalize_url(url, domain))
    # release the parsed tree before returning
    del html
    # set() deduplicates; note this does not preserve discovery order
    return list(set(result))
|
ed8f164fe7f85181df011067acb098f771ccc9fa
| 3,646,680
|
def _hydrate_active_votes(vote_csv):
    """Convert minimal CSV representation into steemd-style object."""
    if not vote_csv:
        return []
    hydrated = []
    # each line is "voter,rshares,percent,reputation"
    for record in vote_csv.split("\n"):
        voter, rshares, percent, reputation = record.split(',')
        hydrated.append({'voter': voter,
                         'rshares': rshares,
                         'percent': percent,
                         'reputation': rep_to_raw(reputation)})
    return hydrated
|
a1903578b1a0a0b3ab1e1f5c60982cfcc2d766a9
| 3,646,681
|
import random
def skew(width, height, magnitude, mode='random'):
    """
    Skew the ChArUco in 4 different modes.

    :param width: image width in pixels
    :param height: image height in pixels
    :param magnitude: skew strength in pixels
    :param mode: 0: top narrow, 1: bottom narrow, 2: left skew, 3 right skew,
        or 'random' to pick one of the four modes at random
    :return: perspective-transform coefficients from find_coeffs
    :raises ValueError: if mode is not 'random' or an integer in 0..3
    """
    # Randomize skew
    if mode == 'random':
        mode = random.randint(0, 3)
    # Translate skew mode into transform coefficients
    if mode == 0:
        coeffs = find_coeffs(
            [(magnitude, 0), (width - magnitude, 0), (width, height), (0, height)],
            [(0, 0), (width, 0), (width, height), (0, height)])
    elif mode == 1:
        coeffs = find_coeffs(
            [(0, 0), (width, 0), (width - magnitude, height), (magnitude, height)],
            [(0, 0), (width, 0), (width, height), (0, height)])
    elif mode == 2:
        coeffs = find_coeffs(
            [(0, 0), (width, 0), (width + magnitude, height), (magnitude, height)],
            [(0, 0), (width, 0), (width, height), (0, height)])
    elif mode == 3:
        coeffs = find_coeffs(
            [(magnitude, 0), (width + magnitude, 0), (width, height), (0, height)],
            [(0, 0), (width, 0), (width, height), (0, height)])
    else:
        # Previously an unknown numeric mode fell through to an
        # UnboundLocalError on `coeffs`; fail fast with a clear message.
        raise ValueError(f"invalid skew mode: {mode!r}")
    return coeffs
|
5212b5597dcf3052c73fe8321771386a3470f9a0
| 3,646,682
|
def aws_ec2_pricing():
    """---
    get:
      tags:
        - aws
      produces:
        - application/json
      description: &desc Get EC2 pricing per gibabyte in all regions and storage types
      summary: *desc
      responses:
        200:
          description: List of instance types
          schema:
            properties:
              instances:
                type: array
                items:
                  properties:
                    instanceType:
                      type: string
                    location:
                      type: string
                    prices:
                      type: array
                      items:
                        properties:
                          type:
                            type: string
                          costPerHour:
                            type: float
                          upfrontCost:
                            type: float
                          reservationYears:
                            type: integer
        403:
          description: Not logged in
    """
    # NOTE(review): the swagger description above says "pricing per gibabyte
    # ... storage types" (typo included) but the handler returns EC2
    # *instance* pricing — it looks copy-pasted from an EBS endpoint; confirm
    # before editing, since the docstring is consumed by the API-doc generator.
    return jsonify(instances=ec2pricing.get_pricing_data())
|
3f785a5ea3ca5a18a1e8e4e2c8a7651cafbe3c2c
| 3,646,683
|
import logging
def create_app():
    """
    Create the application and return it to the user
    :return: flask.Flask application
    """
    app = Flask(__name__, static_folder=None)
    app.url_map.strict_slashes = False
    # Load config and logging (config must be in place before extensions)
    load_config(app)
    # `import logging` alone does not load the logging.config submodule, so
    # logging.config.dictConfig can raise AttributeError without this import.
    import logging.config
    logging.config.dictConfig(
        app.config['SLACKBACK_LOGGING']
    )
    # Register extensions
    api = Api(app)
    # Add end points
    api.add_resource(SlackFeedback, '/feedback/slack')
    return app
|
1a4dbade0d3ce61c458c0e24e093cfd3d7020cdb
| 3,646,684
|
def _get_graph_cls(name):
    """Get scaffoldgraph class from name string."""
    # dispatch table: name string -> scaffold graph class
    registry = {
        'network': ScaffoldNetwork,
        'tree': ScaffoldTree,
        'hiers': HierS,
    }
    if name in registry:
        return registry[name]
    msg = f'scaffold graph type: {name} not known'
    raise ValueError(msg)
|
5279cf2518a0a5c3c89766caf41fa9e29d694ae3
| 3,646,685
|
import requests
def getgrayim(ra, dec, size=240, output_size=None, filter="g", format="jpg"):
    """Fetch a grayscale (single-filter) cutout image at a sky position.

    ra, dec = position in degrees
    size = extracted image size in pixels (0.25 arcsec/pixel)
    output_size = output (display) image size in pixels (default = size).
        Has no effect for fits format images.
    filter = which filter to extract (one of g, r, i, z, y)
    format = output data format, either "jpg" or "png"

    Returns a PIL Image; raises ValueError on a bad filter/format.
    """
    # Validate before doing any network work.
    if format not in ("jpg", "png"):
        raise ValueError("format must be jpg or png")
    if filter not in list("grizy"):
        raise ValueError("filter must be one of grizy")
    urls = geturl(ra, dec, size=size, filters=filter,
                  output_size=output_size, format=format)
    # geturl returns one URL per filter; a single filter gives one URL.
    response = requests.get(urls[0])
    return Image.open(BytesIO(response.content))
|
ca5e92580e428f586800419f1ef7e444fa432a25
| 3,646,686
|
import time
def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False):
    """Elastic transformation for image as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
    Parameters
    -----------
    x : numpy.array
        A greyscale image.
    alpha : float
        Alpha value for elastic transformation.
    sigma : float or sequence of float
        The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.
    mode : str
        See `scipy.ndimage.filters.gaussian_filter <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html>`__. Default is `constant`.
    cval : float,
        Used in conjunction with `mode` of `constant`, the value outside the image boundaries.
    is_random : boolean
        Default is False.
    Returns
    -------
    numpy.array
        A processed image.
    Examples
    ---------
    >>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)
    References
    ------------
    - `Github <https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`__.
    - `Kaggle <https://www.kaggle.com/pscion/ultrasound-nerve-segmentation/elastic-transform-for-data-augmentation-0878921a>`__
    """
    # NOTE(review): RandomState(None) seeds from fresh OS entropy, so the
    # is_random=False branch is still nondeterministic; the is_random=True
    # branch merely reseeds from the wall clock (per second). Confirm whether
    # a fixed seed was intended for is_random=False.
    if is_random is False:
        random_state = np.random.RandomState(None)
    else:
        random_state = np.random.RandomState(int(time.time()))
    # Accept (H, W) or (H, W, 1); squeeze the channel axis and remember to
    # restore it on the way out.
    is_3d = False
    if len(x.shape) == 3 and x.shape[-1] == 1:
        x = x[:, :, 0]
        is_3d = True
    elif len(x.shape) == 3 and x.shape[-1] != 1:
        raise Exception("Only support greyscale image")
    if len(x.shape) != 2:
        raise AssertionError("input should be grey-scale image")
    shape = x.shape
    # Random displacement fields in [-alpha, alpha): uniform noise in [-1, 1)
    # smoothed by a Gaussian (sigma controls smoothness) then scaled by alpha
    # (controls distortion intensity).
    dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
    dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
    # Displaced sampling grid; map_coordinates with order=1 performs bilinear
    # interpolation at the warped coordinates.
    x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
    indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
    if is_3d:
        return map_coordinates(x, indices, order=1).reshape((shape[0], shape[1], 1))
    else:
        return map_coordinates(x, indices, order=1).reshape(shape)
|
50af01bf47b5273cfc37b136f546f4e0fd6f24bb
| 3,646,688
|
def create_markdown(
    escape=True,
    renderer=None,
    plugins=None,
    acronyms=None,
    bibliography="",
    chapters=False,
    toc=False,
):
    """Create a Markdown instance based on the given condition.

    :param escape: Boolean. If using html renderer, escape html.
    :param renderer: renderer instance or string of ``html`` and ``ast``
        (``None`` or ``"latex"`` selects the LaTeX renderer).
    :param plugins: List of plugins, string or callable.

    This method is used when you want to re-use a Markdown instance::

        markdown = create_markdown(
            escape=False,
            renderer='html',
            plugins=['url', 'strikethrough', 'footnotes', 'table'],
        )
        # re-use markdown function
        markdown('.... your text ...')
    """
    # Resolve the renderer: LaTeX is the default, strings select built-ins,
    # and any other object is assumed to be a ready renderer instance.
    if renderer is None or renderer == "latex":
        renderer = LaTeXRenderer(acronym_file=acronyms, chapters=chapters)
    if renderer == "html":
        renderer = HTMLRenderer(escape=escape)
    elif renderer == "ast":
        renderer = AstRenderer()
    # Plugin names are looked up in the registry; callables pass through.
    if plugins:
        plugins = [PLUGINS[p] if isinstance(p, str) else p for p in plugins]
    return ExtendedMarkdown(
        renderer,
        inline=ExtendedInlineParser(renderer, chapters=chapters),
        block=ExtendedBlockParser(),
        plugins=plugins,
        bibliography=bibliography,
        chapters=chapters,
        toc=toc,
    )
|
7db137aacc35a9a4171ef8f7ac8a50adc4e94e79
| 3,646,689
|
def _split(num):
"""split the num to a list of every bits of it"""
# xxxx.xx => xxxxxx
num = num * 100
result = []
for i in range(16):
tmp = num // 10 ** i
if tmp == 0:
return result
result.append(tmp % 10)
return result
|
575068b9b52fdff08522a75d8357db1d0ab86546
| 3,646,691
|
def getHitmask(image):
    """Return a 2-D hitmask (indexed ``[x][y]``) from an image's alpha channel.

    Each entry is True where the pixel at (x, y) has a non-zero alpha
    component (``get_at(...)[3]``), i.e. where the sprite is opaque.
    """
    # FIX: the original used Python-2-only ``xrange`` (NameError on Python 3);
    # ``range`` is the modern equivalent. Built as a nested comprehension.
    return [
        [bool(image.get_at((x, y))[3]) for y in range(image.get_height())]
        for x in range(image.get_width())
    ]
|
6bc38cc7d92f9b8d11241c5559c852d1ac204a60
| 3,646,692
|
def get_args_kwargs_param_names(fparams) -> (str, str):
    """Return the names of the ``*args``-style and ``**kwargs``-style
    parameters found in *fparams*, or ``None`` for each that is absent.

    fparams is ``inspect.signature(f).parameters`` for some function ``f``.

    Doctests:
    >>> import inspect
    >>> def f(): pass
    >>> get_args_kwargs_param_names(inspect.signature(f).parameters)
    (None, None)
    >>> def f(*args): pass
    >>> get_args_kwargs_param_names(inspect.signature(f).parameters)
    ('args', None)
    >>> def f(a, b, *filters, **kwargs): pass
    >>> get_args_kwargs_param_names(inspect.signature(f).parameters)
    ('filters', 'kwargs')
    >>> def f(x, y, z, user='Joe', **other_users): pass
    >>> get_args_kwargs_param_names(inspect.signature(f).parameters)
    (None, 'other_users')
    """
    var_positional = None
    var_keyword = None
    for param in fparams.values():
        if param.kind == param.VAR_POSITIONAL:
            var_positional = param.name
        elif param.kind == param.VAR_KEYWORD:
            var_keyword = param.name
        if var_positional and var_keyword:
            break  # found both: done
    return var_positional, var_keyword
|
948688ca6ba9908dd8bccc0c5232ebbe6fdb071f
| 3,646,693
|
def clean_up_tokenization_spaces(out_string):
    """Converts an output string (de-BPE-ed) using de-tokenization algorithm from OpenAI GPT."""
    # Applied in order: drop <unk>, re-attach punctuation, then restore
    # contractions. Order matters (e.g. " n't" before " 's").
    replacements = (
        ('<unk>', ''),
        (' .', '.'),
        (' ?', '?'),
        (' !', '!'),
        (' ,', ','),
        (" ' ", "'"),
        (" n't", "n't"),
        (" 'm", "'m"),
        (" do not", " don't"),
        (" 's", "'s"),
        (" 've", "'ve"),
        (" 're", "'re"),
    )
    for old, new in replacements:
        out_string = out_string.replace(old, new)
    return out_string
|
0bd51ca7dbaa36569c0d2f18d510f1c6a92e1822
| 3,646,694
|
def upload_pkg(go_workspace, pkg_file, service_url, tags, service_account):
  """Uploads existing *.cipd file to the storage and tags it.

  Args:
    go_workspace: path to 'infra/go' or 'infra_internal/go'.
    pkg_file: path to *.cipd file to upload.
    service_url: URL of a package repository service.
    tags: a list of tags to attach to uploaded package instance.
    service_account: path to *.json file with service account to use.

  Returns:
    {'package': <name>, 'instance_id': <sha1>}

  Raises:
    UploadException on error.
  """
  # NOTE: Python 2 module (print statements / `print >>` below).
  print_title('Uploading: %s' % os.path.basename(pkg_file))
  # Build the cipd CLI argument list: target service, sorted instance tags,
  # and a moving 'latest' ref that always points at the newest upload.
  args = ['-service-url', service_url]
  for tag in sorted(tags):
    args.extend(['-tag', tag])
  args.extend(['-ref', 'latest'])
  if service_account:
    args.extend(['-service-account-json', service_account])
  args.append(pkg_file)
  exit_code, json_output = run_cipd(go_workspace, 'pkg-register', args)
  if exit_code:
    print
    print >> sys.stderr, 'FAILED! ' * 10
    raise UploadException('Failed to upload the CIPD package, see logs')
  # On success cipd emits JSON with the registered package name and
  # instance id; echo it for the build log and hand it back to the caller.
  info = json_output['result']
  print '%s %s' % (info['package'], info['instance_id'])
  return info
|
edc290546f14d53bd1b5b725519be0510f25e60b
| 3,646,695
|
def make_rgg(n: int, kbar: float) -> ig.Graph:
    """Build a toroidal random geometric graph with ``n`` nodes and
    expected average degree ``kbar``.

    The connection radius is derived from ``kbar``: on a unit torus a node
    sees ``n - 1`` others, so a disc of area ``pi * r**2`` contains
    ``kbar`` neighbours on average when ``r = sqrt(kbar / (pi * (n - 1)))``.
    """
    connection_radius = np.sqrt(kbar / (np.pi * (n - 1)))
    return ig.Graph.GRG(n, radius=connection_radius, torus=True)
|
d44cc49284196234bbc2c46c93516e6dea182f4d
| 3,646,696
|
def list_directory(bucket, prefix, s3=None, request_pays=False):
    """List the immediate sub-'directories' (common prefixes) under an S3
    prefix.

    If no client is supplied, one is created from the module-level region.
    Set ``request_pays`` for requester-pays buckets. Returns the prefix
    strings only.
    """
    if not s3:
        session = boto3_session(region_name=region)
        s3 = session.client('s3')
    params = {'Bucket': bucket, 'Prefix': prefix, 'Delimiter': '/'}
    if request_pays:
        params['RequestPayer'] = 'requester'
    paginator = s3.get_paginator('list_objects_v2')
    common_prefixes = []
    for page in paginator.paginate(**params):
        if 'CommonPrefixes' in page.keys():
            common_prefixes.extend(page.get('CommonPrefixes'))
    return [entry['Prefix'] for entry in common_prefixes]
|
a008e6508dc000959c08b5f736951f9c664f0c91
| 3,646,697
|
from datetime import datetime
def manual_overrides():
    """Read the overrides file.

    Read the overrides from cache, if available. Otherwise, an attempt is made
    to read the file as it currently stands on GitHub, and then only if that
    fails is the included file used. The result is cached for one day (today's
    date is the cache key passed to the memoized helper).
    """
    # BUG FIX: this module does ``from datetime import datetime``, so
    # ``datetime.date`` is the *instance method* and ``datetime.date.today()``
    # raised AttributeError. Take today's date from a datetime instance.
    return _manual_overrides(datetime.now().date())
|
35f64fea6a8a7923e1c94d88604798d3b014b69f
| 3,646,698
|
def _ExtractCLPath(output_of_where):
"""Gets the path to cl.exe based on the output of calling the environment
setup batch file, followed by the equivalent of `where`."""
# Take the first line, as that's the first found in the PATH.
for line in output_of_where.strip().splitlines():
if line.startswith('LOC:'):
return line[len('LOC:'):].strip()
|
6a0c0d4aa74b4e84de69de023e2721edd95c36bd
| 3,646,699
|
def preprocess_data_for_clustering(df):
    """Prepare data in order to apply a clustering algorithm

    Parameters
    ----------
    df : pandas.DataFrame
        Input data, *i.e.* city-related timeseries, supposed to have
        `station_id`, `ts` and `nb_bikes` columns

    Returns
    -------
    pandas.DataFrame
        Simpified version of `df`, ready to be used for clustering:
        24 hourly rows, one column per active station, values in [0, 1]
    """
    # Filter unactive stations: a station whose bike count never exceeds 0
    # over the whole period carries no signal for clustering.
    max_bikes = df.groupby("station_id")["nb_bikes"].max()
    unactive_stations = max_bikes[max_bikes==0].index.tolist()
    active_station_mask = np.logical_not(df['station_id'].isin(unactive_stations))
    df = df[active_station_mask]
    # Set timestamps as the DataFrame index and resample with 5-minute
    # periods ("5T"); backfill so every 5-minute slot has a value.
    df = (df.set_index("ts")
          .groupby("station_id")["nb_bikes"]
          .resample("5T")
          .mean()
          .bfill())
    # Pivot: one column per station, timestamps as rows.
    df = df.unstack(0)
    # Drop week-end records (weekday 5 = Saturday, 6 = Sunday).
    df = df[df.index.weekday < 5]
    # Collapse to a 24-row average daily profile (one row per hour of day).
    df['hour'] = df.index.hour
    df = df.groupby("hour").mean()
    # Normalise each station profile by its own maximum so clustering compares
    # usage shapes rather than absolute station capacities.
    return df / df.max()
|
144c701b3be12aed2a1488a08eb05c65c6d704c5
| 3,646,700
|
def chars(line):
    """Return the characters of a TerminalBuffer line as a single string.

    Each non-void cell is a ``(char, attrs)`` pair; only the characters are
    kept.
    """
    glyphs = [cell[0] for cell in notVoids(line)]
    return "".join(glyphs)
|
1ffad7e9d0cc800f8de579fa30aeb108a12bd8d2
| 3,646,701
|
def map_is_finite(query_points: tf.Tensor, observations: tf.Tensor) -> Dataset:
    """
    Build a :class:`~trieste.data.Dataset` whose query points are
    ``query_points`` unchanged and whose observations mark finiteness.

    :param query_points: A tensor.
    :param observations: A tensor.
    :return: A :class:`~trieste.data.Dataset` whose observations are `1`
        (dtype `tf.uint8`) where the corresponding element of
        ``observations`` is a finite number, else `0`.
    :raise ValueError or InvalidArgumentError: If ``query_points`` and
        ``observations`` do not satisfy the shape constraints of
        :class:`~trieste.data.Dataset`.
    """
    finite_mask = tf.cast(_is_finite(observations), tf.uint8)
    return Dataset(query_points, finite_mask)
|
e22571a179acfb8261924eaf71dec40d17adc47d
| 3,646,702
|
def docker_image_exists(args, image):  # type: (EnvironmentConfig, str) -> bool
    """Return True if the image exists, otherwise False."""
    # `docker image inspect` exits non-zero (raising SubprocessError) when
    # the image is not present locally.
    try:
        docker_command(args, ['image', 'inspect', image], capture=True)
    except SubprocessError:
        return False
    else:
        return True
|
ba2eedacef0179b25d203f00cf42fb4f4e4f9b72
| 3,646,703
|
def get_g2_fit_general_two_steps(
    g2,
    taus,
    function="simple_exponential",
    second_fit_range=[0, 20],
    sequential_fit=False,
    *argv,
    **kwargs,
):
    """
    Fit g2 in two steps,
    i) Using the "function" to fit whole g2 to get baseline and beta (contrast)
    ii) Then using the obtained baseline and beta to fit g2 in a "second_fit_range" by using simple_exponential function

    Returns the (fit results, fitted taus, fitted g2) triple of the second fit.
    """
    # NOTE(review): `second_fit_range=[0, 20]` is a mutable default argument;
    # safe only as long as neither this function nor callers mutate it.
    # Step 1: fit the full g2 curve with the requested model to obtain
    # per-column parameter estimates (baseline, beta, ...).
    g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
        g2, taus, function, sequential_fit, *argv, **kwargs
    )
    # Collect the step-1 fitted value of every parameter for every column of
    # g2; these become the guesses (mostly frozen) for the second fit.
    guess_values = {}
    for k in list(g2_fit_result[0].params.keys()):
        guess_values[k] = np.array(
            [g2_fit_result[i].params[k].value for i in range(g2.shape[1])]
        )
    # Caller-provided limits win; otherwise use broad physical defaults.
    if "guess_limits" in kwargs:
        guess_limits = kwargs["guess_limits"]
    else:
        guess_limits = dict(
            baseline=[1, 1.8],
            alpha=[0, 2],
            beta=[0.0, 1],
            relaxation_rate=[0.001, 10000],
        )
    # Step 2: re-fit only the relaxation rate on the restricted tau range,
    # holding baseline/beta/alpha fixed at their step-1 values.
    g2_fit_result, taus_fit, g2_fit = get_g2_fit_general(
        g2,
        taus,
        function="simple_exponential",
        sequential_fit=sequential_fit,
        fit_range=second_fit_range,
        fit_variables={
            "baseline": False,
            "beta": False,
            "alpha": False,
            "relaxation_rate": True,
        },
        guess_values=guess_values,
        guess_limits=guess_limits,
    )
    return g2_fit_result, taus_fit, g2_fit
|
d02fbf1796e00b8f490a97a7f7274bab1233a823
| 3,646,705
|
import doctest
def run_doctest(module, verbosity=None):
    """Run doctest on the given module.  Return (#failures, #tests).

    If optional argument verbosity is not specified (or is None), pass
    test_support's belief about verbosity on to doctest.  Else doctest's
    usual behavior is used (it searches sys.argv for -v).

    Raises TestFailed if any doctest in the module fails.
    """
    # NOTE: Python 2 module (print statements below).
    if verbosity is None:
        verbosity = verbose
    else:
        verbosity = None
    # Direct doctest output (normally just errors) to real stdout; doctest
    # output shouldn't be compared by regrtest.
    save_stdout = sys.stdout
    sys.stdout = get_original_stdout()
    try:
        f, t = doctest.testmod(module, verbose=verbosity)
        if f:
            raise TestFailed("%d of %d doctests failed" % (f, t))
    finally:
        # Always restore the captured stdout, even when doctests fail.
        sys.stdout = save_stdout
    if verbose:
        print 'doctest (%s) ... %d tests with zero failures' % \
              (module.__name__, t)
    return f, t
|
cc50538cf8cf50959a4c67eb1c37010d2eab45a9
| 3,646,706
|
def rate_matrix_arrhenius_time_segmented(energies, barriers, segment_temperatures, segment_start_times, t_range):
    """
    Compute the rate matrix for each time ``t`` in ``t_range``, where the bath temperature is a piecewise constant
    function of time.

    The bath temperature is a piecewise constant function: segment ``i`` has
    temperature ``segment_temperatures[i]`` and begins at
    ``segment_start_times[i]``. After the last segment start the temperature
    stays at ``segment_temperatures[-1]`` until the end of ``t_range``. A
    rate matrix is evaluated at the temperature in force at every ``t``.

    Parameters
    ----------
    energies : (N,) array or sequence of float
        Energies of the states, in ascending order.
    barriers : (N, N) array
        Energy barriers between states, given as a matrix.
    segment_temperatures : (K,) array
        Temperature of each segment.
    segment_start_times : (K,) array
        Start time of each segment.
    t_range : (M,) array
        Time sequence.

    Returns
    -------
    rate_matrix_time : (N, N, M)
        Rate matrices stacked in the depth dimension (axis=2).

    Raises
    -----
    ValueError
        If ``segment_start_times[0]`` differs from ``t_range[0]``.
    """
    # Guard: the first segment must start exactly when the time grid does.
    if segment_start_times[0] != t_range[0]:
        raise ValueError('The first segment start time `segment_start_times[0]` must be equal to `t_range[0]`.')
    temperature_array = temperature_array_from_segments(segment_temperatures, segment_start_times, t_range)
    return rate_matrix_arrhenius(energies, barriers, temperature_array)
|
78245c7452a41af91be5f57f0acc72cc67c2e2e0
| 3,646,707
|
def div_tensor(tensor, coords=(x, y, z), h_vec=(1, 1, 1)):
    """
    Divergence of a (second order) tensor

    Parameters
    ----------
    tensor : Matrix (3, 3)
        Tensor function function to compute the divergence from.
    coords : Tuple (3), optional
        Coordinates for the new reference system. This is an optional
        parameter it takes (x, y, z) as default.
    h_vec : Tuple (3), optional
        Scale coefficients for the new coordinate system. It takes
        (1, 1, 1), as default.

    Returns
    -------
    divergence: Matrix
        Divergence of tensor.

    References
    ----------
    .. [RICHARDS] Rowland Richards. Principles of Solids Mechanics.
        CRC Press, 2011.
    """
    h1, h2, h3 = h_vec
    u1, u2, u3 = coords
    # Divergence of a second-order tensor in orthogonal curvilinear
    # coordinates with scale factors (h1, h2, h3); the extra terms couple the
    # tensor components through the derivatives of the scale factors.
    # NOTE(review): some correction terms look asymmetric between components
    # (e.g. h1*tensor[2, 1]*diff(h1, u2) in div3, where a diff(h2, ...) might
    # be expected) -- verify against the reference [RICHARDS] formula.
    div1 = diff(h2*h3*tensor[0, 0], u1) + diff(h1*h3*tensor[0, 1], u2) \
        + diff(h1*h2*tensor[0, 2], u3) + h3*tensor[0, 1]*diff(h1, u2) \
        + h2*tensor[0, 2]*diff(h1, u3) - h3*tensor[1, 1]*diff(h2, u1) \
        - h2*tensor[2, 2]*diff(h3, u1)
    div2 = diff(h2*h3*tensor[1, 0], u1) + diff(h1*h3*tensor[1, 1], u2) \
        + diff(h1*h2*tensor[1, 2], u3) + h1*tensor[1, 2]*diff(h2, u3) \
        + h3*tensor[1, 0]*diff(h2, u1) - h1*tensor[2, 2]*diff(h3, u2) \
        - h3*tensor[2, 2]*diff(h1, u2)
    div3 = diff(h2*h3*tensor[2, 0], u1) + diff(h1*h3*tensor[2, 1], u2) \
        + diff(h1*h2*tensor[2, 2], u3) + h2*tensor[2, 0]*diff(h1, u1) \
        + h1*tensor[2, 1]*diff(h1, u2) - h1*tensor[1, 1]*diff(h2, u3) \
        + h2*tensor[2, 2]*diff(h1, u3)
    # The common 1/(h1*h2*h3) Jacobian factor is applied once at the end.
    return Matrix([div1, div2, div3])/(h1*h2*h3)
|
8159dfc8b330f9184c336ba4cecabe2fdf0d7d55
| 3,646,708
|
import ast
def convert_path_to_repr_exp(path, with_end=False):
    """
    Generate a representative text expression for a path of node indices.

    Endpoint nodes are skipped unless ``with_end`` is set; the special
    start/end states render as "BOS"/"EOS", and every other node
    contributes the text field of its payload.
    """
    exp = ""
    last_pos = len(path) - 1
    for pos in range(len(path)):
        # Skip the two endpoints unless the caller asked for them.
        if with_end == False and (pos == 0 or pos == last_pos):
            continue
        nd_idx = path[pos]
        if nd_idx == start_state:
            exp += "BOS"
            continue
        if nd_idx == end_state:
            exp += "EOS"
            continue
        # The node payload is a dict literal; take the second element of its
        # first value as the display text.
        node_dic = ast.literal_eval(str(idx_to_node[nd_idx]))
        text = ""
        for value in node_dic.values():
            text = value[1]
            break
        exp += ' ' + text
    return exp
|
68dd8ea13ecdef6f2c3947a1e9341ff5e10ccf78
| 3,646,709
|
def cluster_create(context, values):
    """Create a cluster from the values dictionary.

    Thin pass-through to the active database backend (``IMPL``);
    ``context`` is the request context and ``values`` the field/value
    mapping for the new cluster record. Returns whatever the backend
    returns for the created cluster.
    """
    return IMPL.cluster_create(context, values)
|
593577a3d912a6a24e8f6d3b66d66e1d8f39a681
| 3,646,710
|
def compute_heading_error(est, gt):
    """
    Args:
        est: the estimated headings as (sin, cos) rows
        gt: the ground truth headings as (sin, cos) rows
    Returns:
        The scalar MSE over all components, and the per-row angular
        difference (radians) obtained from the clipped dot product.
    """
    squared_diff = (est - gt) ** 2
    mse_error = squared_diff.mean()
    # Row-wise dot product of the unit (sin, cos) vectors gives cos(angle);
    # clip guards arccos against tiny numerical overshoot beyond [-1, 1].
    cosine = np.clip((est * gt).sum(axis=1), -1, 1)
    angle = np.arccos(cosine)
    return mse_error, angle
|
b015a5b904994372c6ca207388ee3db0ef477f0a
| 3,646,711
|
def _get_count_bid(soup: bs4.BeautifulSoup) -> int:
    """ Return bidding count from `soup`.

    Parameters
    ----------
    soup : bs4.BeautifulSoup
        Soup of a Yahoo Auction page.

    Returns
    -------
    int
        Count of total bidding; 0 when the bid-count tag is absent.
    """
    # Locate the '入札件数' (bid count) definition-term tag.
    matches = soup.find_all('dt', text='入札件数')
    if not matches:
        return 0
    dt_tag = matches[0]
    if not isinstance(dt_tag, bs4.element.Tag):
        return 0
    count_tag = dt_tag.find_next_sibling('dd', {'class': 'Count__number'})
    # The text carries a fixed 4-character suffix after the number --
    # presumably a unit label like '件'; strip it before parsing. TODO confirm.
    return int(count_tag.text[:-4])
|
066b1dcb519db10276dfce35ba04b04f5efdbe66
| 3,646,712
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.