| content (string, lengths 35 – 762k) | sha1 (string, length 40) | id (int64, 0 – 3.66M) |
|---|---|---|
def logout(request):
"""Logs out the user"""
user_logout(request)
return redirect(auth_views.login)
|
739ef6b3b4daded0af786f8261072e05e8bba273
| 3,643,667
|
import math
def workout_train_chunk_length(inp_len: int,
resampling_factor: int = 1,
num_encoders: int = 5,
kernel: int = 8,
stride: int = 2) -> int:
"""
Given inp_len, return the training chunk length: the input length rounded up so that it
passes cleanly through num_encoders strided-convolution encoder layers and the matching
transposed-convolution decoder layers.
"""
out_len = inp_len * resampling_factor
for _ in range(num_encoders):
out_len = math.ceil((out_len - kernel) / stride) + 1
for _ in range(num_encoders):
out_len = (out_len - 1) * stride + kernel
return math.ceil(out_len / resampling_factor)
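# Added usage sketch (not part of the original snippet): for a ~1 s chunk at
# 44.1 kHz with the default encoder settings, the returned length is rounded up
# to a value the strided encoder/decoder stack can process without cropping.
padded_len = workout_train_chunk_length(44100)
assert padded_len >= 44100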
|
a7e7f42aa9670f1bda98c588e50052db0f4eb90f
| 3,643,669
|
def asin(e):
"""
:rtype: Column
"""
return col(Asin(parse(e)))
|
7c7fb32e84d7a9af74bc64eed2f111fd2030a499
| 3,643,670
|
def ft32m3(ft3):
"""ft^3 -> m^3"""
return 0.028316847*ft3
|
74f55f722c7e90be3fa2fc1f79f506c44bc6e9bc
| 3,643,672
|
def max_shading_elevation(total_collector_geometry, tracker_distance,
relative_slope):
"""Calculate the maximum elevation angle for which shading can occur.
Parameters
----------
total_collector_geometry: :py:class:`Shapely Polygon <Polygon>`
Polygon corresponding to the total collector area.
tracker_distance: array-like
Distances between neighboring trackers and the reference tracker.
relative_slope: array-like
Slope between neighboring trackers and reference tracker. A positive
slope means neighboring collector is higher than reference collector.
Returns
-------
max_shading_elevation: float
The highest solar elevation angle for which shading can occur for a
given field layout and collector geometry [degrees]
Note
----
The maximum shading elevation angle is calculated for all neighboring
trackers using the bounding box geometry and the bounding circle. For
rectangular collectors (as approximated when using the bounding box), the
maximum shading elevation occurs when one of the upper corners of the
projected shading geometry and the lower corner of the reference collector
intersects. For circular collectors (as approximated by the bounding
circle), the maximum elevation occurs when the projected shadow is directly
below the reference collector and the two circles are tangent to each other.
The maximum elevation is calculated using both the bounding box and the
bounding circle, and the minimum of these two elevations is returned. For
rectangular and circular collectors, the maximum elevation is exact,
whereas for other geometries, the returned elevation is a conservative
estimate.
"""
# Calculate extent of box bounding the total collector geometry
x_min, y_min, x_max, y_max = total_collector_geometry.bounds
# Collector dimensions
x_dim = x_max - x_min
y_dim = y_max - y_min
delta_gamma_rad = np.arcsin(x_dim / tracker_distance)
# Calculate max elevation based on the bounding box (rectangular)
max_elevations_rectangular = np.rad2deg(np.arcsin(
y_dim * np.cos(np.deg2rad(relative_slope)) /
(tracker_distance * np.cos(delta_gamma_rad)))) + relative_slope
# Calculate max elevations using the minimum bounding diameter (circular)
D_min = _calculate_min_tracker_spacing(total_collector_geometry)
max_elevations_circular = np.rad2deg(np.arcsin(
(D_min * np.cos(np.deg2rad(relative_slope)))/tracker_distance)) \
+ relative_slope
# Compute max elevation
max_elevation = np.nanmin([np.nanmax(max_elevations_rectangular),
np.nanmax(max_elevations_circular)])
return max_elevation
|
f3e623607ae1c2576fa375146f4acc6186189d8c
| 3,643,674
|
def tensor_scatter_add(input_x, indices, updates):
"""
Creates a new tensor by adding the values from the positions in `input_x` indicated by
`indices`, with values from `updates`. When multiple values are given for the same
index, the updated result will be the sum of all values. This operation is almost
equivalent to using ScatterNdAdd, except that the updates are applied on output `Tensor`
instead of input `Parameter`.
The last axis of `indices` is the depth of each index vector. For each index vector,
there must be a corresponding value in `updates`. The shape of `updates` should be
equal to the shape of `input_x[indices]`. For more details, see use cases.
Note:
If some values of the `indices` are out of bound, instead of raising an index error,
the corresponding `updates` will not be updated to `input_x`.
Args:
- **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1].
- **indices** (Tensor) - The index of input tensor whose data type is int32 or int64.
The rank must be at least 2.
- **updates** (Tensor) - The tensor used to update the input tensor; it has the same dtype
as `input_x`, and its shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:].
Returns:
Tensor, has the same shape and type as `input_x`.
Raises:
TypeError: If dtype of `indices` is neither int32 nor int64.
ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> import mindspore
>>> import numpy as np
>>> from mindspore import Tensor, nn
>>> from mindspore import ops
>>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32)
>>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32)
>>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32)
>>> output = ops.tensor_scatter_add(input_x, indices, updates)
>>> print(output)
[[ 3.1 0.3 3.6]
[ 0.4 0.5 -3.2]]
"""
return tensor_scatter_add_(input_x, indices, updates)
|
38707efab3d2f947cbc44dacb6427281d3b652cb
| 3,643,675
|
def test100():
"""
CIFAR-100 test set creator.
It returns a reader creator, each sample in the reader is image pixels in
[0, 1] and label in [0, 99].
:return: Test reader creator.
:rtype: callable
"""
return reader_creator(
paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
'test')
|
f43e27a7ce1ec40dfc50d513de5406b2683a566b
| 3,643,676
|
def _calculate_target_matrix_dimension(m, kernel, paddings, strides):
"""
Calculate the target matrix dimension.
Parameters
----------
m: ndarray
2d Matrix
kernel: ndarray
2d Convolution kernel
paddings: tuple
Number of padding elements in (row, column) on one side.
If you put 2 padding on the left and 2 on the right, specify 2.
strides: tuple
Step size in (row, column)
Returns
-------
out: tuple
Tuple containing (number of rows, number of columns)
Raises
------
ValueError
If kernel size is greater than m in any axis after padding
"""
source_height = m.shape[0]
source_width = m.shape[1]
padding_row = paddings[0]
padding_column = paddings[1]
kernel_height = kernel.shape[0]
kernel_width = kernel.shape[1]
if kernel_height > (source_height + padding_row) or kernel_width > (source_width + padding_column):
raise ValueError("Kernel size is larger than the matrix")
row_stride = strides[0]
col_stride = strides[1]
# (source_height - kernel_height)/strides[0] is how many steps you can go down.
# + 1 to include the start position.
target_height = int((source_height + padding_row - kernel_height) / row_stride) + 1
target_width = int((source_width + padding_column - kernel_width) / col_stride) + 1
return (target_height, target_width)
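# Added example (numpy imported here for illustration): a 5x5 input with a 3x3
# kernel, no padding and unit strides yields a 3x3 output grid.
import numpy as np
assert _calculate_target_matrix_dimension(
    np.zeros((5, 5)), np.ones((3, 3)), paddings=(0, 0), strides=(1, 1)) == (3, 3)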
|
77b5cabd7101b957a27fc422d1ed1715525400a0
| 3,643,677
|
def any_email():
"""
Return random email
>>> import re
>>> result = any_email()
>>> type(result)
<type 'str'>
>>> re.match(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", result, re.IGNORECASE) is not None
True
"""
return "%s@%s.%s" % (any_string(max_length=10),
any_string(max_length=10),
any_string(min_length=2, max_length=3))
|
8575d02d3c9a777bc2cf27f1344676cad5514d5e
| 3,643,678
|
def transform_pts_base_to_stitched_im(pts):
"""Project 3D points in base frame to the stitched image
Args:
pts (np.array[3, N]): points (x, y, z)
Returns:
pts_im (np.array[2, N])
inbound_mask (np.array[N])
"""
im_size = (480, 3760)
# to image coordinate
pts_rect = pts[[1, 2, 0], :]
pts_rect[:2, :] *= -1
# to pixel
horizontal_theta = np.arctan2(pts_rect[0], pts_rect[2])
horizontal_percent = horizontal_theta / (2 * np.pi) + 0.5
x = im_size[1] * horizontal_percent
y = (
485.78 * pts_rect[1] / pts_rect[2] * np.cos(horizontal_theta)
+ 0.4375 * im_size[0]
)
# horizontal_theta = np.arctan(pts_rect[0, :] / pts_rect[2, :])
# horizontal_theta += (pts_rect[2, :] < 0) * np.pi
# horizontal_percent = horizontal_theta / (2 * np.pi)
# x = ((horizontal_percent * im_size[1]) + 1880) % im_size[1]
# y = (
# 485.78 * (pts_rect[1, :] / ((1 / np.cos(horizontal_theta)) * pts_rect[2, :]))
# ) + (0.4375 * im_size[0])
# x is always in bound by cylindrical parametrization
# y is always at the lower half of the image, since laser is lower than the camera
# thus only one boundary needs to be checked
inbound_mask = y < im_size[0]
return np.stack((x, y), axis=0).astype(np.int32), inbound_mask
|
c6397451e458af086fe316c6933ca27641daac26
| 3,643,679
|
def get_equal_static_values(*args):
"""get_equal_static_values(FileConstHandle input, FileConstHandle out) -> bool"""
return _RMF.get_equal_static_values(*args)
|
8f929f0eae16e620b5025ad34b4437d836d8d671
| 3,643,680
|
def quaternion_to_rotation_matrix(quaternion):
"""
This converts a quaternion representation of an orientation to
a rotation matrix. The input is a 4-component numpy array in
the order [w, x, y, z], and the output is a 3x3 matrix stored
as a 2D numpy array. We follow the approach in
"3D Math Primer for Graphics and Game Development" by
Dunn and Parberry.
"""
w = quaternion[0]
x = quaternion[1]
y = quaternion[2]
z = quaternion[3]
R = np.empty((3, 3), dtype=np.float64)
R[0][0] = 1.0 - 2.0*y**2 - 2.0*z**2
R[0][1] = 2.0*x*y + 2.0*w*z
R[0][2] = 2.0*x*z - 2.0*w*y
R[1][0] = 2.0*x*y - 2.0*w*z
R[1][1] = 1.0 - 2.0*x**2 - 2.0*z**2
R[1][2] = 2.0*y*z + 2.0*w*x
R[2][0] = 2.0*x*z + 2.0*w*y
R[2][1] = 2.0*y*z - 2.0*w*x
R[2][2] = 1.0 - 2.0*x**2 - 2.0*y**2
return R
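# Added sanity check (numpy imported here for illustration): the identity
# quaternion [w, x, y, z] = [1, 0, 0, 0] maps to the 3x3 identity matrix.
import numpy as np
assert np.allclose(
    quaternion_to_rotation_matrix(np.array([1.0, 0.0, 0.0, 0.0])), np.eye(3))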
|
95a8dd9d0a9510710e7b6ed676a5f03e26b2da96
| 3,643,681
|
def load_subject(filename: str,
mask_niimg):
"""
Load a subject saved in .mat format with
the version 7.3 flag. Return the subject
niimg, using a mask niimg as a template
for nifti headers.
Args:
filename <str> the .mat filename for the subject data
mask_niimg niimg object the mask niimg object used for nifti headers
"""
subject_data = None
with h5py.File(filename, 'r') as f:
subject_data = f['SM_feature'][()]
# It's necessary to reorient the axes, since h5py flips axis order
subject_data = np.moveaxis(subject_data, [0, 1, 2, 3], [3, 2, 1, 0])
subject_niimg = nl.image.new_img_like(
mask_niimg, subject_data, affine=mask_niimg.affine, copy_header=True)
return subject_niimg
|
e3cdb751cebd7407b694555adfb21e7a6a224c50
| 3,643,682
|
def pretty_duration(seconds):
"""Return a human-readable string for the specified duration"""
if seconds < 2:
return '%d second' % seconds
elif seconds < 120:
return '%d seconds' % seconds
elif seconds < 7200:
return '%d minutes' % (seconds // 60)
elif seconds < 48 * 3600:
return '%d hours' % (seconds // 3600)
else:
return '%d days' % (seconds // (24 * 3600))
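# Added examples illustrating the branch thresholds above.
assert pretty_duration(1) == '1 second'
assert pretty_duration(90) == '90 seconds'
assert pretty_duration(3600) == '60 minutes'
assert pretty_duration(90000) == '25 hours'
assert pretty_duration(3 * 24 * 3600) == '3 days'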
|
8e34addedeeb98e1e028fa9374fcc8c4f134a9f7
| 3,643,684
|
def plot_ecdf(tidy_data, cats, val, title, width=550, conf_int=False):
"""
Plots an ECDF of tidy data.
tidy_data: Set of tidy data.
cats: Categories to plot
val: The value to plot
title: Title of plot
width: width of plot
conf_int: Whether or not to bootstrap a CI.
"""
p = bokeh_catplot.ecdf(
data = tidy_data,
cats = cats,
val = val,
title = title,
width = width,
conf_int = conf_int,
)
return p
|
11ef82111ad300826f47f6ce91ea588911a790c8
| 3,643,685
|
from operator import concat
def upsert(left, right, inclusion=None, exclusion=None):
"""Upserts the specified left collection with the specified right collection by overriding the
left values with the right values that have the same indices and concatenating the right values
to the left values that have different indices on the common keys that are in the specified
inclusive list and are not in the specified exclusive list."""
right = collection_to_common_type(right, left, inclusion=inclusion, exclusion=exclusion)
left = update(left, include_index(right, left))
return concat(left, exclude_index(right, left))
|
b4754c01ff521b892107afd8dd12b015bd4e293a
| 3,643,686
|
from collections import Counter
def train(training_data):
"""Trains the model on a given data set.
Parameters
----------
training_data : iterable of (word, tag) pairs
Returns
-------
dict mapping each word to its most frequently seen tag
"""
counts = Counter(training_data)
model = {}
# sort counts by lowest occurrences, up to most frequent.
# this allows higher frequencies to overwrite related
# values in the model
for pair, _ in counts.most_common()[:-len(counts)-1:-1]:
word, tag = pair
model[word] = tag
return model
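# Added usage sketch: training_data is assumed to be an iterable of (word, tag)
# pairs; rarer pairs are written first, so the most frequent tag per word wins.
example_data = [("bank", "NOUN"), ("bank", "NOUN"), ("bank", "VERB"), ("run", "VERB")]
assert train(example_data) == {"bank": "NOUN", "run": "VERB"}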
|
328901b090392097d22b21a948691787e0128d48
| 3,643,687
|
import datetime
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['cache'] = 86400
desc['description'] = """This chart totals the number of distinct calendar
days per month that a given present weather condition is reported within
the METAR data feed. The calendar day is computed for the local time zone
of the reporting station.
<p>The reporting of present weather codes within METARs has changed over
the years and there is some non-standard nomenclature used by some sites.
The thunder (TS) reports are delineated into three categories here to
hopefully allow more accurate statistics.
<ul>
<li><strong>All Thunder Reports (TS)</strong> includes any
<code>TS</code> mention in any present weather code</li>
<li><strong>Thunder in Vicinity (VCTS)</strong> includes any
<code>VCTS</code> mention in any present weather code, for example,
<code>VCTSRA</code> would match.</li>
<li><strong>Thunder Reports (excluding VCTS)</strong> includes most
<code>TS</code> mentions, but not any including <code>VC</code></li>
</ul>
"""
desc['arguments'] = [
dict(type='zstation', name='zstation', default='DSM',
label='Select Station:', network='IA_ASOS'),
dict(type='year', name="year", label='Year to Highlight:',
default=datetime.date.today().year, min=1973),
dict(type='select', name='var', default='FG',
label='Present Weather Option:', options=PDICT),
]
return desc
|
0927644a801a2829a97fbc6e78ef70fff1e8edfe
| 3,643,688
|
import logging
import datetime
import time
def check_cortex(ioc, ioc_type, object_id, is_mail=False, cortex_expiration_days=30):
"""Run all available analyzer for ioc.
arguments:
- ioc: value/path of item we need to check on cortex
- ioc_type: type of the ioc (generic_relation and cortex datatype)
- object_id: item to attach report to
- is_mail: whether the ioc is a mail object [the mail datatype is for addresses; file is for full mails]
- cortex_expiration_days: reports older than this many days are ignored and the analyzer is re-run
"""
_, _, cortex_api = get_info(mail=False)
# Mail object is file in cortex
# need to save mail object analyzer as mail_obj to discriminate them
filter_type = ioc_type if not is_mail else "mail_obj"
analyzers = Analyzer.objects.filter(
disabled=False, supported_types__contains=[filter_type]
).order_by("-priority")
# Full mail only on premise
if ioc_type == "file":
analyzers = analyzers.filter(onpremise=True)
if is_mail is True:
content_type = Mail
else:
content_type = Attachment
elif ioc_type == "mail":
content_type = Address
elif ioc_type == "url":
content_type = Url
elif ioc_type == "domain":
content_type = Domain
elif ioc_type == "ip":
content_type = Ip
elif ioc_type == "hash":
content_type = Attachment
else:
logging.error("Wrong ioc_type type {}".format(ioc_type))
return
old_reports = Report.objects.filter(
content_type=ContentType.objects.get_for_model(content_type),
object_id=object_id,
success=True,
date__gte=datetime.datetime.today()
- datetime.timedelta(days=cortex_expiration_days),
)
try:
db_object = content_type.objects.get(pk=object_id)
except Exception:
logging.error("CORTEX {} {} {} {}".format(ioc, ioc_type, object_id, is_mail))
return
for analyzer in analyzers:
# Check if the item has already been processed
for report in old_reports:
if report.analyzer == analyzer:
if "malicious" in report.taxonomies:
db_object.tags.add(
"{}: malicious".format(analyzer.name),
tag_kwargs={"color": "#FF0000"},
)
db_object.taxonomy = 4
db_object.save()
elif "suspicious" in report.taxonomies:
db_object.tags.add(
"{}: suspicious".format(analyzer.name),
tag_kwargs={"color": "#C15808"},
)
db_object.taxonomy = max(3, db_object.taxonomy)
db_object.save()
elif "safe" in report.taxonomies:
db_object.tags.add(
"{}: safe".format(analyzer.name),
tag_kwargs={"color": "#00FF00"},
)
db_object.taxonomy = max(2, db_object.taxonomy)
db_object.save()
elif "info" in report.taxonomies:
db_object.tags.add(
"{}: info".format(analyzer.name),
tag_kwargs={"color": "#00B0FF"},
)
db_object.taxonomy = max(1, db_object.taxonomy)
db_object.save()
continue
# If not, re-run the analyzer
try:
job = cortex_api.analyzers.run_by_name(
analyzer.name,
{"data": ioc, "dataType": ioc_type, "tlp": 1},
force=1,
)
while job.status not in ["Success", "Failure"]:
time.sleep(10)
job = cortex_api.jobs.get_report(job.id)
if job.status == "Success":
response = job.json()
try:
taxonomies = glom(
response, ("report.summary.taxonomies", ["level"])
)
except PathAccessError:
taxonomies = None
report = Report(
response=response,
content_object=db_object,
analyzer=analyzer,
taxonomies=taxonomies,
success=True,
)
report.save()
if "malicious" in taxonomies:
db_object.tags.add(
"{}: malicious".format(analyzer.name),
tag_kwargs={"color": "#FF0000"},
)
db_object.taxonomy = 4
db_object.save()
elif "suspicious" in taxonomies:
db_object.tags.add(
"{}: suspicious".format(analyzer.name),
tag_kwargs={"color": "#C15808"},
)
db_object.taxonomy = max(3, db_object.taxonomy)
db_object.save()
elif "safe" in taxonomies:
db_object.tags.add(
"{}: safe".format(analyzer.name),
tag_kwargs={"color": "#00FF00"},
)
db_object.taxonomy = max(2, db_object.taxonomy)
db_object.save()
elif "info" in taxonomies:
db_object.tags.add(
"{}: info".format(analyzer.name),
tag_kwargs={"color": "#00B0FF"},
)
db_object.taxonomy = max(1, db_object.taxonomy)
db_object.save()
elif job.status == "Failure":
report = Report(
content_object=db_object,
analyzer=analyzer,
success=False,
)
report.save()
except Exception as excp:
logging.error(
"ERROR running analyzer {} for {}: {}".format(analyzer.name, ioc, excp)
)
return True
|
c647ab09bd97dda75dc4073634ac4a68cbf8613a
| 3,643,689
|
def _env_corr_same(wxy, Xa, Ya, sign=-1, log=True, x_ind=None, y_ind=None):
"""
The cSPoC objective function with same filters for both data sets:
the correlation of amplitude envelopes
Additionally, it returns the gradients of the objective function
with respect to each of the filter coefficients.
Notes:
------
The input datasets Xa and Ya are the analytic representations of the
original datasets X and Y, hence they must be complex arrays.
Xa and Ya can be either 2d numpy arrays of shape
(channels x datapoints) or 3d array of shape
(channels x datapoints x trials).
For 3d arrays the average envelope in each trial is calculated if x_ind
(or y_ind, respectively) is None. If they are set, the difference of
the instantaneous amplitude envelope at x_ind/y_ind and the average
envelope is calculated for each trial.
If log == True, then the log transform is taken before the average
inside the trial
Input:
------
-- wxy is the array of shared filter coefficients for x and y
-- Xa - numpy array - complex analytic representation of X
Xa is the first Hilbert-transformed dataset of shape px x N (x tr),
where px is the number of sensors, N the number of datapoints, tr
the number of trials
-- Ya is the second Hilbert-transformed dataset of shape py x N (x tr)
-- sign {-1, 0, 1} - the correlation coefficient is multiplied with this
number. If the result of this function is minimized,
-1 should be used to find maximum correlation, 1
should be used to find maximal anti-correlation, and 0
returns the absolute value of the correlation;
defaults to -1
-- log {True, False} - compute the correlation between the log-
transformed envelopes, if datasets come in
epochs, then the log is taken before averaging
inside the epochs
-- x_ind int - the time index (-Xa.shape[1] <= x_ind < Xa.shape[1]),
where the difference of the instantaneous envelope and
the average envelope is determined for Xa
-- y_ind int - the time index (-Ya.shape[1] <= y_ind < Ya.shape[1]),
where the difference of the instantaneous envelope and
the average envelope is determined for Ya
Output:
-------
-- c - float - the correlation coefficient of the amplitude envelopes
of X and Y multiplied by the value of \"sign\"
-- c_der - numpy array - the gradient of c with respect to each of the
coefficients in wxy
"""
assert isinstance(Xa, _np.ndarray), "Xa must be numpy array"
assert _np.iscomplexobj(Xa), "Xa must be a complex-type numpy array" +\
", i.e. the analytic representation of X"
assert (Xa.ndim ==2 or Xa.ndim==3), "Xa must be 2D or 3D numpy array"
assert isinstance(Ya, _np.ndarray), "Ya must be numpy array"
assert _np.iscomplexobj(Ya), "Ya must be a complex-type numpy array" +\
", i.e. the analytic representation of Y"
assert (Ya.ndim ==2 or Ya.ndim==3), "Ya must be 2D or 3D numpy array"
assert Xa.shape[-1] == Ya.shape[-1], "Size of last dimension in Xa " +\
"Ya must agree"
p1 = Xa.shape[0]
p2 = Ya.shape[0]
assert p1 == p2, 'Dimensionality of Xa and Ya must agree for cSPoc' +\
' with same filters'
assert len(wxy) == p1, "Length of wxy must equal the" + \
" number of variables in Xa and Ya"
assert isinstance(log, bool), "\"log\" must be a boolean (True or False)"
assert sign in [-1, 1, 0], "\"sign\" must be -1, 1, or 0"
if x_ind != None:
assert Xa.ndim == 3, "If x_ind is set, Xa must be 3d array!"
assert isinstance(x_ind, int), "x_ind must be integer!"
assert ((x_ind >= -Xa.shape[1]) and
(x_ind < Xa.shape[1])), "x_ind must match the range of " +\
"Xa.shape[1]"
if y_ind != None:
assert Ya.ndim == 3, "If y_ind is set, Ya must be 3d array!"
assert isinstance(y_ind, int), "y_ind must be integer!"
assert ((y_ind >= -Ya.shape[1]) and
(y_ind < Ya.shape[1])), "y_ind must match the range of " +\
"Ya.shape[1]"
# filter signal spatially
Xa_filt = _np.tensordot(wxy, Xa, axes=(0,0))
Ya_filt = _np.tensordot(wxy, Ya, axes=(0,0))
# get envelope of filtered signal
x_env = _np.abs(Xa_filt)
y_env = _np.abs(Ya_filt)
# get derivatives of envelopes
envx_derwx = ((Xa_filt.real * Xa.real +
Xa_filt.imag * Xa.imag) / x_env)
envy_derwy = ((Ya_filt.real * Ya.real +
Ya_filt.imag * Ya.imag) / y_env)
if log:
envx_derwx = envx_derwx / x_env
envy_derwy = envy_derwy / y_env
x_env = _np.log(x_env)
y_env = _np.log(y_env)
if ((Xa.ndim == 3) and (x_ind != None)):
envx_derwx = envx_derwx[:,x_ind] - envx_derwx.mean(1)
x_env = x_env[x_ind] - x_env.mean(0)
elif Xa.ndim == 3:
envx_derwx = envx_derwx.mean(1)
x_env = x_env.mean(0)
if ((Ya.ndim == 3) and (y_ind != None)):
envy_derwy = envy_derwy[:,y_ind] - envy_derwy.mean(1)
y_env = y_env[y_ind] - y_env.mean(0)
elif Ya.ndim == 3:
envy_derwy = envy_derwy.mean(1)
y_env = y_env.mean(0)
# remove mean of envelopes and derivatives
x_env = x_env - x_env.mean()
y_env = y_env - y_env.mean()
envx_derwx = envx_derwx - envx_derwx.mean(1)[:,_np.newaxis]
envy_derwy = envy_derwy - envy_derwy.mean(1)[:,_np.newaxis]
# numerator of correlation
num = _np.mean(x_env * y_env)
# derivative of numerator
num_d = _np.mean(envx_derwx*y_env + x_env*envy_derwy,1)
# denominator of correlation
denom = _np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2))
# derivative of denominator
denom_d = (
(_np.mean(x_env*envx_derwx,1)*_np.mean(y_env**2) +
_np.mean(x_env**2)*_np.mean(y_env*envy_derwy,1)
) /
_np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2)))
#final correlation
corr = num / denom
#final derivative
corr_d = (num_d*denom - num*denom_d) / denom**2
if sign == 0:
return _np.sign(corr)*corr, _np.sign(corr)*corr_d
else:
return sign*corr, sign*corr_d
|
2004e3ab1f9c81466e8dc946b04baf5a692b169d
| 3,643,690
|
def create_resource():
"""Hosts resource factory method"""
deserializer = HostDeserializer()
serializer = HostSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
|
ed49ae9fecce67fcd3c4fa1a2eac469eb97e239b
| 3,643,691
|
def get_ref_kmer(ref_seq, ref_name, k_len):
""" Load reference kmers. """
ref_mer = []
ref_set = set()
for i in range(len(ref_seq) - k_len + 1):
kmer = ref_seq[i:(i + k_len)]
if kmer in ref_set:
raise ValueError(
"%s found multiple times in reference %s, at pos. %d" % (
kmer, ref_name, i)
)
ref_mer.append(kmer)
ref_set.add(kmer)
return ref_mer
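# Added example: the 3-mers of "ATCGA" are unique, so no ValueError is raised.
assert get_ref_kmer("ATCGA", "ref1", 3) == ["ATC", "TCG", "CGA"]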
|
72b75dccfba122a986d50e144dea62bfafe0fb50
| 3,643,692
|
def get_distutils_build_or_install_option(option):
""" Returns the value of the given distutils build or install option.
Parameters
----------
option : str
The name of the option
Returns
-------
val : str or None
The value of the given distutils build or install option. If the
option is not set, returns None.
"""
return get_distutils_option(option, ['build', 'build_ext', 'build_clib',
'install'])
|
7f0d72e1c30c752761eb8c7f8f0ffeb875183d4d
| 3,643,693
|
def CMDset_close(parser, args):
"""Closes the issue."""
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
auth_config = auth.extract_auth_config_from_options(options)
if args:
parser.error('Unrecognized args: %s' % ' '.join(args))
cl = Changelist(auth_config=auth_config)
# Ensure there actually is an issue to close.
cl.GetDescription()
cl.CloseIssue()
return 0
|
6711ba947e9d839217a96568a0c07d1103646030
| 3,643,694
|
def gchip(k_k, b_b, c_c):
"""gchip(k_k, b_b, c_c)"""
yout = b_b*c_c*nu_f(1, b_b, k_k)**((c_c+1)/2)*\
cos((c_c+1)*atan(b_b*k_k))
return yout
|
81fa674a2fb03875e39f2968986104da06a5ea44
| 3,643,695
|
def create_component(ctx: NVPContext):
"""Create an instance of the component"""
return ProcessUtils(ctx)
|
ec9d4539583dbdeaf1c4f5d8fce337077d249ab2
| 3,643,696
|
import ipaddress
def is_valid_ip(ip: str) -> bool:
"""
Args:
ip: IP address
Returns: True if the string represents an IPv4 or an IPv6 address, false otherwise.
"""
try:
ipaddress.IPv4Address(ip)
return True
except ValueError:
try:
ipaddress.IPv6Address(ip)
return True
except ValueError:
return False
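# Added examples: both address families are accepted, anything else is rejected.
assert is_valid_ip("192.168.0.1") is True
assert is_valid_ip("::1") is True
assert is_valid_ip("not-an-ip") is False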
|
aa1d3b19828dd8c3dceaaa8d9d1017cc16c1f73b
| 3,643,697
|
def complete_tree(leaves):
"""
Complete a tree defined by its leaves.
Parameters:
----------
leaves : np.array(dtype=np.int64)
Returns:
--------
np.array(dtype=np.int64)
"""
tree_set = _complete_tree(leaves)
return np.fromiter(tree_set, dtype=np.int64)
|
4bfc1ae01efd9595ef875613bd00d76f7c485421
| 3,643,698
|
import torch
def pick_best_batch_size_for_gpu():
"""
Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give
you a good shot.
"""
# torch.cuda.mem_get_info() returns (free, total) in bytes; this heuristic
# keys off the total device memory.
free, total = torch.cuda.mem_get_info()
availableGb = total / (1024 ** 3)
if availableGb > 14:
return 16
elif availableGb > 10:
return 8
elif availableGb > 7:
return 4
return 1
|
31d970697b417b40f8ef5b41fdeacc0e378543a0
| 3,643,699
|
def getTensorRelativError(tA, pA):
"""Get the relative error between two tensors."""
pA_shape = np.shape(pA)
tA_shape = np.shape(tA)
assert (pA_shape == tA_shape), "Arrays must be same shape"
err = np.max(np.abs(np.array(pA)-np.array(tA)))
return err
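# Added example (numpy imported here for illustration): the returned value is
# the largest elementwise absolute difference between the two arrays.
import numpy as np
assert getTensorRelativError(np.array([1.0, 2.0]), np.array([1.0, 2.5])) == 0.5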
|
ea79bcebd5a39c020cdb5b35ee36dfe20b2f9c71
| 3,643,700
|
import itertools
def eliminations(rct_gras, prd_gras):
""" find eliminations consistent with these reactants and products
:param rct_gras: reactant graphs (must have non-overlapping keys)
:param prd_gras: product graphs (must have non-overlapping keys)
Eliminations are identified by forming a bond between an attacking heavy
atom and another atom not initially bonded to it, forming a ring. The bond
adjacent to the attacked atom is then broken, along with a second bond in
the ring, downstream of the attacking heavy atom, away from the attacked
atom.
"""
_assert_is_valid_reagent_graph_list(rct_gras)
_assert_is_valid_reagent_graph_list(prd_gras)
rxns = []
if len(rct_gras) == 1 and len(prd_gras) == 2:
rct_gra, = rct_gras
prds_gra = union_from_sequence(prd_gras)
ngb_keys_dct = atoms_neighbor_atom_keys(rct_gra)
frm1_keys = atom_keys(rct_gra, excl_syms=('H',))
frm2_keys = atom_keys(rct_gra)
bnd_keys = bond_keys(rct_gra)
frm_bnd_keys = [(frm1_key, frm2_key) for frm1_key, frm2_key
in itertools.product(frm1_keys, frm2_keys)
if frm1_key != frm2_key and
not frozenset({frm1_key, frm2_key}) in bnd_keys]
for frm1_key, frm2_key in frm_bnd_keys:
# Bond the radical atom to the hydrogen atom
gra_ = add_bonds(rct_gra, [(frm2_key, frm1_key)])
# Get keys to the ring formed by this extra bond
rng_keys = next((ks for ks in rings_atom_keys(gra_)
if frm2_key in ks and frm1_key in ks), None)
# Eliminations (as far as I can tell) only happen through TSs with
# 3- or 4-membered rings
if rng_keys is not None and len(rng_keys) < 5:
frm1_ngb_key, = ngb_keys_dct[frm1_key] & set(rng_keys)
frm2_ngb_key, = ngb_keys_dct[frm2_key] & set(rng_keys)
# Break the bonds on either side of the newly formed bond
gra_ = remove_bonds(gra_, [(frm1_key, frm1_ngb_key)])
gra_ = remove_bonds(gra_, [(frm2_key, frm2_ngb_key)])
inv_dct = isomorphism(gra_, prds_gra)
if inv_dct:
f_frm_bnd_key = (frm1_key, frm2_key)
f_brk_bnd_key1 = (frm1_key, frm1_ngb_key)
f_brk_bnd_key2 = (frm2_key, frm2_ngb_key)
inv_ = inv_dct.__getitem__
b_frm_bnd_key1 = tuple(map(inv_, f_brk_bnd_key1))
b_frm_bnd_key2 = tuple(map(inv_, f_brk_bnd_key2))
b_brk_bnd_key = tuple(map(inv_, f_frm_bnd_key))
forw_tsg = ts.graph(rct_gra,
frm_bnd_keys=[f_frm_bnd_key],
brk_bnd_keys=[f_brk_bnd_key1,
f_brk_bnd_key2])
back_tsg = ts.graph(prds_gra,
frm_bnd_keys=[b_frm_bnd_key1,
b_frm_bnd_key2],
brk_bnd_keys=[b_brk_bnd_key])
rcts_atm_keys = list(map(atom_keys, rct_gras))
prds_atm_keys = list(map(atom_keys, prd_gras))
if inv_dct[frm2_key] not in prds_atm_keys[1]:
prds_atm_keys = list(reversed(prds_atm_keys))
# Create the reaction object
rxns.append(Reaction(
rxn_cls=par.ReactionClass.ELIMINATION,
forw_tsg=forw_tsg,
back_tsg=back_tsg,
rcts_keys=rcts_atm_keys,
prds_keys=prds_atm_keys,
))
return ts_unique(rxns)
|
3acfb77d48223e1e31f7ce9b563bc5d86102b5b2
| 3,643,701
|
def sigmoid(*columns):
"""Fit a Sigmoid through the data of the last scan.
The return value is a pair of tuples::
((a, b, x0, c), (d_a, d_b, d_x0, d_c))
where the elements of the second tuple are the estimated standard errors of the
fit parameters. The fit parameters are:
* a - amplitude of the Sigmoid
* b - steepness of the curve
* x0 - center
* c - background
if the fit failed, the result is ``(None, None)``.
Example::
cscan(...)
values, stderr = sigmoid('h', 'adet')
"""
xs, ys, dys, _, ds = _getData(columns)
fit = SigmoidFit()
res = fit.run(xs, ys, dys)
if res._failed:
return None, None
session.notifyFitCurve(ds, 'sigmoid', res.curve_x, res.curve_y)
descrs = ['amplitude', 'steepness', 'center', 'background']
vals = []
for par, err, descr in zip(res._pars[1], res._pars[2], descrs):
vals.append((descr, '%.4f' % par, '%.4f' % err))
printTable(('parameter', 'value', 'error'), vals, session.log.info)
return CommandLineFitResult((tuple(res._pars[1]), tuple(res._pars[2])))
|
e9d031d07a8ef00b73634bb44ed6a94a1788f7c9
| 3,643,702
|
import re
def MakeSamplesFromOutput(metadata, output):
"""Create samples containing metrics.
Args:
metadata: dict contains all the metadata that reports.
output: string, command output
Example output:
perfkitbenchmarker/tests/linux_benchmarks/nccl_benchmark_test.py
Returns:
Samples containing training metrics, and the bandwidth
"""
samples = []
metadata.update(_SAMPLE_LINE_RE.match(output).groupdict())
results = regex_util.ExtractAllMatches(
r'(Rank\s+\d+) (.*)', output)
for rank, device in results:
metadata[rank] = device
results = regex_util.ExtractAllMatches(
r'^\s*'
r'(\d+)\s+'
r'(\d+)\s+'
r'(\w+)\s+'
r'(\w+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\d+(?:\.\d+)?)\s+'
r'(\S+)', output, re.MULTILINE)
max_out_of_place_algbw = 0
for row in results:
metadata_copy = metadata.copy()
metadata_copy.update(zip(_METADATA_COLUMNS, row))
for metric, metadata_key in sorted(_SAMPLE_NAMES.items()):
samples.append(sample.Sample(metric, float(metadata_copy[metadata_key]),
'GB/s', metadata_copy))
# Gbps is gigaBIT per second and GB/s is gigaBYTE per second
max_out_of_place_algbw = max(max_out_of_place_algbw,
float(metadata_copy['out_of_place_algbw']))
avg_bus_bandwidth = regex_util.ExtractExactlyOneMatch(
r'Avg bus bandwidth\s+: ([0-9\.]+)', output)
samples.append(sample.Sample('avg_busbw', float(avg_bus_bandwidth),
'GB/s', metadata))
samples.append(sample.Sample('max_out_of_place_algbw',
max_out_of_place_algbw * 8, 'Gbps', metadata))
return samples, max_out_of_place_algbw
|
2210caaf37a2fbfe768133e767754fb600435b0b
| 3,643,704
|
def tree_to_newick_rec(cur_node):
""" This recursive function is a helper function to generate the Newick string of a tree. """
items = []
num_children = len(cur_node.descendants)
for child_idx in range(num_children):
s = ''
sub_tree = tree_to_newick_rec(cur_node.descendants[child_idx])
if sub_tree != '':
s += '(' + sub_tree + ')'
s += cur_node.descendants[child_idx].name
items.append(s)
return ','.join(items)
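# Hedged usage sketch: the original node class is not included in this snippet,
# so a minimal stand-in with `name` and `descendants` attributes is used here.
from types import SimpleNamespace
_leaf_a = SimpleNamespace(name="A", descendants=[])
_leaf_b = SimpleNamespace(name="B", descendants=[])
_root = SimpleNamespace(name="root", descendants=[_leaf_a, _leaf_b])
assert tree_to_newick_rec(_root) == "A,B"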
|
751d46dbb4e3a5204900601164410b5bf7f0578b
| 3,643,706
|
import torch
def mdetr_efficientnetB3(pretrained=False, return_postprocessor=False):
"""
MDETR ENB3 with 6 encoder and 6 decoder layers.
Pretrained on our combined aligned dataset of 1.3 million images paired with text.
"""
model = _make_detr("timm_tf_efficientnet_b3_ns")
if pretrained:
checkpoint = torch.hub.load_state_dict_from_url(
url="https://zenodo.org/record/4721981/files/pretrained_EB3_checkpoint.pth",
map_location="cpu",
check_hash=True,
)
model.load_state_dict(checkpoint["model"])
if return_postprocessor:
return model, PostProcess()
return model
|
0e80da12a9fec55ccdbfdb3dcf78806bff2c6f20
| 3,643,707
|
import math
import itertools
def _check_EJR_brute_force(profile, committee):
"""
Test using brute-force whether a committee satisfies EJR.
Parameters
----------
profile : abcvoting.preferences.Profile
A profile.
committee : iterable of int
A committee.
Returns
-------
(bool, dict)
Whether the committee satisfies EJR, together with details about a
violating ell-cohesive group if it does not.
"""
# should check for ell from 1 until committee size
ell_upper_bound = len(committee) + 1
# loop through all possible ell
for ell in range(1, ell_upper_bound):
# list of voters with fewer than ell approved candidates in the committee
voters_less_than_ell_approved_candidates = []
# compute minimum group size for this ell
group_size = math.ceil(ell * (len(profile) / len(committee)))
# compute the list of voters to consider
for i, voter in enumerate(profile):
if len(voter.approved & committee) < ell:
voters_less_than_ell_approved_candidates.append(i)
# check if an ell-cohesive group can be formed with the considered voters
if len(voters_less_than_ell_approved_candidates) < group_size:
# if not possible then simply continue with next ell
continue
# check all possible combinations of considered voters,
# taken (possible group size) at a time
for combination in itertools.combinations(
voters_less_than_ell_approved_candidates, group_size
):
# to calculate the cut of approved candidates for the considered voters
# initialize the cut to be the approval set of the first voter in the current
# combination
cut = set(profile[combination[0]].approved)
# calculate the cut over all voters for current combination
# (also can skip first voter in combination, but inexpensive enough...)
for j in combination:
cut = cut & profile[j].approved
# if size of cut is >= ell, then combination is an ell-cohesive group
if len(cut) >= ell:
# we have found combination to be an ell-cohesive set, with no voter having
# at least ell approved candidates in committee. Thus EJR fails
detailed_information = {
"cohesive_group": voters_less_than_ell_approved_candidates,
"ell": ell,
"joint_candidates": cut,
}
return False, detailed_information
# if function has not returned by now, then it means that for all ell,
# no ell-cohesive group was found among candidates with less than ell
# approved candidates in committee. Thus committee satisfies EJR
detailed_information = {}
return True, detailed_information
|
f993daf9628ecad18fe25894ccd5fb8882d3e596
| 3,643,708
|
def read_stanford_labels():
"""Read stanford hardi data and label map"""
# First get the hardi data
fetch_stanford_hardi()
hard_img, gtab = read_stanford_hardi()
# Fetch and load
files, folder = fetch_stanford_labels()
labels_file = pjoin(folder, "aparc-reduced.nii.gz")
labels_img = nib.load(labels_file)
return hard_img, gtab, labels_img
|
8c9e5a3586125e7ffe3f9fe36b734ca7c19de53f
| 3,643,709
|
import urllib
def _WrapRequestForUserAgentAndTracing(http_client, trace_token,
trace_email,
trace_log,
gcloud_ua):
"""Wrap request with user-agent, and trace reporting.
Args:
http_client: The original http object.
trace_token: str, Token to be used to route service request traces.
trace_email: str, username to which service request traces should be sent.
trace_log: bool, Enable/disable server side logging of service requests.
gcloud_ua: str, User agent string to be included in the request.
Returns:
http, The same http object but with the request method wrapped.
"""
orig_request = http_client.request
def RequestWithUserAgentAndTracing(*args, **kwargs):
"""Wrap request with user-agent, and trace reporting.
Args:
*args: Positional arguments.
**kwargs: Keyword arguments.
Returns:
Wrapped request method with user-agent and trace reporting.
"""
modified_args = list(args)
# Use gcloud specific user-agent with command path and invocation-id.
# Pass in the user-agent through kwargs or args.
def UserAgent(current=''):
user_agent = '{0} {1}'.format(current, gcloud_ua)
return user_agent.strip()
cur_ua = RequestArgsGetHeader(modified_args, kwargs, 'user-agent', '')
RequestArgsSetHeader(modified_args, kwargs,
'user-agent', UserAgent(cur_ua))
# Modify request url to enable requested tracing.
url_parts = urlparse.urlsplit(args[0])
query_params = urlparse.parse_qs(url_parts.query)
if trace_token:
query_params['trace'] = 'token:{0}'.format(trace_token)
elif trace_email:
query_params['trace'] = 'email:{0}'.format(trace_email)
elif trace_log:
query_params['trace'] = 'log'
# Replace the request url in the args
modified_url_parts = list(url_parts)
modified_url_parts[3] = urllib.urlencode(query_params, doseq=True)
modified_args[0] = urlparse.urlunsplit(modified_url_parts)
return orig_request(*modified_args, **kwargs)
http_client.request = RequestWithUserAgentAndTracing
# apitools needs this attribute to do credential refreshes during batch API
# requests.
if hasattr(orig_request, 'credentials'):
setattr(http_client.request, 'credentials', orig_request.credentials)
return http_client
|
d6a2a4c127670aa8409cb39a81833a5a5e3fba90
| 3,643,710
|
def indexData_x(x, ukn_words):
"""
Map each word in the given data to a unique integer. A special index will be kept for "out-of-vocabulary" words.
:param x: The data, as a list of lists of words
:param ukn_words: Token used for out-of-vocabulary words (mapped to index 1)
:return: Two dictionaries: one mapping words to indices, and the reverse one mapping indices back to words
"""
# Retrieve all words used in the data (with duplicates)
all_text = [w for e in x for w in e]
# Create a DETERMINISTIC set of all words
used = set()
words = [x for x in all_text if x not in used and (used.add(x) or True)]
print("Number of entries: ",len(all_text))
print("Individual entries: ",len(words))
# Assign an integer index for each individual word
word2ind = {word: index for index, word in enumerate(words, 2)}
ind2word = {index: word for index, word in enumerate(words, 2)}
# To deal with out-of-vocabulary words
word2ind.update({ukn_words:1})
ind2word.update({1:ukn_words})
# The index '0' is kept free in both dictionaries
return word2ind, ind2word
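# Added example: word indices start at 2, index 1 is reserved for the
# unknown-word token, and index 0 is left free (e.g. for padding).
w2i, i2w = indexData_x([["the", "cat"], ["the", "dog"]], "<UNK>")
assert w2i == {"the": 2, "cat": 3, "dog": 4, "<UNK>": 1}
assert i2w[1] == "<UNK>" and i2w[2] == "the"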
|
3f6ffd97d33400c3418b78ad3b383766cc07bee3
| 3,643,711
|
def BFS_TreeSearch(problem):
"""
Tree Search BFS
Args->problem: OpenAI Gym environment
Returns->(path, time_cost, space_cost): solution as a path and stats.
"""
node = Node(problem.startstate, None)
time_cost = 0
space_cost = 1
if node.state == problem.goalstate:
return build_path(node), time_cost, space_cost
frontier = NodeQueue()
frontier.add(node)
while not frontier.is_empty():
current = frontier.remove()
for action in range(problem.action_space.n):
time_cost += 1
child = Node(problem.sample(current.state, action), current)
if(child.state == problem.goalstate):
return build_path(child), time_cost, space_cost # solution
frontier.add(child)
space_cost = max(space_cost,len(frontier))
return None, time_cost, space_cost
|
b2523dea8b9813e2582a0acef27b0d46bb1a14b9
| 3,643,712
|
from typing import Tuple
from typing import Optional
def _objective_function(extra_features: jnp.ndarray,
media_mix_model: lightweight_mmm.LightweightMMM,
media_input_shape: Tuple[int,
int], media_gap: Optional[int],
target_scaler: Optional[preprocessing.CustomScaler],
media_scaler: preprocessing.CustomScaler,
geo_ratio: jnp.array,
seed: Optional[int],
media_values: jnp.ndarray) -> jnp.float64:
"""Objective function to calculate the sum of all predictions of the model.
Args:
extra_features: Extra features the model requires for prediction.
media_mix_model: Media mix model to use. Must have a predict method to be
used.
media_input_shape: Input shape of the data required by the model to get
predictions. This is needed since optimization might flatten some arrays
and they need to be reshaped before running new predictions.
media_gap: Media data gap between the end of training data and the start of
the out of sample media given. Eg. if 100 weeks of data were used for
training and prediction starts 2 months after training data finished we
need to provide the 8 weeks missing between the training data and the
prediction data so data transformations (adstock, carryover, ...) can take
place correctly.
target_scaler: Scaler that was used to scale the target before training.
media_scaler: Scaler that was used to scale the media data before training.
geo_ratio: The ratio to split channel media across geo. Should sum up to 1
for each channel and should have shape (c, g).
seed: Seed to use for PRNGKey during sampling. For replicability run
this function and any other function that gets predictions with the same
seed.
media_values: Media values required by the model to run predictions.
Returns:
The negative value of the sum of all predictions.
"""
if hasattr(media_mix_model, "n_geos") and media_mix_model.n_geos > 1:
media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)
media_values = jnp.tile(
media_values / media_input_shape[0], reps=media_input_shape[0])
# Distribute budget of each channels across time.
media_values = jnp.reshape(a=media_values, newshape=media_input_shape)
media_values = media_scaler.transform(media_values)
return -jnp.sum(
media_mix_model.predict(
media=media_values.reshape(media_input_shape),
extra_features=extra_features,
media_gap=media_gap,
target_scaler=target_scaler,
seed=seed).mean(axis=0))
|
f8ae8185d84d811dc1d5796aa8374127d2f16ea5
| 3,643,714
|
from typing import Union
from functools import reduce
def decode_block(block: np.ndarray) -> Union[np.ndarray, bool]:
"""
Decode a data block with hamming parity bits.
:param block: The data block to be decoded
:return the decoded data bits, False if the block is invalid
"""
if not block.size & block.size - 1 and block.size & 0x5555_5555:
_block = np.array(block.flat)
flip = reduce(lambda x, y: x ^ y, [i for i, bit in enumerate(_block) if bit] + [1, 1])
if flip:
if reduce(lambda x, y: x ^ y, _block):
warn('Two or more bit-flips occur, self-correction failed.')
warn("Single bit-flip at index {} corrected".format(flip))
_block[flip] = not _block[flip]
return np.array([bit for i, bit in enumerate(_block) if i and i & i - 1])
warn('Invalid block size.')
return False
|
c9ed9eb03271e2222aa62260461aaa7ee90eb842
| 3,643,715
|
def occupancy(meta, ax=None):
""" Show channel occupancy over time.
"""
if ax is None:
f, ax = plt.subplots()
f.set_figwidth(14)
f.suptitle("Occupancy over time")
start_time = meta.read_start_time.min() / 10000 / 60
end_time = meta.read_end_time.max() / 10000 / 60
total_minutes = end_time - start_time
num_channels = meta.channel_number.max()+1
X = np.zeros((num_channels, int(np.ceil(total_minutes))))
for channel, group in meta.groupby("channel_number"):
for index, read in group.iterrows():
a,b = read.read_start_time/10000/60, read.read_end_time / 10000 / 60
X[channel, round(a):round(b)] = 1
ax.imshow(X, aspect= total_minutes/1800, cmap="Greys")
ax.xaxis.set_label_text("Time (in minutes)")
ax.yaxis.set_label_text("Channel number")
return ax.get_figure(), ax
|
f6505a5bf7ff417194457ee2e04118edea9e6738
| 3,643,716
|
def reg1_r_characteristic(r, s, alpha, beta, c, h):
"""
evaluate x - ((4/3)r - (2/3)s)t in region 1, equation 19
"""
# when s < 0 the expression can be factored and you avoid the
# difference of nearly equal numbers and dividing by a small number
# equation 74
rr = r/c
ss = s/c
poly1 = 2.0*rr - ss
poly2 = 3.0*rr*rr - 2.0*rr*ss + ss*ss
poly3 = 4.0*rr**3 - 3.0*rr*rr*ss + 2.0*rr*ss*ss - ss**3
value = np.where(s <= 0., h*(1.0 - (2./3.)*poly2 + (32./135.)*poly3 + (4./9.)*alpha*(poly1 - poly2 + (4./15.)*poly3) - (2./9.)*beta*(poly2 - (8./15.)*poly3)),
evalPhip(r,alpha,beta,c,h)/(r + s + 1e-20) - (evalPhi(r,alpha,beta,c,h) - evalPhi(s,alpha,beta,c,h))/(r + s + 1e-20)**2 )
return value
|
9802483289387b7996665fe8f061d4393ff0daaf
| 3,643,717
|
from typing import Tuple
def olf_gd_offline_in_z(X: np.ndarray, k: int, rtol: float = 1e-6,
max_iter: int = 100000,
rectY: bool = False, rectZ: bool = False,
init: str = 'random', Y0=None, Z0=None,
verbose: bool = False, alpha=1,
cycle=500, rho=1, beta=0.1) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Gradient descent to calculate the solution of the olfactory
cost function
Parameters
----------
X:
input data matrix
k:
number of LNs
alpha:
the initial step size for the gradient updates
rtol:
relative tolerance
init:
initialization for the Y and Z matrices
Returns
------
Y and Z
Matrices minimizing the olf cost function
"""
D, N = X.shape # n_s is the number of samples
if init != 'given':
Y = get_init_matrix(D, N, method=init, A=X, scale=100, rect=rectY)
# Y = X # i wonder if you should do the simulation with the random matrix,
# so that is it not "cheating" by already taking a solution which resembles
# what you think the solution should be.
if rectY:
Y = rectify(Y)
# Y = X
Z = get_init_matrix(k, N, method=init, A=X, scale=100, rect=rectZ)
# Y = np.array(Y, dtype=FLOAT_TYPE)
# A = np.array(A, dtype=FLOAT_TYPE)
# X = np.array(X, dtype=FLOAT_TYPE, order='F')
# Y = np.array(Y, dtype=FLOAT_TYPE, order='F')
# Z = np.array(Z, dtype=FLOAT_TYPE, order='F')
else:
Y = Y0
Z = Z0
sigma = 0.1
cost0 = olf_cost(X, Y, Z, rho=rho)
cost2 = cost0.copy()
print(f'initial cost: {cost0}')
for j in range(max_iter):
Y_old = Y.copy()
cost_old = cost2
Y, cost2, successY, a1 = gd_step_Y(X, Y, Z, cost_old, sigma,
beta, alpha, rectY, rho=rho)
if not successY:
break
conv1 = np.amax(np.abs(Y - Y_old) / np.abs(Y_old + 1e-2))
d_cost1 = np.abs(cost_old - cost2) / np.abs(cost_old)
if d_cost1 < rtol and conv1 < rtol:
print(f'stopped y iteration because cost and Y stopped changing, {j}')
break
if j % cycle == 0 and j > 0:
alpha *= beta
cost0 = olf_cost(X, Y, Z, rho=rho)
cost2 = cost0.copy()
cost1 = cost0.copy()
print(f'cost after fixing Y: {cost0}')
# eye = np.eye(N)/N
costs = np.zeros(max_iter)
# eye = np.array(eye, dtype=FLOAT_TYPE, order='F')
if rectZ:
funcz = rectify
else:
funcz = lambda x: x
for i in range(max_iter):
# print(i, cost2)
costs[i] = cost2
Y_old = Y.copy()
Z_old = Z.copy()
cost_old2 = cost2.copy()
grad_Z = get_grad_Z(Y, Z, rho=rho)
# grad_Z = -get_grad_Z2(X, Z, rho=rho)
alpha_z = alpha
while alpha_z > EPSILON * 0.000001:
successZ = False
Z_new = funcz(Z_old + alpha_z * grad_Z)
# expected_cost_increase = sigma * np.sum(grad_Z * (Z_new - Z_old))
alpha_y = alpha
Y = Y_old.copy()
cost1 = olf_cost(X, Y, Z_new, rho=rho)
# print(alpha_z, cost1)
for j in range(max_iter):
# print(j, cost1)
Y_old2 = Y.copy()
cost_old1 = cost1
Y, cost1, successY, a1 = gd_step_Y(X, Y_old2, Z_new, cost_old1, sigma,
beta, alpha_y, rectY, rho=rho)
if not successY:
# print('y iteration not successful')
break
conv1 = np.amax(np.abs(Y - Y_old2) / np.abs(Y_old2 + 1e-2))
d_cost1 = np.abs(cost_old1 - cost1) / np.abs(cost_old1)
# print(conv1, d_cost1)
if d_cost1 < rtol and conv1 < rtol:
# print(f'stopped y iteration because cost and Y'
# f'stopped changing, {j}, {alpha_y}')
break
# print(f'i, j: {i}, {j}, after y iteration: costs: {cost2}, {cost_old1}, {cost1}')
# cost_new = olf_cost(X, Y, Z_new, rho=rho)
# print(expected_cost_increase, cost_new - cost_old)
cost_new = cost1
# if cost_new - cost_old2 > expected_cost_increase:
if cost_new - cost_old2 > 0:
# print(f'z iteration successful, {cost_old2}, {cost_new}')
successZ = True
break
alpha_z *= beta
if successZ:
Z = Z_new
cost2 = cost_new
else:
print('stopped because Z gd step was unsuccessful')
break
convz = np.amax(np.abs(Z-Z_old) / np.abs(Z_old + 1e-2))
d_cost2 = np.abs(cost_old2 - cost2) / np.abs(cost_old2)
if d_cost2 < rtol and convz < rtol:
print('stopped because costs and Z stopped changing')
break
if i % cycle == 0 and i > 0:
alpha *= beta
print(i, 'costs:', cost_old, cost1, cost2)
# print('costs:', cost_old, cost1, cost2)
# break
print(f'i: {i}, costs: {cost0}, {cost2}')
return Y, Z, costs[:i+1]
|
2397b0d24fa8fa9b8ddb9d7dd8553bd076feeb39
| 3,643,718
|
def get_cluster_codes(cluster: pd.Categorical) -> pd.Series:
"""Get the X location for plotting p-value string."""
categories = cluster.cat.categories.rename("cluster")
return pd.Series(range(len(categories)), index=categories, name="x")
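# Added example (pandas imported here for illustration): although the annotation
# says Categorical, the `.cat` accessor requires a categorical Series; each
# category then gets an integer x position.
import pandas as pd
codes = get_cluster_codes(pd.Series(["b", "a", "b"], dtype="category"))
assert list(codes) == [0, 1]
assert list(codes.index) == ["a", "b"] and codes.name == "x"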
|
bb6899e5245c14e47ac855fef95775c282a8ed0f
| 3,643,720
|
import math
import copy
def _expand_configurations_from_chain(chain, *, pragma: str = 'pytmc',
allow_no_pragma=False):
"""
Wrapped by ``expand_configurations_from_chain``, usable for callers that
don't want the full product of all configurations.
"""
def handle_scalar(item, pvname, config):
"""Handler for scalar simple or structured items."""
yield item, config
def handle_array_complex(item, pvname, config):
"""Handler for arrays of structured items (or enums)."""
low, high = item.array_info.bounds
expand_digits = math.floor(math.log10(high)) + 2
array_element_pragma = config.get('array', '')
for idx in parse_array_settings(array_element_pragma, (low, high)):
# shallow-copy; only touching the top level "pv" key
idx_config = copy.copy(config)
idx_config['pv'] += get_array_suffix(
config, idx, default=f':%.{expand_digits}d')
yield parser._ArrayItemProxy(item, idx), idx_config
def get_all_options(subitems, handler, pragmas):
split_pragma = split_pytmc_pragma('\n'.join(pragmas))
for pvname, separated_cfg in separate_configs_by_pv(split_pragma):
config = dictify_config(separated_cfg)
# config will have the SUBITEM key, applicable to its level
# in the hierarchy. If it exists, merge it with our current set.
if SUBITEM in config:
_merge_subitems(subitems, config[SUBITEM])
for key, value in subitems.get(PRAGMA, []):
config[key] = value
yield from handler(item, pvname, config)
# `subitems` keeps track of forward references with pragmas of members
# and sub-members (and so on)
subitems = {}
for item in chain:
subitems = subitems.get(item.name, {})
pragmas = list(pragma for pragma in get_pragma(item, name=pragma)
if pragma is not None)
if not pragmas:
if allow_no_pragma:
pragmas = [None]
yield [(item, None)]
continue
# If any pragma in the chain is unset, escape early
return []
if item.array_info and (item.data_type.is_complex_type or
item.data_type.is_enum):
options = get_all_options(subitems, handle_array_complex, pragmas)
else:
options = get_all_options(subitems, handle_scalar, pragmas)
yield list(options)
|
b1282a9cf65875be3e91c3f0eb09a73f0130ccf9
| 3,643,721
|
import base64
from Crypto.Cipher import AES
def encrypt(data=None, key=None):
"""
Encrypts data
:param data: Data to encrypt
:param key: Encryption key (salt)
"""
k = _get_padded_key(key)
e = AES.new(k, AES.MODE_CFB, k[::-1])
enc = e.encrypt(data)
return base64.b64encode(enc)
|
668a5e94ea6d1adddb038f5cab1f0d165bb98bb0
| 3,643,722
|
def box_in_k_largest(boxes, box, k):
"""Returns True if `box` is one of `k` largest boxes in `boxes`. If there are ties that
extend beyond k, they are included."""
if len(boxes) == 0:
return False
boxes = sorted(boxes, reverse=True, key=box_volume)
n = len(boxes)
prev = box_volume(boxes[0])
for i in range(n):
vol = box_volume(boxes[i])
if i >= k:
if vol < prev:
break
prev = vol
if np.array_equal(boxes[i], box):
return True
return False
|
e941513e47db5fb09e21b96933c629cf3c39bf49
| 3,643,724
|
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Returns specified diagonals.
If `a` is 2-D, returns the diagonal of a with the given offset, i.e., the
collection of elements of the form a[i, i+offset]. If `a` has more than two
dimensions, then the axes specified by axis1 and axis2 are used to determine
the 2-D sub-array whose diagonal is returned. The shape of the resulting
array can be determined by removing axis1 and axis2 and appending an index
to the right equal to the size of the resulting diagonals.
Args:
a (Tensor): Array from which the diagonals are taken.
offset (int): optional. Offset of the diagonal from the main diagonal.
Can be positive or negative. Defaults to main diagonal (0).
axis1 (int): optional. Axis to be used as the first axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
first axis (0).
axis2 (int): optional. Axis to be used as the second axis of the 2-D
sub-arrays from which the diagonals should be taken. Defaults to
second axis (1).
Returns:
Tensor, if `a` is 2-D, then a 1-D array containing the diagonal. If
a.ndim > 2, then the dimensions specified by axis1 and axis2 are removed,
and a new axis inserted at the end corresponding to the diagonal.
Raises:
ValueError: if the input tensor has less than two dimensions.
Supported Platforms:
``Ascend`` ``GPU`` ``CPU``
Examples:
>>> a = np.arange(4).reshape(2,2)
>>> print(a)
[[0 1]
[2 3]]
>>> output = np.diagonal(a)
>>> print(output)
[0 3]
>>> output = np.diagonal(a, 1)
>>> print(output)
[1]
>>> a = np.arange(8).reshape(2, 2, 2)
>>> print(a)
[[[0 1]
[2 3]]
[[4 5]
[6 7]]]
>>> output = np.diagonal(a, 0, 0, 1)
>>> print(output)
[[0 6]
[1 7]]
"""
ndim = F.rank(a)
if ndim < 2:
return _raise_value_error('diagonal requires an array of at least two dimensions')
dtype = F.dtype(a)
if _is_empty(F.shape(a)):
return _empty(dtype, (0,))
cast_type = dtype
if not isinstance(dtype, Float):
# reduce_sum only supports float types
cast_type = mstype.float32
a = F.cast(a, cast_type)
axes = _check_axis_valid((axis1, axis2), ndim)
perm = ()
for i in range(ndim):
if i not in axes:
perm += (i,)
perm += axes
a = transpose(a, perm)
shape = F.shape(a)
n, m = shape[-2:]
e = _eye(n, m, offset, cast_type)
e = _expand(e, ndim)
e = _broadcast_to(e, F.shape(e), F.shape(a), ndim)
prod = F.tensor_mul(a, e)
res = F.reduce_sum(prod, -1)
begin = ()
for i in range(ndim-2):
begin += (0,)
last_dim_begin = _max(0, -offset)
begin += (last_dim_begin,)
size = F.shape(res)[:-1]
last_dim_end = _min(
shape[-2], _max(0, shape[-1] - offset)) - last_dim_begin
if last_dim_end <= 0:
return _empty(dtype, size + (0,))
size += (last_dim_end,)
res = F.tensor_slice(res, begin, size)
if not _check_same_type(cast_type, dtype):
res = F.cast(res, dtype)
return res
|
64ada8a83fd1162e7d84e84007be0263cd71bd0c
| 3,643,725
|
def available_number_of_windows_in_array(n_samples_array, n_samples_window, n_advance):
"""
Parameters
----------
n_samples_array : int, total number of samples in the time series
n_samples_window : int, number of samples per window
n_advance : int, number of samples the window advances between strides
Returns
-------
int, the number of full windows that fit in the array
"""
stridable_samples = n_samples_array - n_samples_window
if stridable_samples < 0:
print("Window is longer than the time series")
raise Exception
available_number_of_strides = int(np.floor(stridable_samples / n_advance))
return available_number_of_strides + 1
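# Added example (numpy imported here since the snippet relies on np):
# 10 samples with a 4-sample window advancing by 2 -> windows start at 0, 2, 4, 6.
import numpy as np
assert available_number_of_windows_in_array(10, 4, 2) == 4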
|
cab937efe4408d4707b601d4a0a68782d062ab36
| 3,643,726
|
import torch
def tensor_to_image(tensor: torch.Tensor) -> ndarray:
"""
Convert a torch tensor to a numpy array
:param tensor: torch tensor
:return: numpy array
"""
image = TENSOR_TO_PIL(tensor.cpu().clone().squeeze(0))
return image
|
fb3f16ec6cee1c50d2b7e6f2e31bd94aa300cdfd
| 3,643,727
|
def shimizu_mirioka(XYZ, t, a=0.75, b=0.45):
"""
The Shimizu-Morioka attractor.
x0 = (0.1,0,0)
"""
x, y, z = XYZ
x_dt = y
y_dt = (1 - z) * x - a * y
z_dt = x**2 - b * z
return x_dt, y_dt, z_dt
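# Hedged integration sketch (scipy and numpy assumed available): the
# (state, t, ...) signature matches what scipy.integrate.odeint expects.
import numpy as np
from scipy.integrate import odeint
_t = np.linspace(0, 50, 5000)
_trajectory = odeint(shimizu_mirioka, (0.1, 0.0, 0.0), _t)
assert _trajectory.shape == (5000, 3)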
|
60e5b52e1755de8bcc966364d828d47b05af3723
| 3,643,728
|
def draw_cap_peaks_rh_coord(img_bgr, rafts_loc, rafts_ori, raft_sym, cap_offset, rafts_radii, num_of_rafts):
"""
draw lines to indicate the capillary peak positions
in right-handed coordinate
:param numpy array img_bgr: the image in bgr format
:param numpy array rafts_loc: the locations of rafts
:param numpy array rafts_ori: the orientation of rafts, in deg
:param int raft_sym: the symmetry of raft
:param int cap_offset: the angle between the dipole direction
and the first capillary peak, in deg
:param numpy array rafts_radii: radii of the rafts
:param int num_of_rafts: num of rafts
:return: bgr image file
"""
line_thickness = int(2)
line_color2 = (0, 255, 0)
cap_gap = 360 / raft_sym
# cap_offset = 45 # the angle between the dipole direction and the first capillary peak
output_img = img_bgr
height, width, _ = img_bgr.shape
for raft_id in np.arange(num_of_rafts):
for capID in np.arange(raft_sym):
# note that the sign in front of the sine term is "+"
line_start = (rafts_loc[raft_id, 0], height - rafts_loc[raft_id, 1])
line_end = (int(rafts_loc[raft_id, 0] + np.cos((rafts_ori[raft_id] + cap_offset + capID * cap_gap)
* np.pi / 180) * rafts_radii[raft_id]),
height - int(rafts_loc[raft_id, 1] + np.sin((rafts_ori[raft_id] + cap_offset + capID * cap_gap)
* np.pi / 180) * rafts_radii[raft_id]))
output_img = cv.line(output_img, line_start, line_end, line_color2, line_thickness)
return output_img
|
c180a23d7ad6a04d8e56a61a0cb5e058bf1e5d95
| 3,643,729
|
def pack_bidirectional_lstm_state(state, num_layers):
"""
Pack the hidden state of a BiLSTM s.t. the first dimension equals to the number of layers.
"""
assert (len(state) == 2 * num_layers)
_, batch_size, hidden_dim = state.size()
layers = state.view(num_layers, 2, batch_size, hidden_dim).transpose(1, 2).contiguous()
state = layers.view(num_layers, batch_size, -1)
return state
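
# A minimal usage sketch (not part of the original snippet): repack the hidden
# state of a 2-layer bidirectional LSTM so the first dimension is num_layers and
# the forward/backward halves are concatenated along the feature dimension.
import torch

num_layers, batch_size, hidden_dim = 2, 4, 16
state = torch.randn(2 * num_layers, batch_size, hidden_dim)  # as returned by a BiLSTM
packed = pack_bidirectional_lstm_state(state, num_layers)
assert packed.shape == (num_layers, batch_size, 2 * hidden_dim)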
|
de102ce55deceb5ca7211def122dc2767c35cdd3
| 3,643,731
|
import copy
def _create_record_from_template(template, start, end, fasta_reader):
"""Returns a copy of the template variant with the new start and end.
Updates to the start position cause a different reference base to be set.
Args:
template: third_party.nucleus.protos.Variant. The template variant whose
non-location and reference base information to use.
start: int. The desired new start location.
end: int. The desired new end location.
fasta_reader: GenomeReferenceFai object. The reader used to determine the
correct start base to use for the updated variant.
Returns:
An updated third_party.nucleus.protos.Variant with the proper start, end,
and reference base set and all other fields inherited from the template.
"""
retval = copy.deepcopy(template)
retval.start = start
retval.end = end
if start != template.start:
retval.reference_bases = fasta_reader.query(
ranges.make_range(retval.reference_name, start, start + 1))
return retval
|
62c9ff204cff3887daad0df4f710cc54f9c8dad9
| 3,643,732
|
import time
def convert_time(time_string):
"""
Input a time in HH:MM:SS form and output
a time object representing that
"""
return time.strptime(time_string, "%H:%M")
|
f34b46fe8cd242ee12a9768102486cba243d94df
| 3,643,735
|
from typing import Union
import torch
def get_distributed_mean(value: Union[float, torch.Tensor]):
"""Computes distributed mean among all nodes."""
if check_torch_distributed_initialized():
# Fix for runtime warning:
# To copy construct from a tensor, it is recommended to use
# sourceTensor.clone().detach() or
# sourceTensor.clone().detach().requires_grad_(True),
# rather than torch.tensor(sourceTensor).
if torch.is_tensor(value):
value = (
value.clone()
.detach()
.to(device=f"cuda:{torch.cuda.current_device()}")
)
else:
value = torch.tensor(
value,
dtype=torch.float,
device=f"cuda:{torch.cuda.current_device()}",
requires_grad=False,
)
torch.distributed.all_reduce(value)
value = float(value.item() / torch.distributed.get_world_size())
return value
|
9643b522e838a3a0aabcfab7021d4bac7d58e21d
| 3,643,736
|
def js_div(A, B):
""" Jensen-Shannon divergence between two discrete probability
distributions, represented as numpy vectors """
norm_A = A / A.sum()
norm_B = B / B.sum()
M = (norm_A+norm_B)/2
return 0.5 * (kl_div(norm_A,M)+kl_div(norm_B,M))
|
1e4ac763d01f3ae3d25907d24229301d464de527
| 3,643,737
|
from typing import Dict
def _build_request_url(
base: str,
params_dict: Dict[str, str]) -> str:
"""Returns an URL combined from base and parameters
:param base: base url
:type base: str
:param params_dict: dictionary of parameter names and values
:type params_dict: Dict[str, str]
:return: a complete url
:rtype: str
"""
parameters = "&".join([f"{k}={v}" for k, v in params_dict.items()])
url = base + "?" + parameters
return url
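
# A minimal usage sketch (not part of the original snippet; the base URL and
# parameters are made up for illustration): parameters are joined into a
# GET-style query string and appended to the base.
url = _build_request_url("https://api.example.com/search",
                         {"q": "python", "page": "2"})
assert url == "https://api.example.com/search?q=python&page=2"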
|
30e27cf55692884be408218403c2f94279516ad2
| 3,643,738
|
def aesDecrypt(key, data):
"""AES decryption fucnction
Args:
key (str): packed 128 bit key
data (str): packed encrypted data
Returns:
Packed decrypted data string
"""
cipher = python_AES.new(key)
return cipher.decrypt(data)
|
15c70d0699a22bf58ca191ba2fea2d5eb7942b1b
| 3,643,739
|
def __format_number_input(number_input, language):
"""Formats the specified number input.
Args:
number_input (dict): A number input configuration to format.
language (dict): A language configuration used to help format the input configuration.
Returns:
dict: A formatted number input configuration.
"""
placeholder = number_input.get("placeholder")
if placeholder is not None:
number_input["placeholder"] = normalize_configuration_string(placeholder, language["default"])
return number_input
|
0cd76b74396c013d7f76ae5ae11ace56db6552ab
| 3,643,740
|
def get_players(picks):
"""Return the list of players in the team
"""
players = []
for rd in picks:
play = list(rd.keys())
players = players+play
players = list(set(players))
return players
|
79963bc19af662d44d4eaf29a04995ede331706c
| 3,643,741
|
def verify_file_details_exists(device,
root_path,
file,
max_time=30,
check_interval=10):
""" Verify file details exists
Args:
device ('obj'): Device object
root_path ('str'): Root path for command
file ('str'): File name
max_time (`int`): Max time, default: 30
check_interval (`int`): Check interval, default: 10
Returns:
Boolean
Raises:
None
"""
timeout = Timeout(max_time, check_interval)
while timeout.iterate():
out = None
try:
out = device.parse(
'file list {root_path} detail'.format(root_path=root_path))
except SchemaEmptyParserError as e:
timeout.sleep()
continue
file_found = Dq(out).contains_key_value('file-name',
file,
value_regex=True)
if file_found:
return True
timeout.sleep()
return False
|
da0e33ca67b9e70dc4b5345ba626a193bcdefdbd
| 3,643,742
|
import collections
from typing import Literal
def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
"""
Returns a new graph with a VoID description of the passed dataset
For more info on Vocabulary of Interlinked Datasets (VoID), see:
http://vocab.deri.ie/void
This only makes two passes through the triples (once to detect the types
of things)
The tradeoff is that lots of temporary structures are built up in memory
meaning lots of memory may be consumed :)
I imagine at least a few copies of your original graph.
the distinctForPartitions parameter controls whether
distinctSubjects/objects are tracked for each class/propertyPartition
this requires more memory again
"""
typeMap = collections.defaultdict(set)
classes = collections.defaultdict(set)
for e, c in g.subject_objects(RDF.type):
classes[c].add(e)
typeMap[e].add(c)
triples = 0
subjects = set()
objects = set()
properties = set()
classCount = collections.defaultdict(int)
propCount = collections.defaultdict(int)
classProps = collections.defaultdict(set)
classObjects = collections.defaultdict(set)
propSubjects = collections.defaultdict(set)
propObjects = collections.defaultdict(set)
for s, p, o in g:
triples += 1
subjects.add(s)
properties.add(p)
objects.add(o)
# class partitions
if s in typeMap:
for c in typeMap[s]:
classCount[c] += 1
if distinctForPartitions:
classObjects[c].add(o)
classProps[c].add(p)
# property partitions
propCount[p] += 1
if distinctForPartitions:
propObjects[p].add(o)
propSubjects[p].add(s)
if not dataset:
dataset = URIRef("http://example.org/Dataset")
if not res:
res = Graph()
res.add((dataset, RDF.type, VOID.Dataset))
# basic stats
res.add((dataset, VOID.triples, Literal(triples)))
res.add((dataset, VOID.classes, Literal(len(classes))))
res.add((dataset, VOID.distinctObjects, Literal(len(objects))))
res.add((dataset, VOID.distinctSubjects, Literal(len(subjects))))
res.add((dataset, VOID.properties, Literal(len(properties))))
for i, c in enumerate(classes):
part = URIRef(dataset + "_class%d" % i)
res.add((dataset, VOID.classPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(classCount[c])))
res.add((part, VOID.classes, Literal(1)))
res.add((part, VOID["class"], c))
res.add((part, VOID.entities, Literal(len(classes[c]))))
res.add((part, VOID.distinctSubjects, Literal(len(classes[c]))))
if distinctForPartitions:
res.add(
(part, VOID.properties, Literal(len(classProps[c]))))
res.add((part, VOID.distinctObjects,
Literal(len(classObjects[c]))))
for i, p in enumerate(properties):
part = URIRef(dataset + "_property%d" % i)
res.add((dataset, VOID.propertyPartition, part))
res.add((part, RDF.type, VOID.Dataset))
res.add((part, VOID.triples, Literal(propCount[p])))
res.add((part, VOID.properties, Literal(1)))
res.add((part, VOID.property, p))
if distinctForPartitions:
entities = 0
propClasses = set()
for s in propSubjects[p]:
if s in typeMap:
entities += 1
for c in typeMap[s]:
propClasses.add(c)
res.add((part, VOID.entities, Literal(entities)))
res.add((part, VOID.classes, Literal(len(propClasses))))
res.add((part, VOID.distinctSubjects,
Literal(len(propSubjects[p]))))
res.add((part, VOID.distinctObjects,
Literal(len(propObjects[p]))))
return res, dataset
|
66f8b5824017fd41995783c75de72451d22ea023
| 3,643,743
|
def extract_screen_name_from_twitter_url(url):
"""
Function returning the screen_name from a given Twitter url.
Args:
url (str) : Url from which we extract the screen_name if found.
Returns:
str : screen_name if the url is a valid twitter url, None otherwise.
"""
parsed_twitter_url = parse_twitter_url(url)
if isinstance(parsed_twitter_url, TwitterUser):
return parsed_twitter_url.screen_name
if isinstance(parsed_twitter_url, TwitterTweet):
return parsed_twitter_url.user_screen_name
return None
|
3bbc00f2b4fb0aa0b49154d37802a50204f05ccf
| 3,643,744
|
def sub_vectors(a, b):
"""Subtracts two vectors.
Args:
        a (tuple[int]): first position
        b (tuple[int]): second position
Returns:
tuple[int]: element wise subtraction
Examples:
>>> sub_vectors((1,4,6), (1,3,7))
(0, 1, -1)
"""
return tuple(a[i] - b[i] for i in range(3))
|
02c35bf46311142a3f3e90cd803d908c6ff63896
| 3,643,745
|
def get_prediction_info(predicted_one_hot, predicted_int, y_test, PLOTS_DIR, filename = "test_file"):
"""
Saves useful information for error analysis in plots directory
    :param predicted_one_hot: per-sample class probabilities
    :param predicted_int: predicted class labels
    :param y_test: ground-truth labels
    :param PLOTS_DIR: directory in which the report is written
    :param filename: base name for the output text file
    :return:
"""
def get_info_for_label(label):
false_dict = {}
number = 0
if label == False:
number = 1
for i in range(len(predicted_one_hot)):
false_dict[i] = predicted_one_hot[i][number]
temp_dict = false_dict
sorted_index = sorted(false_dict, key=false_dict.get, reverse=True)
file = str(label) + "\n"
file += "Index;probability;correct?\n"
for i in range(len(sorted_index)):
correct = "No"
index = sorted_index[i]
if predicted_int[index] == y_test[index]:
correct = "Yes"
file += str(index) + ";" + str(temp_dict[index]) + ";" + correct + "\n"
print(sorted_index[:5])
return file, sorted_index
file = "Predictions True;Predictions False;Correctly predicted?\n"
max_true_value = 0.0
max_false_value = 0.0
max_true_index = -1
worst_true_index = -1
max_false_index = -1
worst_false_index = -1
for i, pred in enumerate(predicted_one_hot):
correctly_pred = -1
if predicted_int[i] == y_test[i]:
correctly_pred = "Yes"
else:
correctly_pred = "No"
file += str(pred[0]) + ";" + str(pred[1]) + ";" + str(correctly_pred) + "\n"
if pred[0] > max_true_value:
max_true_value = pred[0]
max_true_index = i
if predicted_int[i] != y_test[i]:
worst_true_index = i
if pred[1] > max_false_value:
max_false_value = pred[1]
max_false_index = i
if predicted_int[i] != y_test[i]:
worst_false_index = i
file += "\nStatistics\n"
file += "max_true_value: " + str(max_true_value) + "\n"
file += "max_true_index: " + str(max_true_index) + "\n"
file += "max_false_value: " + str(max_false_value) + "\n"
file += "max_false_index: " + str(max_false_index) + "\n"
file += "worst_true_index: " + str(worst_true_index) + "\n"
file += "worst_false_index: " + str(worst_false_index) + "\n"
file += "===================================================\n"
file += "===================================================\n"
info_false, sorted_false = get_info_for_label(False)
info_true, sorted_true = get_info_for_label(True)
with open(PLOTS_DIR + filename+".txt", "w+") as text_file:
text_file.write(file + info_false + info_true)
return sorted_true, sorted_false, worst_true_index, worst_false_index
|
76d95c3793ee29c211d7f32e727e9bf046c075eb
| 3,643,746
|
def save_excel_file():
"""File save dialog for an excel file.
Returns:
str: file path
"""
return pick_excel_file(save=True)
|
392c014a959a6d61cfa02ca041d0496560df4dec
| 3,643,747
|
def load_app_paths(file_path=None, dir_path=None, user_file_path=None,
user_dir_path=None, default=None, paths=None, **kwargs):
"""Parse and merge user and app config files
User config will have precedence
:param file_path: Path to the base config file
:param dir_path: Path to the extension config directory
:param user_file_path: Path to the user base config file
:param user_dir_path: Path to the user base config file
:param default: Path to be preppended as the default config file embedded
in the app
:param paths: Extra paths to add to the parsing after the defaults
:param force_extension: only read files with given extension.
:returns: Single dict with all the loaded config
"""
files = [default, file_path, dir_path, user_file_path, user_dir_path]
files += (paths or [])
return load_paths([path for path in files if path], **kwargs)
|
d5f6fe9b8db396f95656d80fea19dc0f95fba642
| 3,643,748
|
def search_playlists(spotify_token, playlist):
"""
:param spotify_token:
:param playlist:
:return:
"""
return _search(spotify_token, query=playlist, type='playlist', limit=9, market='ES', offset=0)
|
cf2ab61a4f967c8cb570471c3fdea2c772d85e8d
| 3,643,749
|
import re
def text_pre_process(result):
""" 이미지에서 인식된 글자를 정제 합니다.
특수문자 제거, 1-2단어 제거, 줄바꿈 및 공백 제거
:param result: 이미지에서 인식된 글자
:return: 문자를 전처리한 결과
"""
copy = str(result)
copy2 = copy.replace("\n", "")
copy3 = re.sub('[^ㄱ-힗]', '', copy2)
# re.sub('[^A-Za-z0-9]', '', copy2)
result = re.sub('[-=+,#}/\{:^$.@*\※~&%ㆍ!『「』\\‘|\(\)\[_ ""\]\<\>`\'…》]', '', copy3)
# shortword = re.compile(r'\W*\b\w{1,2}\b')
# shortword.sub('', result)
# text2 = re.sub(r'\d','',result)
if result is not None and len(result) > 3:
# print(result)
return result
|
c9a25fb19a723d38eb19a8a086a2134369223ea1
| 3,643,750
|
def get_conventional_std_cell(atoms):
"""Given an ASE atoms object, return the ASE atoms object in the conventional standard cell.
It uses symmetries to find the conventional standard cell.
In particular, it gives a structure with a conventional cell according to the standard defined in
W. Setyawan, and S. Curtarolo, Comput. Mater. Sci.49(2), 299-312 (2010). \n
This is simply a wrapper around the pymatgen implementation:
http://pymatgen.org/_modules/pymatgen/symmetry/analyzer.html
Parameters:
atoms: `ase.Atoms` object
Atomic structure.
Returns:
`ase.Atoms` object
Return the structure in a conventional cell.
.. seealso:: To create a standard cell that it is independent from symmetry operations use
:py:mod:`ai4materials.utils.utils_crystals.get_conventional_std_cell_no_sym`
.. codeauthor:: Angelo Ziletti <angelo.ziletti@gmail.com>
"""
# save atoms.info dict otherwise it gets lost in the conversion
atoms_info = atoms.info
mg_structure = AseAtomsAdaptor.get_structure(atoms)
finder = SpacegroupAnalyzer(mg_structure)
mg_structure = finder.get_conventional_standard_structure()
conventional_standard_atoms = AseAtomsAdaptor.get_atoms(mg_structure)
conventional_standard_atoms.info = atoms_info
return conventional_standard_atoms
|
78bf131e8f195bd25b424627d6cce2d5295de248
| 3,643,752
|
def get_if_rcnn(inputs: Tensor):
"""
:param inputs: Tensor from Input Layer
:return:
"""
# get back bone outputs
if_backbones_out = backbones(inputs)
return if_backbones_out
|
02507f10e4dc791b1f201d2c08d3925f2a2dacb5
| 3,643,753
|
import string
import secrets
def method_3(num_chars: int):
"""
Pythonicish way of generating random password
Args:
num_chars (int): Number of Characters the password will be
Returns:
string: The generated password
"""
chars = string.ascii_letters + string.digits + string.punctuation
password = "".join((secrets.choice(chars) for i in range(num_chars)))
return password
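
# A minimal usage sketch (not part of the original snippet): only the length of
# the generated password is deterministic, since secrets.choice picks characters
# at random.
password = method_3(16)
assert len(password) == 16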
|
03fc383ef8c45f1bc618daf4b70646ea824e31df
| 3,643,754
|
def get_animation_for_block(
block_start: int,
frame_num: int,
total_frames: int,
duration: int=5,
):
"""Generate CSS to pop a block from gray to red at the right frame
block_start: int
frame_num: int
total_frames: int
duration: int # seconds"""
animation_function = gray_red_blue
return animation_function(block_start, frame_num, total_frames, duration)
|
9ce9a36a5c7ca5161b89a3306c7e38c9c03d2ee0
| 3,643,755
|
def find_student_by_username(usuario_id, test=False):
"""Consulta toda la información de un estudiante según su usuario."""
query = 'SELECT * FROM estudiante WHERE id_usuario = %s'
return execute_sql(query, args=[usuario_id], rows=1, test=test)
|
ecc67992aef2257d35ccc6dfa05c01afc3d40bb3
| 3,643,756
|
def reduce_mem_usage(df, use_float16=False):
"""
Iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage of dataframe is {:.2f} MB".format(start_mem))
for col in df.columns:
if is_datetime(df[col]) or is_categorical_dtype(df[col]):
continue
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == "int":
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if use_float16 and c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype("category")
end_mem = df.memory_usage().sum() / 1024 ** 2
print("Memory usage after optimization is: {:.2f} MB".format(end_mem))
print("Decreased by {:.1f}%".format(100 * (start_mem - end_mem) / start_mem))
return df
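
# A minimal usage sketch (not part of the original snippet; assumes pandas/numpy
# plus the is_datetime and is_categorical_dtype helpers from pandas.api.types
# that the snippet above relies on): small integers are downcast to int8, floats
# to float32, and object columns become category.
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "small_int": np.arange(100, dtype=np.int64),   # fits in int8
    "small_float": np.linspace(0.0, 1.0, 100),     # fits in float32
    "label": ["a", "b"] * 50,                      # object -> category
})
df = reduce_mem_usage(df)
assert str(df["small_int"].dtype) == "int8"
assert str(df["small_float"].dtype) == "float32"
assert str(df["label"].dtype) == "category"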
|
842e9c134cd5211fdbe75b0626efa48f68d90c35
| 3,643,757
|
def create_global_step() -> tf.Variable:
"""Creates a `tf.Variable` suitable for use as a global step counter.
Creating and managing a global step variable may be necessary for
`AbstractTrainer` subclasses that perform multiple parameter updates per
`Controller` "step", or use different optimizers on different steps.
In these cases, an `optimizer.iterations` property generally can't be used
directly, since it would correspond to parameter updates instead of iterations
in the `Controller`'s training loop. Such use cases should simply call
`step.assign_add(1)` at the end of each step.
Returns:
A non-trainable scalar `tf.Variable` of dtype `tf.int64`, with only the
first replica's value retained when synchronizing across replicas in
a distributed setting.
"""
return tf.Variable(
0,
dtype=tf.int64,
name="global_step",
trainable=False,
aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA)
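
# A minimal usage sketch (not part of the original snippet; assumes TensorFlow 2.x
# with eager execution): a trainer increments the counter once per Controller
# step, as described in the docstring above.
step = create_global_step()
step.assign_add(1)
assert int(step.numpy()) == 1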
|
786ac18f78f092e099844d99b33f5232f53d8a8a
| 3,643,759
|
def rl_label_weights(name=None):
"""Returns the weight for importance."""
with tf.variable_scope(name, 'rl_op_selection'):
num_classes = get_src_num_classes()
num_choices = FLAGS.num_choices
logits = tf.get_variable(
name='logits_rl_w',
initializer=tf.initializers.zeros(),
shape=[num_classes, num_choices],
dtype=tf.float32)
dist_logits_list = logits.value()
dist = tfp.distributions.Categorical(logits=logits)
dist_entropy = tf.reduce_sum(dist.entropy())
sample = dist.sample()
sample_masks = 1. * tf.cast(sample, tf.float32) / num_choices
sample_log_prob = tf.reduce_mean(dist.log_prob(sample))
return (dist_logits_list, dist_entropy, sample_masks, sample_log_prob)
|
b1157c508cb256ab9608ebd78dfbb1ef90cec2b5
| 3,643,760
|
from scipy.stats import mannwhitneyu
from statsmodels.sandbox.stats.multicomp import multipletests
from typing import List
from tqdm import tqdm
def run_de_test(dataset1: Dataset, dataset2,
test_cells: List[str], control_cells: List[List[str]],
test_label: str = None, control_group_labels: list = None,
exp_frac_thresh: float = 0.25, log2_fc_thresh: float = 1,
qval_thresh: float = 0.05, tqdm_msg: str = '') -> pd.DataFrame:
"""
Identifies differentially expressed genes using Mann Whitney U test.
:param dataset1: nabo.Dataset instance
:param dataset2: nabo.Dataset instance or None
    :param test_cells: list of cells for which markers have to be found.
                       These could be cells from a cluster, cells with high
                       mapping score, etc
    :param control_cells: List of cell groups against which markers need to
                          be found. This could be just one group of cells or
                          multiple groups of cells.
:param test_label: Label for test cells.
:param control_group_labels: Labels of control cell groups
:param exp_frac_thresh: Fraction of cells that should have a non zero
value for a gene.
:param log2_fc_thresh: Threshold for log2 fold change
:param qval_thresh: Threshold for adjusted p value
:param tqdm_msg: Message to print while displaying progress
:return: pd.Dataframe
"""
test_cells_idx = [dataset1.cellIdx[x] for x in test_cells]
control_cells_idx_group = []
for i in control_cells:
if dataset2 is None:
control_cells_idx_group.append([dataset1.cellIdx[x] for x in i])
else:
control_cells_idx_group.append([dataset2.cellIdx[x] for x in i])
if test_label is None:
test_label = 'Test group'
if control_group_labels is None:
control_group_labels = ['Ctrl group %d' % x for x in range(len(
control_cells_idx_group))]
num_test_cells = len(test_cells_idx)
num_groups = len(control_cells_idx_group)
min_n = [min(num_test_cells, len(x)) for x in control_cells_idx_group]
n1n2 = [num_test_cells * x for x in min_n]
if dataset2 is None:
valid_genes = {dataset1.genes[x]: None for x in dataset1.keepGenesIdx}
else:
valid_genes = {}
control_gene_list = {x: None for x in dataset2.genes}
for i in dataset1.keepGenesIdx:
gene = dataset1.genes[i]
if gene in control_gene_list:
valid_genes[gene] = None
del control_gene_list
de = []
for gene in tqdm(valid_genes, bar_format=tqdm_bar, desc=tqdm_msg):
rbc, mw_p, log_fc = 0, 1, 0
all_vals = dataset1.get_norm_exp(gene)
test_vals = all_vals[test_cells_idx]
ef = np.nonzero(test_vals)[0].shape[0] / num_test_cells
if ef < exp_frac_thresh:
continue
if dataset2 is None:
all_control_vals = all_vals
else:
all_control_vals = dataset2.get_norm_exp(gene)
log_mean_test_vals = np.log2(test_vals.mean())
for i in range(num_groups):
control_vals = all_control_vals[control_cells_idx_group[i]]
control_vals.sort()
control_vals = control_vals[-min_n[i]:]
mean_control_vals = control_vals.mean()
if mean_control_vals == 0:
log_fc = np.inf
else:
log_fc = log_mean_test_vals - np.log2(mean_control_vals)
if log_fc < log2_fc_thresh:
continue
try:
u, mw_p = mannwhitneyu(test_vals, control_vals)
except ValueError:
pass
else:
rbc = 1 - ((2 * u) / n1n2[i])
de.append((gene, ef, control_group_labels[i], rbc, log_fc, mw_p))
de = pd.DataFrame(de, columns=['gene', 'exp_frac', 'versus_group',
'rbc', 'log2_fc', 'pval'])
if de.shape[0] > 1:
de['qval'] = multipletests(de['pval'].values, method='fdr_bh')[1]
else:
de['qval'] = [np.nan for _ in range(de.shape[0])]
de['test_group'] = [test_label for _ in range(de.shape[0])]
out_order = ['gene', 'exp_frac', 'test_group', 'versus_group',
'rbc', 'log2_fc', 'pval', 'qval']
de = de[out_order].sort_values(by='qval')
return de[(de.qval < qval_thresh)].reset_index().drop(columns=['index'])
|
422082785845918f7e999125b7e57e6c1fbcb535
| 3,643,761
|
def say_hello_twice(subject):
"""Says hello twice using `say_hello`."""
return say_hello(subject) + " " + say_hello(subject)
|
66a6fafca01f6ddc6304fef15aea27bb15c23416
| 3,643,762
|
def get_zones(ec2):
"""
Return all available zones in the region
"""
zones = []
try:
aws_zones = ec2.describe_availability_zones()['AvailabilityZones']
except ClientError as e:
print(e.response['Error']['Message'])
return None
for zone in aws_zones:
if zone['State'] == 'available':
zones.append(zone['ZoneName'])
return zones
|
acd023bcf5863aff0cd562f6c097062d9693738d
| 3,643,763
|
import torch
def x_gate():
"""
Pauli x
"""
    return torch.tensor([[0, 1], [1, 0]]) + 0j
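
# A minimal usage sketch (not part of the original snippet; relies on the torch
# import above): applying the Pauli-X gate twice gives the identity.
X = x_gate()
assert torch.allclose(X @ X, torch.eye(2) + 0j)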
|
736d72d832380ea5a1d6c4a840cb6aa0050638e5
| 3,643,764
|
def merge_dictionaries(default_dictionary, user_input_dictionary, path=None):
"""Merges user_input_dictionary into default dictionary;
default values will be overwritten by users input."""
return {**default_dictionary, **user_input_dictionary}
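
# A minimal usage sketch (not part of the original snippet): user-supplied values
# take precedence over the defaults.
merged = merge_dictionaries({"host": "localhost", "port": 80}, {"port": 8080})
assert merged == {"host": "localhost", "port": 8080}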
|
ea600efcd69e920ae536fa2f22a4c883a71d8ad3
| 3,643,765
|
def create_frequencyvector(T_end, f_max_requested):
""" A function to create the vector of frequencies we need to solve using the reflectivity
method, to achieve the desired length of time and highest modelled frequency.
NOTE: Because we require the number of frequencies to be odd, the maximum frequency may
change.
Returns the frequency vector and the corresponding time step dt.
"""
# T_end : End time of simulation
# f_max_requested : Maximum desired frequency to be modelled
# Minimum modelled frequency (always 0 for now)
f_min = 0
# Frequency resolution
df = 1 / T_end
# Number of frequencies (round up if needed), + 1 for the first frequency (zero)
n_f = np.ceil((f_max_requested - f_min) / df) + 1
n_f = n_f.astype(int)
# Make sure the number of frequencies is odd
if n_f % 2 != 1:
n_f += 1
# Maximum modelled frequency (accurate), -1 for the first frequency which is zero
f_max_actual = (n_f - 1) * df
assert f_max_actual >= f_max_requested, 'Actual frequency too low'
dt = 1 / (2 * f_max_actual)
freq = np.linspace(0, f_max_actual, n_f)
return freq, dt
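
# A minimal usage sketch (not part of the original snippet; assumes numpy as np,
# as the snippet above does): for a 2 s record and a requested 50 Hz maximum,
# df = 1/2 = 0.5 Hz, n_f = ceil(50/0.5) + 1 = 101 (already odd), so
# f_max_actual = 50 Hz and dt = 1/(2*50) = 0.01 s.
freq, dt = create_frequencyvector(T_end=2.0, f_max_requested=50.0)
assert len(freq) == 101
assert abs(dt - 0.01) < 1e-12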
|
d402840259bdc0049c580e057a6de815dfaa02f1
| 3,643,766
|
def get_fiber_protein_intake(
        nutrients_lower_lists, nutrients_middle_lists, nutrients_upper_lists):
    """Gets financial class-wise fiber and protein intake data."""
lower_fiber_prot = nutrients_lower_lists.map(lambda x: (x[1], x[3]))
middle_fiber_prot = nutrients_middle_lists.map(lambda x: (x[1], x[3]))
upper_fiber_prot = nutrients_upper_lists.map(lambda x: (x[1], x[3]))
return lower_fiber_prot, middle_fiber_prot, upper_fiber_prot
|
990293236a10ed18960393b39dbfb46652fca51d
| 3,643,767
|
def _add_fvar(font, axes, instances, axis_map):
"""
Add 'fvar' table to font.
axes is a dictionary mapping axis-id to axis (min,default,max)
coordinate values.
instances is list of dictionary objects with 'location', 'stylename',
and possibly 'postscriptfontname' entries.
axisMap is dictionary mapping axis-id to (axis-tag, axis-name).
"""
assert "fvar" not in font
font['fvar'] = fvar = newTable('fvar')
nameTable = font['name']
for iden in sorted(axes.keys(), key=lambda k: axis_map[k][0]):
axis = Axis()
axis.axisTag = Tag(axis_map[iden][0])
axis.minValue, axis.defaultValue, axis.maxValue = axes[iden]
axisName = tounicode(axis_map[iden][1])
axis.axisNameID = nameTable.addName(axisName)
fvar.axes.append(axis)
for instance in instances:
coordinates = instance['location']
name = tounicode(instance['stylename'])
psname = instance.get('postscriptfontname')
inst = NamedInstance()
inst.subfamilyNameID = nameTable.addName(name)
if psname is not None:
psname = tounicode(psname)
inst.postscriptNameID = nameTable.addName(psname)
inst.coordinates = {axis_map[k][0]:v for k,v in coordinates.items()}
fvar.instances.append(inst)
return fvar
|
20836c91121603610f5bfb19777e4b6f440f2007
| 3,643,768
|
from tqdm import tqdm
def init_nornir(username, password):
"""INITIALIZES NORNIR SESSIONS"""
nr = InitNornir(
config_file="network_automation/topology_builder/graphviz/config/config.yml"
)
nr.inventory.defaults.username = username
nr.inventory.defaults.password = password
    managed_devs = nr.filter(F(groups__contains="ios_devices") | F(groups__contains="nxos_devices"))
    dev_auth_fail_list = set()
    with tqdm(total=len(managed_devs.inventory.hosts)) as progress_bar:
results = managed_devs.run(task=get_data_task, progress_bar=progress_bar)
hosts_failed = list(results.failed_hosts.keys())
if hosts_failed != []:
auth_fail_list = list(results.failed_hosts.keys())
for dev in auth_fail_list:
dev_auth_fail_list.add(dev)
print(f"Authentication Failed: {auth_fail_list}")
print(
f"{len(list(results.failed_hosts.keys()))}/{len(managed_devs.inventory.hosts)} devices failed authentication..."
)
return managed_devs, results, dev_auth_fail_list
|
edf6a45a2ce9dc7799351892c1b53ab2b949607c
| 3,643,769
|
def _format_rest_url(host: str, append: str = "") -> str:
"""Return URL used for rest commands."""
return f"http://{host}:8001/api/v2/{append}"
|
1d5ace3919da004e648cb6c7d6d80fe72903c0e1
| 3,643,770
|
from typing import Optional
from typing import Set
def get_synonyms(prefix: str) -> Optional[Set[str]]:
"""Get the synonyms for a given prefix, if available."""
entry = get_resource(prefix)
if entry is None:
return None
return entry.get_synonyms()
|
44e4fc7259f9536dc192a08566b8db7f9256f916
| 3,643,772
|
def results(request):
""" Returns the actual body of the search results, for AJAX stuff """
query = request.GET.get("q", "")
if len(query) >= 4:
ctx = _search_context(query, request.user)
return TemplateResponse(request, "search/results.html", ctx)
return TemplateResponse(request, "search/too_short.html", {})
|
a6282dd489e3406ebc8b2349159b58d4cb0e1fd4
| 3,643,773
|
def is_correlated(corr_matrix, feature_pairs, rho_threshold=0.8):
"""
    Returns a dict whose keys are the feature pairs and whose values are
    booleans indicating whether the pair is linearly correlated at or above
    the given threshold.
"""
results = {}
for pair in feature_pairs:
f1, f2 = pair.split("__")
corr = corr_matrix[f1][f2]
results[pair] = round(corr, 3) >= rho_threshold
return results
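
# A minimal usage sketch (not part of the original snippet; assumes pandas and
# numpy): feature pairs are encoded as "<feature1>__<feature2>" and looked up in
# a pandas correlation matrix.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
a = rng.normal(size=200)
df = pd.DataFrame({"a": a,
                   "b": a + rng.normal(scale=0.01, size=200),  # nearly identical to "a"
                   "c": rng.normal(size=200)})                 # independent of "a"
flags = is_correlated(df.corr(), ["a__b", "a__c"], rho_threshold=0.8)
assert flags["a__b"] and not flags["a__c"]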
|
18afa0cc24f5d9205cde3c8ad23f70d73b5c395b
| 3,643,774
|
def find_password(liste, login):
""" """
for user in liste:
if user[0] == login:
return user[1]
return None
|
8f61072a8b1cc34eb27c1665b1cd34aeb6630ce2
| 3,643,775
|
def sample_weather_scenario():
"""
Generate a weather scenario with known values for the wind condition.
"""
times = pd.date_range('1/1/2000', periods=72, freq='6H')
latitude = np.linspace(0, 10, 11)
longitude = np.linspace(0, 10, 11)
wsp_vals = np.full((72, 11, 11), 10.0)
wdi_vals = np.full((72, 11, 11), 0.0)
cusp_vals = np.full((72, 11, 11), 0.0)
cudi_vals = np.full((72, 11, 11), 0.0)
wadi_vals = np.full((72, 11, 11), 0.0)
wahi_vals = np.full((72, 11, 11), 0.0)
wisp = xr.DataArray(wsp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
widi = xr.DataArray(wdi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cusp = xr.DataArray(cusp_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
cudi = xr.DataArray(cudi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
    wahi = xr.DataArray(wahi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
    wadi = xr.DataArray(wadi_vals, dims=['time', 'lon_b', 'lat_b'],
coords={'time': times,
'lon_b': longitude,
'lat_b': latitude})
return wisp, widi, cusp, cudi, wahi, wadi
|
124f43b090149bb23e52a88a03c441d1311c5bea
| 3,643,776
|
import torch
def ToTensor(pic):
"""Converts a PIL.Image or numpy.ndarray (H x W x C) in the range
[0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
"""
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img.float().div(255)
if pic.mode == "I":
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == "I;16":
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
if pic.mode == "YCbCr":
nchannel = 3
elif pic.mode == "I;16":
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float().div(255)
else:
return img
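
# A minimal usage sketch (not part of the original snippet; assumes numpy as np,
# as the snippet above does, and the torch import above): an 8-bit H x W x C
# array becomes a C x H x W float tensor scaled to [0, 1].
import numpy as np

img = (np.arange(4 * 5 * 3) % 256).astype(np.uint8).reshape(4, 5, 3)
tensor = ToTensor(img)
assert tensor.shape == (3, 4, 5)
assert 0.0 <= float(tensor.min()) and float(tensor.max()) <= 1.0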
|
c3ed682c520f17f24169377b2a3016510e9724f9
| 3,643,778
|
def part2(data):
"""
>>> part2([[43, 19], [2, 29, 14]])
105
>>> part2([[9, 2, 6, 3, 1], [5, 8, 4, 7, 10]])
291
>>> part2(read_input())
32528
"""
deck_one = tuple(data[0])
deck_two = tuple(data[1])
_, winning_deck = combat(deck_one, deck_two)
return score(winning_deck)
|
226eb835030716cdfa30310e53bc76c5dd3a4e7e
| 3,643,779
|
def dehyphenate(string):
"""Remove hyphenated linebreaks from 'string'."""
return hyphen_newline_re.sub("", string)
|
6894fd7972c3990fd00e5818e0b30e48e78017e0
| 3,643,781
|
def grounder(img, dtype=None):
"""Tries to remove absolute offset
'img' must be a 3 colors image"""
shape = img.shape
"""
# Mise en forme
a = img.reshape((shape[0] * shape[1], 3))
min = np.zeros(a.shape)
max = np.zeros(a.shape)
# Minimas/maximas
min[:,0] = min[:,1] = min[:,2] = a.min(axis=1)
max[:,0] = max[:,1] = max[:,2] = a.max(axis=1)
# Remise en forme
min = min.reshape(shape)
max = max.reshape(shape)
# Remise au ras du sol
grounded = img - min
# return (grounded / max).astype(np.float32)
return (grounded / 255.0).astype(np.float32)
"""#"""
min = coloroffset(img)
grounded = img - min
if dtype is not None:
        grounded = grounded.astype(dtype)
return grounded
|
55a022a26f457cf0ec76d4ed8fa37f470db31e11
| 3,643,782
|
def arch_prob(arch, dims, **kwds):
""" Returns the combined probability of for arch given values """
values = dict(kwds)
dimkeys = list(dims.keys())
    assert isinstance(arch, (tuple, list)), "Architecture must be tuple or list"
serial = isinstance(arch, list)
probs = [None] * len(arch)
for i, subarch in enumerate(arch):
keyset = subarch.keylist
vals = {key: values[key] for key in dimkeys if key in keyset}
subdims = {key: dims[key] for key in dimkeys if key in keyset}
probs[i] = subarch.eval_prob(vals, subdims)
if serial:
return probs[-1]
pscales = [subarch.pscale for subarch in arch]
prob, pscale = prod_rule(*tuple(probs), pscales=pscales)
return prob
|
94778c471d4ae3fe534af50543ef9465a6b2e793
| 3,643,783
|