| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
import warnings
def _generate_input_weights(
N,
dim_input,
dist="custom_bernoulli",
connectivity=1.0,
dtype=global_dtype,
sparsity_type="csr",
seed=None,
input_bias=False,
**kwargs,
):
"""Generate input or feedback weights for a reservoir.
Weights are drawn by default from a discrete Bernoulli random variable,
i.e. are always equal to 1 or -1. Then, they can be rescaled to a specific constant
using the `input_scaling` parameter.
Warning
-------
This function is deprecated since version v0.3.1 and will be removed in future
versions. Please consider using :py:func:`bernoulli` or :py:func:`random_sparse`
instead.
Parameters
----------
N: int
Number of units in the connected reservoir.
dim_input: int
Dimension of the inputs connected to the reservoir.
dist: str, default to "custom_bernoulli"
A distribution name from :py:mod:`scipy.stats` module, such as "norm" or
"uniform". Parameters like `loc` and `scale` can be passed to the distribution
functions as keyword arguments to this function. Usual distributions for
internal weights are :py:class:`scipy.stats.norm` with parameters `loc` and
`scale` to obtain weights following the standard normal distribution,
or :py:class:`scipy.stats.uniform` with parameters `loc=-1` and `scale=2`
to obtain weights uniformly distributed between -1 and 1.
Can also have the value "custom_bernoulli". In that case, weights will be drawn
from a Bernoulli discrete random variable alternating between -1 and 1 and
drawing 1 with a probability `p` (default `p` parameter to 0.5).
connectivity: float, default to 1.0
Also called density of the sparse matrix.
input_scaling: float or array, optional
If defined, then will rescale the matrix using this coefficient or array
of coefficients.
input_bias: bool, optional
'input_bias' parameter is deprecated. Bias should be initialized
separately from the input matrix.
If True, will add a row to the matrix to take into
account a constant bias added to the input.
dtype : numpy.dtype, default to numpy.float64
A Numpy numerical type.
sparsity_type : {"csr", "csc", "dense"}, default to "csr"
If connectivity is less than 1 and shape is only 2-dimensional, then the
function will try to use one of the Scipy sparse matrix format ("csr" or "csc").
Else, a Numpy array ("dense") will be used.
seed : optional
Random generator seed. Default to the global value set with
:py:func:`reservoirpy.set_seed`.
**kwargs : optional
Arguments for the scipy.stats distribution.
Returns
-------
Numpy array or callable
If a shape is given to the initializer, then returns a matrix.
Else, returns a function partially initialized with the given keyword parameters,
which can be called with a shape and returns a matrix.
"""
warnings.warn(
"'generate_input_weights' is deprecated since v0.3.1 and will be removed in "
"future versions. Consider using 'normal', 'uniform' or 'random_sparse'.",
DeprecationWarning,
)
if input_bias:
warnings.warn(
"'input_bias' parameter is deprecated. Bias should be initialized "
"separately from the input matrix.",
DeprecationWarning,
)
dim_input += 1
return _random_sparse(
N,
dim_input,
connectivity=connectivity,
dtype=dtype,
dist=dist,
sparsity_type=sparsity_type,
seed=seed,
**kwargs,
)
|
4201fda2f693d0ee0f189e94762de09877059b08
| 3,643,895
|
import re
def _get_variable_name(param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
|
4f6258667383c80b584054af20ac9a61cf25381f
| 3,643,896
|
def np_gather(params, indices, axis=0, batch_dims=0):
"""numpy gather"""
if batch_dims == 0:
return gather(params, indices)
result = []
if batch_dims == 1:
for p, i in zip(params, indices):
axis = axis - batch_dims if axis - batch_dims > 0 else 0
r = gather(p, i, axis=axis)
result.append(r)
return np.stack(result)
for p, i in zip(params[0], indices[0]):
r = gather(p, i, axis=axis)
result.append(r)
res = np.stack(result)
return res.reshape((1,) + res.shape)
|
9dc89cb6e48a6c8126fbee1421a4d7058f35b9e0
| 3,643,897
|
def texture(data):
"""Compute the texture of data.
Compute the texture of the data by comparing values with a 3x3 neighborhood
(based on :cite:`Gourley2007`). NaN values in the original array have
NaN textures.
Parameters
----------
data : :class:`numpy:numpy.ndarray`
multi-dimensional array with shape (..., number of beams, number
of range bins)
Returns
-------
texture : :class:`numpy:numpy.ndarray`
array of textures with the same shape as data
"""
# one-element wrap-around padding
x = np.pad(data, 1, mode='wrap')
# set first and last range elements to NaN
x[:, 0] = np.nan
x[:, -1] = np.nan
# get neighbours using views into padded array
x1 = x[..., :-2, 1:-1] # center:2
x2 = x[..., 1:-1, :-2] # 4
x3 = x[..., 2:, 1:-1] # 8
x4 = x[..., 1:-1, 2:] # 6
x5 = x[..., :-2, :-2] # 1
x6 = x[..., :-2, 2:] # 3
x7 = x[..., 2:, 2:] # 9
x8 = x[..., 2:, :-2] # 7
# stack arrays
xa = np.array([x1, x2, x3, x4, x5, x6, x7, x8])
# get count of valid neighbouring pixels
xa_valid_count = np.count_nonzero(~np.isnan(xa), axis=0)
# root mean of squared differences
rmsd = np.sqrt(np.nansum((xa - data) ** 2, axis=0) / xa_valid_count)
# reinforce that NaN values should have NaN textures
rmsd[np.isnan(data)] = np.nan
return rmsd
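# A minimal usage sketch (assumes numpy is imported as np, as texture() above
# requires): the result has the same shape as the input, and NaN inputs keep
# NaN textures.
import numpy as np

demo = np.array([[1.0, 2.0, 3.0, 4.0],
                 [2.0, 2.0, 2.0, 2.0],
                 [5.0, np.nan, 1.0, 0.0]])
tex = texture(demo)
print(tex.shape)            # (3, 4), same as the input
print(np.isnan(tex[2, 1]))  # True: NaN input -> NaN texture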
|
e1a57e9e37a1730de5c4e919ba6fa65eaf301c79
| 3,643,898
|
from typing import Tuple
def joos_2013_monte_carlo(
runs: int = 100, t_horizon: int = 1001, **kwargs
) -> Tuple[pd.DataFrame, np.ndarray]:
"""Runs a monte carlo simulation for the Joos_2013 baseline IRF curve.
This function uses uncertainty parameters for the Joos_2013 curve calculated by
Olivie and Peters (2013): https://esd.copernicus.org/articles/4/267/2013/
Parameters
----------
runs : int
Number of runs for Monte Carlo simulation. Must be >1.
t_horizon : int
Length of the time horizon over which baseline curve is
calculated (years)
Returns
-------
summary : pd.DataFrame
Dataframe with 'mean', '+2sigma', '-2sigma', '5th', and '95th' columns summarizing
results of Monte Carlo simulation.
results : np.ndarray
Results from all Monte Carlo runs.
"""
if runs <= 1:
raise ValueError('number of runs must be >1')
results = np.zeros((t_horizon, runs))
# Monte Carlo simulations
# sigma and x are from Olivie and Peters (2013) Table 5 (J13 values)
# They are the covariance and mean arrays for CO2 IRF uncertainty
sigma = np.array(
[
[0.129, -0.058, 0.017, -0.042, -0.004, -0.009],
[-0.058, 0.167, -0.109, 0.072, -0.015, 0.003],
[0.017, -0.109, 0.148, -0.043, 0.013, -0.013],
[-0.042, 0.072, -0.043, 0.090, 0.009, 0.006],
[-0.004, -0.015, 0.013, 0.009, 0.082, 0.013],
[-0.009, 0.003, -0.013, 0.006, 0.013, 0.046],
]
)
x = np.array([5.479, 2.913, 0.496, 0.181, 0.401, -0.472])
p_samples = multivariate_normal.rvs(x, sigma, runs)
p_df = pd.DataFrame(p_samples, columns=['t1', 't2', 't3', 'b1', 'b2', 'b3'])
p_exp = np.exp(p_df)
a1 = p_exp['b1'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a2 = p_exp['b2'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
a3 = p_exp['b3'] / (1 + p_exp['b1'] + p_exp['b2'] + p_exp['b3'])
tau1 = p_exp['t1']
tau2 = p_exp['t2']
tau3 = p_exp['t3']
for count in np.arange(runs):
co2_kwargs = {
'a1': a1[count],
'a2': a2[count],
'a3': a3[count],
'tau1': tau1[count],
'tau2': tau2[count],
'tau3': tau3[count],
}
irf = joos_2013(t_horizon, **co2_kwargs)
results[:, count] = irf
summary = pd.DataFrame(columns=['mean', '-2sigma', '+2sigma', '5th', '95th'])
summary['mean'] = np.mean(results, axis=1)
summary['+2sigma'] = summary['mean'] + (1.96 * np.std(results, axis=1))
summary['-2sigma'] = summary['mean'] - (1.96 * np.std(results, axis=1))
summary['5th'] = np.percentile(results, 5, axis=1)
summary['95th'] = np.percentile(results, 95, axis=1)
return summary, results
|
3fd791eae464bd1c73fcbf3fa16c7e8634dd6f80
| 3,643,899
|
import torch
def pairwise_l1_loss(outputs, targets):
"""
"""
batch_size = outputs.size()[0]
if batch_size < 3:
pair_idx = np.arange(batch_size, dtype=np.int64)[::-1].copy()
pair_idx = torch.from_numpy(pair_idx).cuda()
else:
pair_idx = torch.randperm(batch_size).cuda()
#diff_outputs = torch.sigmoid(outputs) - torch.sigmoid(outputs[pair_idx])
diff_outputs = outputs - outputs[pair_idx]
diff_targets = targets - targets[pair_idx]
loss = nn.L1Loss()(diff_outputs, diff_targets)
return loss
|
4030a83bbdb5575ff7735328134d72748bc6af51
| 3,643,900
|
def get_mwis(input_tree):
"""Get minimum weight independent set
"""
num_nodes = input_tree['num_nodes']
nodes = input_tree['nodes']
if num_nodes <= 0:
return []
weights = [0, nodes[0][0]]
for idx, node_pair in enumerate(nodes[1:], start=1):
node_weight, node_idx = node_pair
wis_prime = weights[idx]
prime2_index = max(1, idx) - 1
wis_prime2 = weights[prime2_index] + node_weight
weights.append(max(wis_prime, wis_prime2))
return weights
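# Hypothetical input illustrating the expected structure: 'nodes' holds
# (weight, index) pairs of a path graph. The returned list is the dynamic-
# programming weight table; its last entry is the best independent-set weight.
demo_tree = {'num_nodes': 3, 'nodes': [(4, 0), (1, 1), (5, 2)]}
print(get_mwis(demo_tree))  # [0, 4, 4, 9]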
|
3df82615d1060756b1a4863fe168ea542dfed4f9
| 3,643,901
|
def info(tid, alternate_token=False):
"""
Returns transaction information for the transaction
associated with the passed transaction ID
:param id: String with transaction ID.
:return: Dictionary with information about transaction.
"""
if not tid:
raise Exception('info() requires id parameter')
return r._get('/transactions/' + tid,
{
'oauth_token': alternate_token if alternate_token else c.access_token,
'client_id': c.client_id,
'client_secret': c.client_secret
})
|
109e3f34603dacfc3b31e5d90cd092f02a45b4f7
| 3,643,902
|
def find_extrema(array, condition):
"""
Advanced wrapper of numpy.argrelextrema
Args:
array (np.ndarray): data array
condition (np.ufunc): e.g. np.less (<), np.greater_equal (>=), etc.
Returns:
np.ndarray: indexes of extrema
np.ndarray: values of extrema
"""
# get indexes of extrema
indexes = argrelextrema(array, condition)[0]
# in case where data line is horizontal and doesn't have any extrema -- return None
if len(indexes) == 0:
return None, None
# get values based on found indexes
values = array[indexes]
# calc the difference between nearby extrema values
diff_nearby_extrema = np.abs(np.diff(values, n=1))
# form indexes where no twin extrema (the case when data line is horizontal and has two extrema on borders)
indexes = np.array([index for index, diff in zip(indexes, diff_nearby_extrema) if diff > 0] + [indexes[-1]])
# get values based on filtered indexes
values = array[indexes]
return indexes, values
|
a356c2af0d992dbc447802120094dd8880f80e3e
| 3,643,903
|
def compute_all_metrics_statistics(all_results):
"""Computes statistics of metrics across multiple decodings."""
statistics = {}
for key in all_results[0].keys():
values = [result[key] for result in all_results]
values = np.vstack(values)
statistics[key + "_MEAN"] = np.mean(values, axis=0)
statistics[key + "_STD"] = np.std(values, axis=0)
statistics[key + "_MIN"] = np.min(values, axis=0)
statistics[key + "_MAX"] = np.max(values, axis=0)
return statistics
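# Hypothetical usage (assumes numpy as np): each decoding run yields a dict of
# per-example metric arrays; statistics are taken across runs.
import numpy as np

all_results = [{'bleu': np.array([0.5, 0.7])},
               {'bleu': np.array([0.6, 0.9])}]
stats = compute_all_metrics_statistics(all_results)
print(stats['bleu_MEAN'])  # [0.55 0.8 ]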
|
c1708e78a375ddac0438b31c16f2fedcd357a4d9
| 3,643,904
|
from typing import Tuple
def parse_pubkey(expr: str) -> Tuple['PubkeyProvider', str]:
"""
Parses an individual pubkey expression from a string that may contain more than one pubkey expression.
:param expr: The expression to parse a pubkey expression from
:return: The :class:`PubkeyProvider` that is parsed as the first item of a tuple, and the remainder of the expression as the second item.
"""
end = len(expr)
comma_idx = expr.find(",")
next_expr = ""
if comma_idx != -1:
end = comma_idx
next_expr = expr[end + 1:]
return PubkeyProvider.parse(expr[:end]), next_expr
|
8731384a7aca25a5655c474925677f1e8dff9252
| 3,643,905
|
def XOR(*conditions):
"""
Creates an XOR clause between all conditions, e.g.
::
x <> 1 XOR y <> 2
*conditions* should be a list of column names.
"""
assert conditions
return _querybuilder.logical_xor(conditions)
|
a2870b7bbafa5247fd3786d5ca10446ae9d7662a
| 3,643,906
|
def Interpolator(name=None, logic=None):
"""Returns an interpolator
:param name: Specify the name of the solver
:param logic: Specify the logic that is going to be used.
:returns: An interpolator
:rtype: Interpolator
"""
return get_env().factory.Interpolator(name=name, logic=logic)
|
0f2e33f3bb98578f8f24236fb4e0c32875183f38
| 3,643,907
|
def trim_to_min_length(bits):
"""Ensures 'bits' have min number of leading zeroes.
Assumes 'bits' is big-endian, and that it needs to be encoded in 5 bit blocks.
"""
bits = bits[:] # copy
# make sure we can be split into 5 bit blocks
while bits.len % 5 != 0:
bits.prepend('0b0')
# Get minimal length by trimming leading 5 bits at a time.
while bits.startswith('0b00000'):
if len(bits) == 5:
break # v == 0
bits = bits[5:]
return bits
|
d740ce27e0ebce30f382844a9810f7792c9b4669
| 3,643,908
|
def get_attributes_callback(get_offers_resp):
"""Callback fn for when get_attributes is called asynchronously"""
return AttributesProvider(get_offers_resp)
|
e901d2914cb1653454b4b19e4948b02f1ed304c8
| 3,643,910
|
def display(choices, slug):
"""
Get the display name for a form choice based on its slug. We need this function
because we want to be able to store ACS data using the human-readable display
name for each field, but in the code we want to reference the fields using their
slugs, which are easier to change.
:param choices: A list of tuples representing Django-style form choices.
:param slug: The slug of the choice to select.
:return: The display name for the given slug.
"""
for choice_slug, display_name in choices:
if choice_slug == slug:
return display_name
raise NameError('No choice for slug {} in {}'.format(slug, str(choices)))
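# Quick example with hypothetical Django-style choices:
CITY_CHOICES = [('ny', 'New York'), ('la', 'Los Angeles')]
print(display(CITY_CHOICES, 'la'))  # 'Los Angeles'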
|
e177fa4596de8a9921d05216d51344e95dce89ab
| 3,643,911
|
def autocorrelation(data):
"""Autocorrelation routine.
Compute the autocorrelation of a given signal 'data'.
Parameters
----------
data : ndarray
1D signal to compute the autocorrelation.
Returns
-------
ndarray
the autocorrelation of the signal data.
"""
n_points = len(data)
variance = data.var()
data = data - data.mean()
corr = correlate(data, data, mode='full')[-n_points:]
result = corr / (variance * arange(n_points, 0, -1))
return result
|
205a4ffd7a3b2f6cd4608edc58407478c0e29588
| 3,643,912
|
def yes_or_no(question, default="no"):
"""
Returns True if question is answered with yes else False.
default: by default False is returned if there is no input.
"""
answers = "yes|[no]" if default == "no" else "[yes]|no"
prompt = "{} {}: ".format(question, answers)
while True:
answer = input(prompt).lower()
if answer == '':
answer = default
if answer in ['no', 'n']:
return False
elif answer in ['yes', 'y']:
return True
|
496137bcd3d99a3f0bcc5bb87ab3dc090f8fc414
| 3,643,913
|
import requests
from io import StringIO
def query_airnow(param, data_period, bbox, key=None):
"""Construct an AirNow API query request and parse response.
Args:
param (str):
The evaluation parameter for which to query data.
data_period (list):
List with two elements, the first is the start date and time for
the query and the second is the end date and time for the query.
The API is sequentially queried in monthly intervals, so the start
date will usually be something like '2021-01-01T00' and the end
date will follow as '2021-01-31T23'.
bbox (dict):
Bounding box of latitude and longitude values for AirNow API
queries.
key (str):
User key for API authentication.
Returns:
data (pandas DataFrame):
Data returned by the API for the specified query parameter and
time period.
"""
if type(param) is str:
param_list = [param]
elif type(param) is list:
param_list = param
else:
raise TypeError('Invalid type specified for "param". Must be either '
'str or list.')
begin = data_period[0][:-3]
end = data_period[1][:-3]
print('..Query start:', begin)
print('..Query end:', end)
# API Items
urlbase = "http://www.airnowapi.org/aq/data/?"
dataType = "C"
dataformat = "text/csv"
verbose = "1" # bool
nowcastonly = "0" # bool
rawconc = "1" # bool
# Construct query URL
url = urlbase + 'startdate=' + str(data_period[0])
url += '&enddate=' + str(data_period[1])
url += '&parameters=' + ','.join(param_list)
url += '&bbox=' + str(bbox["minLong"]) + ','
url += str(bbox["minLat"]) + ','
url += str(bbox["maxLong"]) + ','
url += str(bbox["maxLat"])
url += '&datatype=' + str(dataType)
url += '&format=' + str(dataformat)
url += '&verbose=' + str(verbose)
url += '&nowcastonly=' + str(nowcastonly)
url += '&includerawconcentrations=' + str(rawconc)
url += '&api_key=' + str(key)
# Get query response
data = requests.get(url)
fmt_query_data = StringIO(data.text)
data = pd.read_csv(fmt_query_data, sep=',',
names=['Site_Lat', 'Site_Lon', 'DateTime',
'Param_Name', 'Param_NowCast_Value',
'Param_Unit', 'Param_Value', 'Site_Name',
'Agency', 'Site_AQS', 'Site_Full_AQS'])
if data.empty:
status = 'Failed'
else:
status = 'Success'
data['Site_AQS'] = data['Site_AQS'].astype(str)
state_id = data['Site_AQS'].str.slice(0, 2)
county_id = data['Site_AQS'].str.slice(2, 5)
site_id = data['Site_AQS'].str.slice(5, 9)
data['Site_AQS'] = (state_id + '-' + county_id + '-' + site_id)
site_name = list(i for i in data.Site_Name.unique())
site_aqs = list(i for i in data.Site_AQS.astype(str).unique())
site_lat = list(i for i in data.Site_Lat.astype(str).unique())
site_lon = list(i for i in data.Site_Lon.astype(str).unique())
print('..Query site(s):')
for name, aqs, lat, lon in zip(site_name, site_aqs,
site_lat, site_lon):
print('....Site name:', name)
print('......AQS ID:', aqs)
print('......Latitude:', "{0:7.4f}".format(float(lat)))
print('......Longitude:', "{0:7.4f}".format(float(lon)))
# Print warning if data from multiple sites are returned
if any(len(lst) > 1 for lst in [site_name, site_aqs,
site_lat, site_lon]):
print('..Warning: Query returned data from multiple sites.',
'\n..Site selection can be narrowed by reducing the '
'bounding box size.')
print('..Query Status:', status)
return data
|
97dbfddd4abaee8491c7aa90e2d5c46796e825ac
| 3,643,914
|
def compile(string):
"""
Compile a string to a template function for the path.
"""
return tokens_to_function(parse(string))
|
ace794e2378b493b8d6ea06c0185ada5e90289a2
| 3,643,915
|
import math
def flajolet_martin(data, k):
"""Estimates the number of unique elements in the input set values.
Inputs:
data: The data for which the cardinality has to be estimated.
k: The number of bits of hash to use as a bucket number. The number of buckets is 2^k
Output:
Returns the estimated number of unique items in the dataset
"""
total_buckets = 2 ** k
total_zeroes = []
for i in range(total_buckets):
total_zeroes.append(0)
for i in data:
h = hash(str(i)) #convert the value into a string because python hashes integers to themselves
bucket = h & (total_buckets - 1) #Finds the bucket where the number of ending zero's are appended
bucket_hash = h >> k #move the bits of the hash to the right to use the binary digits without the bucket digits
total_zeroes[bucket] = max(total_zeroes[bucket], zero_counter(bucket_hash))
return math.ceil(2 ** (float(sum(total_zeroes)) / total_buckets) * total_buckets * 0.79402)
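# zero_counter() is referenced above but not shown; a plausible definition
# (count of trailing zero bits) and a rough usage sketch follow. Both are
# assumptions, not part of the original source. Results vary between runs
# because Python randomizes str hashes.
def zero_counter(value):
    """Count trailing zero bits of a non-negative integer."""
    count = 0
    while value > 0 and value & 1 == 0:
        count += 1
        value >>= 1
    return count

print(flajolet_martin(range(10000), k=6))  # order-of-magnitude distinct-count estimate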
|
da7513b8c672278eebf00e71ca8a37a442b7fee0
| 3,643,916
|
def decode(encoded: list):
"""Problem 12: Decode a run-length encoded list.
Parameters
----------
encoded : list
The encoded input list
Returns
-------
list
The decoded list
Raises
------
TypeError
If the given argument is not of `list` type
"""
if not isinstance(encoded, list):
raise TypeError('The argument given is not of `list` type.')
decoded = []
for x in encoded:
if isinstance(x, list):
decoded.extend(x[0] * [x[1]])
else:
decoded.append(x)
return decoded
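# Example of run-length decoding: [count, value] pairs are expanded, bare
# items pass through unchanged.
print(decode([[3, 'a'], 'b', [2, 'c']]))  # ['a', 'a', 'a', 'b', 'c', 'c']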
|
8fb273140509f5a550074c6d85e485d2dc1c79d0
| 3,643,918
|
import random
def create_offset(set_point_value):
"""Docstring here (what does the function do)"""
offset_value = random.randint(-128, 128)
offset_value_incrementation = float(offset_value / 100)
return set_point_value - offset_value_incrementation
|
8b41ce32d98edd87c2317a971d87f9b74c3f1b6c
| 3,643,919
|
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
print(example)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
|
88a7bd54dbaa8fedfc1cc09665dc3676d5effc8f
| 3,643,920
|
import hashlib
import queue
def startpeerusersync(
server, user_id, resync_interval=OPTIONS["Deployment"]["SYNC_INTERVAL"]
):
"""
Initiate a SYNC (PULL + PUSH) of a specific user from another device.
"""
user = FacilityUser.objects.get(pk=user_id)
facility_id = user.facility.id
device_info = get_device_info()
command = "sync"
common_job_args = dict(
keep_alive=True,
resync_interval=resync_interval,
job_id=hashlib.md5("{}::{}".format(server, user).encode()).hexdigest(),
extra_metadata=prepare_sync_task(
facility_id,
user_id,
user.username,
user.facility.name,
device_info["device_name"],
device_info["instance_id"],
server,
type="SYNCPEER/SINGLE",
),
)
job_data = None
# attempt to resume an existing session
sync_session = find_soud_sync_session_for_resume(user, server)
if sync_session is not None:
command = "resumesync"
# if resuming encounters an error, it should close the session to avoid a loop
job_data = prepare_soud_resume_sync_job(
server, sync_session.id, user_id, **common_job_args
)
# if not resuming, prepare normal job
if job_data is None:
job_data = prepare_soud_sync_job(
server, facility_id, user_id, **common_job_args
)
job_id = queue.enqueue(peer_sync, command, **job_data)
return job_id
|
c32ac73c11bf7114477fe49bd53ea8beb012522b
| 3,643,921
|
import re
def clean_url(str_text_raw):
"""This function eliminate a string URL in a given text"""
str_text = re.sub("url_\S+", "", str_text_raw)
str_text = re.sub("email_\S+", "", str_text)
str_text = re.sub("phone_\S+", "", str_text)
return(re.sub("http[s]?://\S+", "", str_text))
|
f14d4647bad72ec08aa64f19bbdd2726eb47d63b
| 3,643,922
|
def compare_strategies(strategy, baseline=always_roll(5)):
""" Вернуть среднее отношение побед STRATEGY против BASELINE """
as_first = 1 - make_average(play)(strategy, baseline)
as_second = make_average(play)(baseline, strategy)
return (as_first + as_second) / 2
|
609e6ecc057e72f081fd033a8ee2a02cead20b36
| 3,643,923
|
from pathlib import Path
def process_geo(
path_geo_file: Path,
*,
add_pop: bool = True,
add_neighbors: bool = True,
add_centroids: bool = False,
save_geojson: bool = False,
path_pop_file: Path = PATH_PA_POP,
path_output_geojson: Path = PATH_OUTPUT_GEOJSON,
) -> geopandas.GeoDataFrame:
"""
Reads a given geographic file (eg. geojson), converts it to a geopandas GeoDataFrame,
adds a column for each polygon with a list of neighboring polygons, and optionally adds
population and centroid columns for each polygon.
Args:
path_geo_file (Path): Path to geographic file (eg. geojson) that will be read.
add_neighbors (bool): Adds a new column called NEIGHBORS for each county with all geographic regions that
border each region. Defaults to True.
add_pop (bool): Adds a new field with the population for each county. Defaults to True.
save_geojson (bool): Whether to save file as geojson. Default to False.
path_pop_file (Path) OPTIONAL: Path to CSV with population data to merge to geo data. Defaults to PATH_PA_POP.
add_centroids (bool) OPTIONAL: Gets centroids of each polygon if selected. Defaults to false.
path_output_geojson (Path, optional): Path to output geojson file.
"""
gdf = geopandas.read_file(path_geo_file)
# add population data
if add_pop:
df_pop = pd.read_csv(path_pop_file)
gdf = gdf.merge(df_pop, left_on="NAME", right_on="name", how="left")
gdf["population"] = gdf["population"].astype(int)
# add NEIGHBORS column
if add_neighbors:
gdf["NEIGHBORS"] = None
for index, country in gdf.iterrows():
# get 'not disjoint' countries
neighbors = gdf[~gdf.geometry.disjoint(country.geometry)].NAME.tolist()
# remove own name from the list
neighbors = [name for name in neighbors if country.NAME != name]
# add names of neighbors as NEIGHBORS value
gdf.at[index, "NEIGHBORS"] = ", ".join(neighbors)
if add_centroids:
gdf["CENTROID"] = gdf["geometry"].centroid
if save_geojson:
gdf.to_file(path_output_geojson, driver="GeoJSON")
return gdf
|
2f5e0d546a1fe3c1ad30501726fca48ae4549d0e
| 3,643,924
|
def nml_poisson(X, sum_x, sum_xxT, lmd_max=100):
"""
Calculate NML code length of Poisson distribution. See the paper below:
Yamanishi, Kenji, and Kohei Miyaguchi. "Detecting gradual changes from data stream using MDL-change statistics."
2016 IEEE International Conference on Big Data (Big Data). IEEE, 2016.
parameters:
X: data sequence
sum_x: mean sequence
sum_xxT: variance sequence
lmd_max: the maximum value of lambda
returns:
NML code length
"""
n = len(X)
lmd_hat = sum_x / n
if lmd_hat == 0:
neg_log = np.sum(special.gammaln(X + 1))
else:
neg_log = -n * lmd_hat * np.log(lmd_hat) + \
n * lmd_hat + np.sum(special.gammaln(X + 1))
cpl = complexity_poisson(n, lmd_max)
return neg_log + cpl
|
bfcb71189e6c5a132e930c6fb83a59cdf7752982
| 3,643,925
|
import prometheus_client
def setup_metrics(app):
"""
Setup Flask app with prometheus metrics
"""
app.before_request(before_request)
app.after_request(after_request)
@app.route('/metrics')
def metrics():
# update k8s metrics each time this url is called.
global PROMETHEUS_METRICS
PROMETHEUS_METRICS = get_k8s_metrics()
return Response(prometheus_client.generate_latest(), mimetype='text/plain; version=0.0.4; charset=utf-8')
|
347893ca8ef01b7a3443bb0e75cf39a62b541847
| 3,643,926
|
def _getFormat(fileformat):
"""Get the file format constant from OpenSSL.
:param str fileformat: One of ``'PEM'`` or ``'ASN1'``.
:raises OpenSSLInvalidFormat: If **fileformat** wasn't found.
:returns: ``OpenSSL.crypto.PEM`` or ``OpenSSL.crypto.ASN1`` respectively.
"""
fileformat = 'FILETYPE_' + fileformat
fmt = getattr(OpenSSL.crypto, fileformat, None)
if fmt is not None:
return fmt
else:
raise OpenSSLInvalidFormat("Filetype format %r not found."% fileformat)
|
7aaa2ab3a8b2580052dd27e605242bda24ac8220
| 3,643,927
|
def clip(
arg: ir.NumericValue,
lower: ir.NumericValue | None = None,
upper: ir.NumericValue | None = None,
) -> ir.NumericValue:
"""
Trim values at input threshold(s).
Parameters
----------
arg
Numeric expression
lower
Lower bound
upper
Upper bound
Returns
-------
NumericValue
Clipped input
"""
if lower is None and upper is None:
raise ValueError("at least one of lower and upper must be provided")
op = ops.Clip(arg, lower, upper)
return op.to_expr()
|
9e8992e4323e97dcf6524e0275aac701fae5a305
| 3,643,929
|
def human_time_duration(seconds: int) -> str:
"""For a passed-in integer (seconds), return a human-readable duration string.
"""
if seconds <= 1:
return '<1 second'
parts = []
for unit, div in TIME_DURATION_UNITS:
amount, seconds = divmod(int(seconds), div)
if amount > 0:
parts.append('{} {}{}'.format(amount, unit, "" if amount == 1 else "s"))
return ', '.join(parts)
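# TIME_DURATION_UNITS is not shown above; a plausible definition (assumed, not
# taken from the original source) and a usage example:
TIME_DURATION_UNITS = (
    ('week', 60 * 60 * 24 * 7),
    ('day', 60 * 60 * 24),
    ('hour', 60 * 60),
    ('min', 60),
    ('sec', 1),
)
print(human_time_duration(3661))  # '1 hour, 1 min, 1 sec'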
|
714c5a90fb298deb9652703625c78c19e384296e
| 3,643,930
|
def reindex_network_nodes(network):
"""Reindex the nodes of a channel network."""
node_reindexer = SegmentNodeReindexer()
network.for_each(node_reindexer)
return network
|
d98c6f2a8c1e2c6b1d1a79e80d873f7437f33401
| 3,643,931
|
def avg_arrays_1d(data, axis=None, weights=None, **kws):
"""Average list of 1D arrays or curves by interpolation on a reference axis
Parameters
----------
data : lists of lists
data_fmt : str
define data format
- "curves" -> :func:`curves_to_matrix`
- "lists" -> :func:`curves_to_matrix`
weights : None or array
weights for the average
Returns
-------
axis, zavg : 1D arrays
np.average(zdats)
"""
data_fmt = kws.pop("data_fmt", "curves")
if data_fmt == "curves":
ax, mat = curves_to_matrix(data, axis=axis, **kws)
elif data_fmt == "lists":
ax, mat = lists_to_matrix(data, axis=axis, **kws)
else:
raise NameError("'data_fmt' not understood")
return ax, np.average(mat, axis=0, weights=weights)
|
31b441b5e8884c20b664466435af0cd54f3d4c03
| 3,643,933
|
def _is_double(arr):
"""
Return true if the array is doubles, false if singles, and raise an error if it's neither.
:param arr:
:type arr: np.ndarray, scipy.sparse.spmatrix
:return:
:rtype: bool
"""
# Figure out which dtype for data
if arr.dtype == np.float32:
return False
elif arr.dtype == np.float64:
return True
else:
raise ValueError("Only float32 or float64 dtypes are supported")
|
f476d5cf088c2dc8877858099978cb4a47dcd6de
| 3,643,934
|
def HfcVd(M, far='default'):
"""
Computes the virtual dimensionality (VD) measure for an HSI
image for specified false alarm rates. When no false alarm rate(s) is
specified, the following vector is used: 1e-3, 1e-4, 1e-5.
This metric is used to estimate the number of materials in an HSI scene.
Parameters:
M: `numpy array`
HSI data as a 2D matrix (N x p).
far: `list [default default]`
False alarm rate(s).
Returns: python list
VD measure, number of materials estimate.
References:
C.-I. Chang and Q. Du, "Estimation of number of spectrally distinct
signal sources in hyperspectral imagery," IEEE Transactions on
Geoscience and Remote Sensing, vol. 43, no. 3, mar 2004.
J. Wang and C.-I. Chang, "Applications of independent component
analysis in endmember extraction and abundance quantification for
hyperspectral imagery," IEEE Transactions on Geoscience and Remote
Sensing, vol. 44, no. 9, pp. 2601-1616, sep 2006.
"""
N, numBands = M.shape
# calculate eigenvalues of covariance and correlation between bands
lambda_cov = np.linalg.eig(np.cov(M.T))[0] # octave: cov(M')
lambda_corr = np.linalg.eig(np.corrcoef(M.T))[0] # octave: corrcoef(M')
# not really needed:
lambda_cov = np.sort(lambda_cov)[::-1]
lambda_corr = np.sort(lambda_corr)[::-1]
if far == 'default':
far = [10**-3, 10**-4, 10**-5]
else:
far = [far]
numEndmembers_list = []
for y in range(len(far)):
numEndmembers = 0
pf = far[y]
for x in range(numBands):
sigmaSquared = (2.*lambda_cov[x]/N) + (2.*lambda_corr[x]/N) + (2./N)*lambda_cov[x]*lambda_corr[x]
sigma = sp.sqrt(sigmaSquared)
tau = -ss.norm.ppf(pf, 0, abs(sigma))
if (lambda_corr[x]-lambda_cov[x]) > tau:
numEndmembers += 1
numEndmembers_list.append(numEndmembers)
return numEndmembers_list
|
d70813a914ff6c210f2084d2f576499f1bea46cc
| 3,643,935
|
def load_multicenter_aids_cohort_study(**kwargs):
"""
Originally in [1]::
Siz: (78, 4)
AIDSY: date of AIDS diagnosis
W: years from AIDS diagnosis to study entry
T: years from AIDS diagnosis to minimum of death or censoring
D: indicator of death during follow up
i AIDSY W T D
1 1990.425 4.575 7.575 0
2 1991.250 3.750 6.750 0
3 1992.014 2.986 5.986 0
4 1992.030 2.970 5.970 0
5 1992.072 2.928 5.928 0
6 1992.220 2.780 4.688 1
References
----------
[1] Cole SR, Hudgens MG. Survival analysis in infectious disease research: describing events in time. AIDS. 2010;24(16):2423-31.
"""
return _load_dataset("multicenter_aids_cohort.tsv", sep="\t", index_col=0, **kwargs)
|
a318fcf1397d4a26d98843fc32e9d75393d7ca03
| 3,643,936
|
def bomb():
"""Bomb context appropriate for testing all simple wires cases."""
bomb = Bomb()
bomb.serial = 'abc123'
bomb.batteries = True
bomb.labels = ['FRK']
return bomb
|
20ba13d5c61272dc1ebad8d05d618025659d873f
| 3,643,937
|
def get_loss(pred, label):
"""
:param pred: BxNxC
:param label: BxN
:param smpw: BxN
:return:
"""
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred)
classify_loss = tf.reduce_mean(loss)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
|
9b70057cf9019d9352e4795a6908827e2cfbc15a
| 3,643,939
|
def calculate_prec_at_k(k, prediction, target):
"""
Calculating precision at k.
"""
best_k_pred = prediction.argsort()[:k]
best_k_target = target.argsort()[:k]
return len(set(best_k_pred).intersection(set(best_k_target))) / k
|
61637938078b938e90f6ada70888512a97435ca1
| 3,643,940
|
def get_ttl(cur):
"""Get the 'extract' table as lines of Turtle (the lines are returned as a list)."""
# Get ttl lines
cur.execute(
"""WITH literal(value, escaped) AS (
SELECT DISTINCT
value,
replace(replace(replace(value, '\\', '\\\\'), '"', '\\"'), '
', '\\n') AS escaped
FROM tmp_extract
)
SELECT
'@prefix ' || prefix || ': <' || base || '> .'
FROM prefix
UNION ALL
SELECT DISTINCT
subject
|| ' '
|| predicate
|| ' '
|| coalesce(
object,
'"' || escaped || '"^^' || datatype,
'"' || escaped || '"@' || language,
'"' || escaped || '"'
)
|| ' .'
FROM tmp_extract LEFT JOIN literal ON tmp_extract.value = literal.value;"""
)
lines = []
for row in cur.fetchall():
line = row[0]
if not line:
continue
# Replace newlines
line = line.replace("\n", "\\n")
lines.append(line)
return lines
|
454b843bfc47b5a6f11cc06ea881773421499eed
| 3,643,941
|
import numpy
def pnm80(date1, date2):
"""
Wrapper for ERFA function ``eraPnm80``.
Parameters
----------
date1 : double array
date2 : double array
Returns
-------
rmatpn : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - -
e r a P n m 8 0
- - - - - - - - -
Form the matrix of precession/nutation for a given date, IAU 1976
precession model, IAU 1980 nutation model.
Given:
date1,date2 double TDB date (Note 1)
Returned:
rmatpn double[3][3] combined precession/nutation matrix
Notes:
1) The TDB date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TDB)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The matrix operates in the sense V(date) = rmatpn * V(J2000),
where the p-vector V(date) is with respect to the true equatorial
triad of date date1+date2 and the p-vector V(J2000) is with
respect to the mean equatorial triad of epoch J2000.0.
Called:
eraPmat76 precession matrix, IAU 1976
eraNutm80 nutation matrix, IAU 1980
eraRxr product of two r-matrices
Reference:
Explanatory Supplement to the Astronomical Almanac,
P. Kenneth Seidelmann (ed), University Science Books (1992),
Section 3.3 (p145).
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
(date1, date2,), rmatpn = arrayify_inputs_and_create_d3_fix(
[date1, date2], core_dims=[0, 0], out_core_shape=(3, 3), out_dtype=numpy.double)
rmatpn = ufunc.pnm80(date1, date2, rmatpn)
return rmatpn
|
e88ca2b9398a0403530c00925b0a25df85b5dbb0
| 3,643,942
|
def db_table_ddl(conn, table_name, table_cols, table_seqs, table_cons, **kwargs):
""" Generate create table DDL
"""
# Sequences
if table_seqs:
for s_ in table_seqs:
c_ = _t.m.daffkv(table_cols, "col_name", s_["col_name"])
if c_:
c_["is_seq"] = True
c_["col_type"] = "serial"
else:
raise _t.m.DbIntgrError("Sequence '%s' not related to any table '%s' column" % (s_["seq_name"], table_name))
# Columns
cols_ = []
for c_ in table_cols:
cols_.append("%s %s%s" % (c_["col_name"], c_["col_type"], c_.get("not_null") and " NOT NULL" or ""))
# Constraints
cons_ = []
if table_cons:
for c_ in table_cons:
if c_["con_type"] == "c":
cons_.append("CONSTRAINT %s %s" % (c_["con_name"], c_["con_src"]))
# Table prefix
table_pfx_ = kwargs.get("table_prefix", "")
# Construct DDL statement
stmt_ = "CREATE TABLE %s%s (%s%s)" % (table_pfx_, table_name, ", ".join(cols_), cons_ and ", %s" % ", ".join(cons_) or "")
if kwargs.get("apply"): conn.execute(stmt_, **kwargs)
return [stmt_, ]
|
ad053a7999a57d295481533de5edf1a5bc7725d4
| 3,643,943
|
def ne_to_wgs(northing, easting):
"""
Convert Northings and Eastings (NAD 83 Alaska Albers
Equal Area Conic) to WGS84 lat/long .
:param northing: AK Albers in meters
:param easting: AK Albers in meters
:returns: transformed coordinates in WGS84 lat long
"""
wgspoint = osr.SpatialReference()
wgspoint.ImportFromEPSG(4326)
nepoint = osr.SpatialReference()
nepoint.ImportFromEPSG(3338)
transform = osr.CoordinateTransformation(nepoint, wgspoint)
return transform.TransformPoint(easting, northing)
|
a282958652f2edc4707fd09068c06b40341b1d54
| 3,643,944
|
import random
def check_for_greeting(sentence, context):
"""If any of the words in the user's input was a greeting, return a greeting response"""
if (sentence.strip() in GREETING_KEYWORDS) and (context==True):
return getCurrentTimeGreeting()+", "+random.choice(GREETING_RESPONSES)
else:
return random.choice(GREETING_RESPONSES)
|
5a7fc501deb283c0a57cf3303cd3f533c8ea27da
| 3,643,945
|
def user_is_aidant(view=None, redirect_field_name="next"):
"""
Similar to :func:`~django.contrib.auth.decorators.login_required`, but
requires the user to be :term:`allowed to create mandats`.
By default, this redirects users to home of espace aidants.
"""
def test(user):
return user.can_create_mandats
decorator = user_passes_test(
test,
login_url="espace_aidant_home",
redirect_field_name=redirect_field_name,
)
return decorator if (view is None) else decorator(view)
|
c5f4577aba513f7f0c3206637209c6fe0b28a20d
| 3,643,946
|
from datetime import datetime
def get_time_zone_offset(time_zone, date_time=None):
"""
Returns the time zone offset (e.g. -0800) of the time zone for given datetime
"""
date_time = datetime.now(utc) if date_time is None else date_time
return _format_time_zone_string(time_zone, date_time, '%z')
|
b52c87fcb94044dded0aca53b4340a1afc1ac20b
| 3,643,947
|
import torch
import torch.nn.functional as F
def loss_fn_kd(scores, target_scores, T=2.):
"""Compute knowledge-distillation (KD) loss given [scores] and [target_scores].
Both [scores] and [target_scores] should be tensors, although [target_scores] should be repackaged.
'Hyperparameter': temperature"""
device = scores.device
log_scores_norm = F.log_softmax(scores / T, dim=1)
targets_norm = F.softmax(target_scores / T, dim=1)
# if [scores] and [target_scores] do not have equal size, append 0's to [targets_norm]
if not scores.size(1) == target_scores.size(1):
print('size does not match')
n = scores.size(1)
if n>target_scores.size(1):
n_batch = scores.size(0)
zeros_to_add = torch.zeros(n_batch, n-target_scores.size(1))
zeros_to_add = zeros_to_add.to(device)
targets_norm = torch.cat([targets_norm.detach(), zeros_to_add], dim=1)
# Calculate distillation loss (see e.g., Li and Hoiem, 2017)
KD_loss_unnorm = -(targets_norm * log_scores_norm)
KD_loss_unnorm = KD_loss_unnorm.sum(dim=1) #--> sum over classes
KD_loss_unnorm = KD_loss_unnorm.mean() #--> average over batch
# normalize
KD_loss = KD_loss_unnorm * T**2
return KD_loss
|
2a68cc317731cb98c1bfd5ea7e4eb878b9b9cfb7
| 3,643,948
|
def geom_cooling(temp, k, alpha = 0.95):
"""Geometric temperature decreasing."""
return temp * alpha
|
4263e4cc8a5de21d94bc560e8ff364d8c07f97fd
| 3,643,949
|
def metadata_version(metadata, osmelem, grp_feat, res_feat, feature_suffix):
"""Compute the version-related features of metadata and append them into
the metadata table
Parameters
----------
metadata: pd.DataFrame
Metadata table to complete
osmelem: pd.DataFrame
original data used to compute versions; contains a 'elem' feature
grp_feat: object
string that indicates which feature from 'data' must be used to group items
res_feat: object
string that indicates the measured feature (how many items correspond to each group)
feature_suffix: str
string designing the end of the new feature names
"""
osmelem_nodes = osmelem.query('elem=="node"')
osmelem_ways = osmelem.query('elem=="way"')
osmelem_relations = osmelem.query('elem=="relation"')
metadata = group_stats(metadata, osmelem_nodes, grp_feat, res_feat,
'v', '_node'+feature_suffix)
metadata = group_stats(metadata, osmelem_ways, grp_feat, res_feat,
'v', '_way'+feature_suffix)
metadata = group_stats(metadata, osmelem_relations, grp_feat, res_feat,
'v', '_relation'+feature_suffix)
return metadata
|
3ad4c51bc471f5be3ab8d6e211ce17cb56ec8b52
| 3,643,950
|
def train_lin_reg():
"""Trains a LR model and persists it as pickle file"""
return render_template(
'default_html.html',
endpoint='train_model',
data=lr.train_model(),
)
|
606e961bf67cb6c3fe19093592cc390352b9101f
| 3,643,951
|
import re
def bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):
"""
Take bytes and return a safe string that can be displayed to the user.
Single quotes are always escaped, double quotes are never escaped:
"'" + bytes_to_escaped_str(...) + "'"
gives a valid Python string.
Args:
keep_spacing: If True, tabs and newlines will not be escaped.
"""
if not isinstance(data, bytes):
raise ValueError("data must be bytes, but is {}".format(data.__class__.__name__))
# We always insert a double-quote here so that we get a single-quoted string back
# https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their
ret = repr(b'"' + data).lstrip("b")[2:-1]
if not escape_single_quotes:
ret = re.sub(r"(?<!\\)(\\\\)*\\'", lambda m: (m.group(1) or "") + "'", ret)
if keep_spacing:
ret = re.sub(
r"(?<!\\)(\\\\)*\\([nrt])",
lambda m: (m.group(1) or "") + dict(n="\n", r="\r", t="\t")[m.group(2)],
ret
)
return ret
|
fe8aa0ed3a8e3f2c7a2cf1aaeebc555b7281bde7
| 3,643,952
|
def _session_path():
"""
Return the path to the current session
:return:
"""
path = bpy.data.filepath
return path
|
7bff6b26d8654399c8b8c04689f28dfa56300211
| 3,643,953
|
def user_auth(f):
"""Checks whether user is logged in or raises error 401."""
def decorator(*args, **kwargs):
if True is False:
abort(401)
return f(*args, **kwargs)
return decorator
|
36d13ea587abc404c49e3fdb98d11a848a44de1a
| 3,643,954
|
import warnings
def is_tensorrt_plugin_loaded():
"""Check if TensorRT plugins library is loaded or not.
Returns:
bool: plugin_is_loaded flag
"""
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This function will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
global plugin_is_loaded
return plugin_is_loaded
|
98db366dbb7f8fcce425381c229d85ea1e231160
| 3,643,955
|
import urllib
import json
def check_md5(config):
"""
Find MD5 hash in providers.
:param config: Parameters object
:type config: Parameters
:return: plain string with text or exception if not found hash
:rtype: str
:raises: HashNotFound, InvalidHashFormat
"""
if not isinstance(config, Parameters):
raise TypeError("Expected Parameters, got '%s' instead" % type(config))
providers_to_check = PASSWORD_MD5_CRACKING_PROVIDERS if config.provider == "all" else [config.provider]
plain_text = None
for p in providers_to_check:
# Make URL using md5cracker API
url = "http://md5cracker.org/api/api.cracker.php?r=%s&database=%s&hash=%s" % (
randint(500, 10000),
p,
config.md5_hash)
# Proxy set?
open_fn = None
if config.proxy is not None:
proxy_handler = urllib.request.ProxyHandler({config.proxy.scheme: config.proxy.netloc})
if config.proxy_user is not None:
proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
proxy_auth_handler.add_password('realm', 'host', config.proxy_user, config.proxy_pass)
opener = urllib.request.build_opener(proxy_handler, proxy_auth_handler)
else:
opener = urllib.request.build_opener(proxy_handler)
# This time, rather than install the OpenerDirector, we use it directly:
open_fn = opener.open
else:
open_fn = urllib.request.urlopen
# Get remote info
u = open_fn(url)
_tmp_results = u.read().decode('utf-8')
if _tmp_results is None:
continue
_json_results = json.loads(_tmp_results)
# Did it fail?
if _json_results['status'] is False:
# Check if reason is for not recoverable error
if 'Invalid hash' in _json_results['message']:
raise InvalidHashFormat("Invalid Hash Format")
# It not found hash continue
continue
else:
# Hash found!!!!
plain_text = _json_results['result']
break
if plain_text is None:
raise HashNotFound("Plain text not found for hash: '%s'" % config.md5_hash)
return plain_text
|
b5b493f73cc3d5041e2449f39d374d4a0b01ea6a
| 3,643,956
|
def is_polindrom(string):
""" This function checks whether the given string is a polindrom or not. """
for i,char in enumerate(string):
if char != string[-i-1]:
return False
return True
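# Each character is compared against its mirrored position, so:
print(is_polindrom('level'))   # True
print(is_polindrom('levels'))  # False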
|
94e3cdb68c538da7b18e4567dc62fb35a58ebebb
| 3,643,957
|
def sensitive_file_response(file):
""" This function is helpful to construct your own views that will return the
actual bytes for a sensitive image. You need to pass the literal bytes for
sensitive photos through your server in order to put security checks in front
of those bytes. So for instance you might put something like this in your
views.py:
def view_photo_of_steve(request, file_name):
if request.user.username != 'Steve':
raise Exception('Only Steve may look at photos of Steve!')
return sensitive_file_response('steves_s3_bucket', file_name)
def steves_page(request):
return render(
request, 'steve.html', {'steve_photo_url': reverse(
'view_photo_of_steve',
kwargs={'file_name': SteveFile.objects.first().file_name})})
And something like this in steve.html or whatever
<img src="{{ steve_photo_url }}">
"""
bucket_config = file.bucket_config()
if bucket_config.is_public:
raise Exception((
'S3 bucket {} is public, so performance-wise, it is best to just '
'leave this server out of it entirely and use public_photo_url '
'in djaveS3.S3 instead.').format(bucket_config.name))
img_bytes = Bucket(bucket_config).file_bytes(file.file_name)
if img_bytes:
return HttpResponse(
img_bytes, content_type=content_type_from_file_name(file.file_name))
raise Http404()
|
8e66b65943b28ee3c7fce0e2f29df60da43be4de
| 3,643,959
|
def get_unmapped_read_count_from_indexed_bam(bam_file_name):
"""
Get number of unmapped reads from an indexed BAM file.
Args:
bam_file_name (str): Name of indexed BAM file.
Returns:
int: number of unmapped reads in the BAM
Note:
BAM must be indexed for lookup using samtools.
"""
index_output = tk_subproc.check_output('samtools idxstats %s' % bam_file_name, shell=True)
return int(index_output.strip().split('\n')[-1].split()[-1])
|
9088ab1476703d5c845b9c1bed960eb2209e0b5d
| 3,643,960
|
import theano.tensor as T
def model_setup(model_dict, X_train, y_train, X_test, y_test, X_val,
y_val, rd=None, layer=None):
"""
Main function to set up network (create, load, test, save)
"""
rev = model_dict['rev']
dim_red = model_dict['dim_red']
if rd != None:
# Doing dimensionality reduction on dataset
print("Doing {} with rd={} over the training data".format(dim_red, rd))
X_train, X_test, X_val, dr_alg = dr_wrapper(X_train, X_test, dim_red, rd,
y_train, rev, X_val)
else: dr_alg = None
# Getting data parameters after dimensionality reduction
data_dict = get_data_shape(X_train, X_test, X_val)
no_of_dim = data_dict['no_of_dim']
# Prepare Theano variables for inputs and targets
if no_of_dim == 2: input_var = T.matrix('inputs')
elif no_of_dim == 3: input_var = T.tensor3('inputs')
elif no_of_dim == 4: input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
# Check if model already exists
if layer is not None:
network, model_exist_flag, layers = model_creator(model_dict, data_dict, input_var,
target_var, rd, layer)
else:
network, model_exist_flag = model_creator(model_dict, data_dict,
input_var, target_var, rd,
layer)
#Defining symbolic variable for network output
prediction = lasagne.layers.get_output(network)
#Defining symbolic variable for network parameters
params = lasagne.layers.get_all_params(network, trainable=True)
#Defining symbolic variable for network output with dropout disabled
test_prediction = lasagne.layers.get_output(network, deterministic=True)
# Building or loading model depending on existence
if model_exist_flag == 1:
# Load the correct model:
param_values = model_loader(model_dict, rd)
lasagne.layers.set_all_param_values(network, param_values)
elif model_exist_flag == 0:
# Launch the training loop.
print("Starting training...")
if layer is not None:
model_trainer(input_var, target_var, prediction, test_prediction,
params, model_dict, X_train, y_train,
X_val, y_val, network, layers)
else:
model_trainer(input_var, target_var, prediction, test_prediction,
params, model_dict, X_train, y_train,
X_val, y_val, network)
model_saver(network, model_dict, rd)
# Evaluating on retrained inputs
test_model_eval(model_dict, input_var, target_var, test_prediction,
X_test, y_test, rd)
return data_dict, test_prediction, dr_alg, X_test, input_var, target_var
|
4a17d298a1ea3574ba21d651e0bfc4da957c01f4
| 3,643,961
|
def AreEqual(image1, image2, tolerance=0, likely_equal=True):
"""Determines whether two images are identical within a given tolerance.
Setting likely_equal to False enables short-circuit equality testing, which
is about 2-3x slower for equal images, but can be image height times faster
if the images are not equal."""
return impl.AreEqual(image1, image2, tolerance, likely_equal)
|
7e9dbe469aefd089e87104100ede776996b65c83
| 3,643,962
|
def ppc_deconvolve(im, kernel, kfft=None, nchans=4,
same_scan_direction=False, reverse_scan_direction=False):
"""PPC image deconvolution
Given an image (or image cube), apply PPC deconvolution kernel
to obtain the intrinsic flux distribution.
If performing PPC deconvolution, make sure to perform channel-by-channel
with the kernel in the appropriate scan direction. IPC is usually symmetric,
so this restriction may not apply.
Parameters
==========
im : ndarray
Image or array of images.
kernel : ndarray
Deconvolution kernel.
kfft : Complex ndarray
Option to directly supply the kernel's FFT rather than
calculating it within the function. The supplied ndarray
should have shape (ny,nx) equal to the input `im`. Useful
if calling ``ipc_deconvolve`` multiple times.
"""
# Image cube shape
sh = im.shape
ndim = len(sh)
if ndim==2:
ny, nx = sh
nz = 1
else:
nz, ny, nx = sh
chsize = int(nx / nchans)
im = im.reshape([nz,ny,nchans,-1])
# FFT of kernel
if kfft is None:
k_big = pad_or_cut_to_size(kernel, (ny,chsize))
kfft = np.fft.fft2(k_big)
# Channel-by-channel deconvolution
for ch in np.arange(nchans):
sub = im[:,:,ch,:]
if same_scan_direction:
flip = True if reverse_scan_direction else False
elif np.mod(ch,2)==0:
flip = True if reverse_scan_direction else False
else:
flip = False if reverse_scan_direction else True
if flip:
sub = sub[:,:,:,::-1]
sub = ipc_deconvolve(sub, kernel, kfft=kfft)
if flip:
sub = sub[:,:,:,::-1]
im[:,:,ch,:] = sub
im = im.reshape(sh)
return im
|
a2d6b05c33591d7f5e2f25b7f51258e5433ad970
| 3,643,963
|
def split_data(ratings, min_num_ratings, p_test=0.1, verbose=False, seed=988):
"""
Splits the data set (ratings) to training data and test data
:param ratings: initial data set (sparse matrix of dimensions n items and p users)
:param min_num_ratings: all users and items must have at least min_num_ratings per user and per item to be kept
:param p_test: proportion of the data dedicated to test
:param verbose: True if user wants to print details of computation
:param seed: random seed
:return: - valid_ratings (initial data set where some items and users where dropped)
- train train data (same shape as valid_ratings but with 1-p_test non_zero values)
- test data (same shape as valid_ratings but with p_test non zero values
"""
num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten()
num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten()
# set seed
np.random.seed(seed)
# select user and item based on the condition.
valid_users = np.where(num_items_per_user >= min_num_ratings)[0]
valid_items = np.where(num_users_per_item >= min_num_ratings)[0]
valid_ratings = ratings[valid_items, :][:, valid_users]
# define the sparse matrix that will contain train and test data
train = sp.lil_matrix(valid_ratings.shape)
test = sp.lil_matrix(valid_ratings.shape)
# get the index of non zero elements of the valid_ratings
non_zero_item, non_zero_users = valid_ratings.nonzero()
# for each item, select p_test percent of users to put in test and put the rest in train
for item in set(non_zero_item):
_, indexes = valid_ratings[item].nonzero()
test_ind = np.random.choice(indexes, size=int(len(indexes) * p_test))
train_ind = list(set(indexes) - set(test_ind))
train[item, train_ind] = valid_ratings[item, train_ind]
test[item, test_ind] = valid_ratings[item, test_ind]
if verbose:
print('Shape of original ratings : {}'.format(ratings.shape))
print('Shape of valid ratings (and of train and test data) : {}'.format(valid_ratings.shape))
print("Total number of nonzero elements in original data : {v}".format(v=ratings.nnz))
print("Total number of nonzero elements in train data : {v}".format(v=train.nnz))
print("Total number of nonzero elements in test data : {v}".format(v=test.nnz))
return valid_ratings, train, test
|
9f77f8b35465de1e97082f4ba897ecd94db33801
| 3,643,965
|
def _adjust_block(p, ip, filters, block_id=None):
"""Adjusts the input `previous path` to match the shape of the `input`.
Used in situations where the output number of filters needs to be changed.
Arguments:
p: Input tensor which needs to be modified
ip: Input tensor whose shape needs to be matched
filters: Number of output filters to be matched
block_id: String block_id
Returns:
Adjusted Keras tensor
"""
channel_dim = 1 if K.image_data_format() == 'channels_first' else -1
img_dim = 2 if K.image_data_format() == 'channels_first' else -2
ip_shape = K.int_shape(ip)
if p is not None:
p_shape = K.int_shape(p)
with K.name_scope('adjust_block'):
if p is None:
p = ip
elif p_shape[img_dim] != ip_shape[img_dim]:
with K.name_scope('adjust_reduction_block_%s' % block_id):
p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p)
p1 = AveragePooling2D(
(1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_1_%s' % block_id)(
p)
p1 = Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_1_%s' % block_id,
kernel_initializer='he_normal')(
p1)
p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2)
p2 = AveragePooling2D(
(1, 1),
strides=(2, 2),
padding='valid',
name='adjust_avg_pool_2_%s' % block_id)(
p2)
p2 = Conv2D(
filters // 2, (1, 1),
padding='same',
use_bias=False,
name='adjust_conv_2_%s' % block_id,
kernel_initializer='he_normal')(
p2)
p = concatenate([p1, p2], axis=channel_dim)
p = BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
elif p_shape[channel_dim] != filters:
with K.name_scope('adjust_projection_block_%s' % block_id):
p = Activation('relu')(p)
p = Conv2D(
filters, (1, 1),
strides=(1, 1),
padding='same',
name='adjust_conv_projection_%s' % block_id,
use_bias=False,
kernel_initializer='he_normal')(
p)
p = BatchNormalization(
axis=channel_dim,
momentum=0.9997,
epsilon=1e-3,
name='adjust_bn_%s' % block_id)(
p)
return p
|
3ba9b4cc7736511dd54bd907975752c942d82fa2
| 3,643,966
|
def filter_points(points: np.array, image_width: int, image_height: int) -> np.array:
"""
function finds indexes of points that are within image frame ( within image width and height )
searches for
points with x coordinate greater than zero, less than image_width
points with y coordinate greater than zero, less than image_height
Args:
points: points to be filter, shape: number_points,2
image_width: width of image frame
image_height: height of image frame
return:
indexes of points that satisfy both conditions
"""
# points with x coordinate greater than zero, less than image_width
in_w = np.logical_and(points[:, 0] > 0, points[:, 0] < image_width)
# points with y coordinate greater than zero, less than image_height
in_h = np.logical_and(points[:, 1] > 0, points[:, 1] < image_height)
return np.logical_and(in_w, in_h)
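# A small sanity check (assumes numpy as np): keep only points strictly inside
# a 100x50 frame.
import numpy as np

pts = np.array([[10, 20], [-5, 30], [150, 10], [99, 49]])
mask = filter_points(pts, image_width=100, image_height=50)
print(mask)       # [ True False False  True]
print(pts[mask])  # [[10 20] [99 49]]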
|
6020984195f4f5f2a9eb8f1efde3ee186ca82328
| 3,643,968
|
import inspect
def url_of_LumpClass(LumpClass: object) -> str:
"""gets a url to the definition of LumpClass in the GitHub repo"""
script_url = LumpClass.__module__[len("bsp_tool.branches."):].replace(".", "/")
line_number = inspect.getsourcelines(LumpClass)[1]
lumpclass_url = f"{branches_url}{script_url}.py#L{line_number}"
return lumpclass_url
|
dcd5adf9914055afb10b537d032c2ada32406950
| 3,643,969
|
def correlation_sum(indicators, embedding_dim):
"""
Calculate a correlation sum
Useful as an estimator of a correlation integral
Parameters
----------
indicators : 2d array
matrix of distance threshold indicators
embedding_dim : integer
embedding dimension
Returns
-------
corrsum : float
Correlation sum
indicators_joint
matrix of joint-distance-threshold indicators
"""
if not indicators.ndim == 2:
raise ValueError('Indicators must be a matrix')
if not indicators.shape[0] == indicators.shape[1]:
raise ValueError('Indicator matrix must be symmetric (square)')
if embedding_dim == 1:
indicators_joint = indicators
else:
corrsum, indicators = correlation_sum(indicators, embedding_dim - 1)
indicators_joint = indicators[1:, 1:]*indicators[:-1, :-1]
nobs = len(indicators_joint)
corrsum = np.mean(indicators_joint[np.triu_indices(nobs, 1)])
return corrsum, indicators_joint
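
A small synthetic check; the series, threshold, and embedding dimension are made up for illustration:

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(50)
eps = 0.5
# Symmetric 0/1 matrix indicating which pairs of observations are closer than eps.
indicators = (np.abs(x[:, None] - x[None, :]) < eps).astype(float)
corrsum_2, _ = correlation_sum(indicators, embedding_dim=2)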
|
9d5e82d6e9e4107ca14e114ed501ced67abfe25f
| 3,643,970
|
def find_suitable_serializer(obj):
"""
Find serializer that is suitable for this operation
:param T obj: The object that needs to be serialized
:return: The first suitable serializer for this type of object
:rtype: mlio.io.serializers.implementations.SerializerBase
"""
for serializer in __serializers_registry.values():
if serializer.can_serialize(obj):
return serializer
    raise UnknownObjectType("Cannot find a suitable serializer for object of type {}".format(type(obj)))
|
b162e5eed35e18485c1d7c21dd27de3b2fd55a47
| 3,643,971
|
def square(x):
"""Return x squared."""
return x * x
|
d3177d90b4d1c76c0a426b3613c17cced404db45
| 3,643,972
|
def int_div_test(equation, val):
"""
Comparison for the integer division binary search.
:equation: Equation to test
:val: Input to the division
"""
r1 = equation(val)
    if r1 is None:
        return None
    r2 = equation(val - 1)
    if r2 is None:
        return None
if r1 == 1 and r2 == 0:
return 0
elif r1 >= 1:
return 1
else:
return -1
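
A hedged sketch of how this comparator could drive a binary search; the equation (floor division by 100, so the threshold is 100) is made up for illustration:

def example_equation(v):
    return v // 100

lo, hi = 1, 10_000
while lo <= hi:
    mid = (lo + hi) // 2
    result = int_div_test(example_equation, mid)
    if result == 0:
        print("threshold:", mid)  # -> 100
        break
    elif result > 0:
        hi = mid - 1
    else:
        lo = mid + 1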
|
16b9106ddb1fc7472339019926a891c6c1942d18
| 3,643,973
|
def get_market_deep(symbols=None, output_format='json', **kwargs):
"""
Top-level function to obtain DEEP data for a symbol or list of symbols
Parameters
----------
symbols: str or list, default None
A symbol or list of symbols
output_format: str, default 'json', optional
Desired output format. JSON required.
kwargs:
Additional Request Parameters (see base class)
Notes
-----
Pandas not supported as an output format for the DEEP endpoint.
"""
return DEEP(symbols, output_format, **kwargs).fetch()
|
f54dfc58835f1b10b35f54b8abd9635ffa2f5ace
| 3,643,974
|
def read_ss(path, dataset, order=None):
""" Read secondary structure prediction file
using specified order or automatically determined order based on results"""
with open(path, 'r') as f:
lines = f.readlines()
lines = [line.split() for line in lines]
start = 0
length = len(dataset.sequences[0])
for i, line in enumerate(lines):
if len(line) == 6 and line[0] == '1' and line[2] in ['C', 'E', 'H']:
start = i
break
data = np.array(lines[start:start+length])
seq = ''.join(list(data[:,1]))
assert str(seq) == str(dataset.sequences[0])
if order is None:
coil_defined = 1
sheet_defined = 1
helix_defined = 1
current_line = 0
order = np.zeros(3)
while coil_defined + sheet_defined + helix_defined > 1:
if data[current_line, 2] == 'C' and coil_defined > 0:
values = np.array(data[current_line, 3:6])
a = np.argmax(values)
if values[a-1] < values[a] and values[a-2] < values[a]:
order[0] = a + 3
coil_defined = 0
elif data[current_line, 2] == 'E' and sheet_defined > 0:
values = np.array(data[current_line, 3:6])
a = np.argmax(values)
if values[a-1] < values[a] and values[a-2] < values[a]:
order[1] = a + 3
sheet_defined = 0
elif data[current_line, 2] == 'H' and helix_defined > 0:
values = np.array(data[current_line, 3:6])
a = np.argmax(values)
if values[a-1] < values[a] and values[a-2] < values[a]:
order[2] = a + 3
helix_defined = 0
if coil_defined + sheet_defined + helix_defined == 1:
order[np.argmin(order)] = 12 - np.sum(order)
current_line = current_line + 1
assert sorted(order) == [3, 4, 5]
order = np.array(order, dtype=int)
return np.array(np.stack([data[:, order[0]],
data[:, order[1]],
data[:, order[2]]], axis=1), dtype=float)
|
691c03ed6a0c1375a74b27bfe498d197b1820723
| 3,643,975
|
def get_model():
"""
"""
# Describe the Convolutional Neural Network
model = tf.keras.Sequential([
# Convolutions
# Pooling
# Flatten units
tf.keras.layers.Flatten(),
# Input Layer
# Avoid overfitting
# Output layer - NUM_SHAPE_TYPES units
tf.keras.layers.Dense(NUM_SHAPE_TYPES, activation="softmax")
])
# Train the model
model.compile(
optimizer="adam",
loss="categorical_crossentropy",
metrics=["accuracy"]
)
return model
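
The comments above outline an architecture whose layers are not filled in; a hedged sketch of one possible completion (the input shape, filter counts, and NUM_SHAPE_TYPES value are assumptions, not taken from the original):

import tensorflow as tf

IMG_WIDTH, IMG_HEIGHT, NUM_SHAPE_TYPES = 30, 30, 4  # hypothetical values

model = tf.keras.Sequential([
    # Convolutions
    tf.keras.layers.Conv2D(32, (3, 3), activation="relu",
                           input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),
    # Pooling
    tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
    # Flatten units
    tf.keras.layers.Flatten(),
    # Hidden layer
    tf.keras.layers.Dense(128, activation="relu"),
    # Avoid overfitting
    tf.keras.layers.Dropout(0.5),
    # Output layer - NUM_SHAPE_TYPES units
    tf.keras.layers.Dense(NUM_SHAPE_TYPES, activation="softmax"),
])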
|
cc8403ec943acdae2916554378fb9ba475400dce
| 3,643,976
|
def Packet_genReadVpeMagnetometerAdvancedTuning(errorDetectionMode, buffer, size):
"""Packet_genReadVpeMagnetometerAdvancedTuning(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t"""
return _libvncxx.Packet_genReadVpeMagnetometerAdvancedTuning(errorDetectionMode, buffer, size)
|
f4652a1d432b5c8df2e9abacd9ea990b5a07a09d
| 3,643,977
|
def compare(times_list=None,
name=None,
include_list=True,
include_stats=True,
delim_mode=False,
format_options=None):
"""
Produce a formatted comparison of timing datas.
Notes:
If no times_list is provided, produces comparison reports on all parallel
subdivisions present at the root level of the current timer. To compare
parallel subdivisions at a lower level, get the times data, navigate
within it to the parallel list of interest, and provide that as input
here. As with report(), any further parallel subdivisions encountered
have only their member with the greatest total time reported on (no
branching).
Args:
times_list (Times, optional): list or tuple of Times objects. If not
provided, uses current root timer.
name (any, optional): Identifier, passed through str().
include_list (bool, optional): Display stamps hierarchy.
include_stats (bool, optional): Display stamp comparison statistics.
delim_mode (bool, optional): If True, format for spreadsheet.
format_options (None, optional): Formatting options, see below.
Formatting Keywords & Defaults:
Human-readable Mode
- 'stamp_name_width': 18
- 'list_column_width': 12
- 'list_tab_width': 2
- 'stat_column_width': 8
- 'stat_tab_width': 2
        - 'indent_symbol': ' ' (one space)
Delimited Mode
- 'delimiter': '\t' (tab)
- 'ident_symbol': '+'
Returns:
str: Times data comparison as formatted string.
Raises:
TypeError: If any element of provided collection is not a Times object.
"""
if times_list is None:
rep = ''
for par_dict in itervalues(f.root.times.par_subdvsn):
for par_name, par_list in iteritems(par_dict):
rep += report_loc.compare(par_list,
par_name,
include_list,
include_stats,
delim_mode,
format_options)
else:
if not isinstance(times_list, (list, tuple)):
raise TypeError("Expected a list/tuple of times instances for param 'times_list'.")
if not all([isinstance(times, Times) for times in times_list]):
raise TypeError("At least one member of param 'times_list' is not a Times object.")
rep = report_loc.compare(times_list,
name,
include_list,
include_stats,
delim_mode,
format_options)
return rep
|
7fc41c5544fb0be7569aa133e86e5bd98d8aae65
| 3,643,978
|
def spaces(elem, doc):
"""
Add LaTeX spaces when needed.
"""
# Is it in the right format and is it a Space?
if doc.format in ["latex", "beamer"] and isinstance(elem, Space):
if isinstance(elem.prev, Str) and elem.prev.text in ["«", "“", "‹"]:
return RawInline("\\thinspace{}", "tex")
if isinstance(elem.next, Str):
if elem.next.text == ":":
return RawInline("~", "tex")
if elem.next.text in [";", "?", "!", "»", "”", "›"]:
return RawInline("\\thinspace{}", "tex")
return None
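
The element types used here (Space, Str, RawInline, elem.prev/elem.next) match panflute's API, so the function can presumably be wired up as a panflute filter; a hedged sketch under that assumption:

from panflute import run_filter

def main(doc=None):
    # Walk the document and apply `spaces` to every inline element.
    return run_filter(spaces, doc=doc)

if __name__ == "__main__":
    main()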
|
68350ceeee26a6184df3a1767f725bdd65a48823
| 3,643,979
|
import re
def clean_caption(text):
"""
Remove brackets with photographer names or locations at the end of some captions
:param text: a photo caption
:return: text cleaned
"""
text = str(text)
text = re.sub(r'\s*\[.+?\]$', '.', text)
text = re.sub(r'\s*\(photo.+?\)', '', text)
return re.sub(r'-- --.+', '.', text).strip()
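
Illustrative calls with made-up captions:

print(clean_caption("Crowds gather at the plaza [John Doe/Reuters]"))
# -> "Crowds gather at the plaza."
print(clean_caption("A view of the bridge (photo by staff)"))
# -> "A view of the bridge"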
|
f07713de58c8304e437904914c78f89c795d9776
| 3,643,980
|
def take_slasher_snapshot(client):
"""
Collects all the command changes from the client's slash command processor.
Parameters
----------
client : ``Client``
The client, who will be snapshotted.
Returns
-------
collected : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \
(`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``)
The collected commands of the slasher.
"""
slasher = getattr(client, 'slasher', None)
if (slasher is None) or (not isinstance(slasher, Slasher)):
collected = None
else:
command_states = slasher._command_states
collected_application_commands = None
for guild_id, command_state in command_states.items():
if guild_id == SYNC_ID_NON_GLOBAL:
active_commands = command_state._active
if (active_commands is None):
continue
command_changes = [(True, command) for command in active_commands]
else:
changes = command_state._changes
if changes is None:
continue
command_changes = [tuple(change) for change in changes]
if collected_application_commands is None:
collected_application_commands = {}
collected_application_commands[guild_id] = command_changes
collected_component_commands = slasher._component_commands
if collected_component_commands:
collected_component_commands = collected_component_commands.copy()
else:
collected_component_commands = None
if (collected_application_commands is None) and (collected_component_commands is None):
collected = None
else:
collected = (collected_application_commands, collected_component_commands)
return collected
|
f932b7bc7cef8957ae6f269907427d2ad15fd641
| 3,643,981
|
def zigpy_device_mains(zigpy_device_mock):
"""Device tracker zigpy device."""
def _dev(with_basic_channel: bool = True):
in_clusters = [general.OnOff.cluster_id]
if with_basic_channel:
in_clusters.append(general.Basic.cluster_id)
endpoints = {
3: {
"in_clusters": in_clusters,
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
}
return zigpy_device_mock(
endpoints, node_descriptor=b"\x02@\x84_\x11\x7fd\x00\x00,d\x00\x00"
)
return _dev
|
e9fff1a96e2f3d544dfb3c48b5b574f087a0d406
| 3,643,982
|
def compute_average_precision(precision, recall):
"""Compute Average Precision according to the definition in VOCdevkit.
Precision is modified to ensure that it does not decrease as recall
decrease.
Args:
precision: A float [N, 1] numpy array of precisions
recall: A float [N, 1] numpy array of recalls
Raises:
ValueError: if the input is not of the correct format
Returns:
    average_precision: The area under the precision recall curve. NaN if
precision and recall are None.
"""
if precision is None:
if recall is not None:
raise ValueError('If precision is None, recall must also be None')
return np.NAN
if not isinstance(precision, np.ndarray) or not isinstance(
recall, np.ndarray):
raise ValueError('precision and recall must be numpy array')
  if precision.dtype != np.float64 or recall.dtype != np.float64:
raise ValueError('input must be float numpy array.')
if len(precision) != len(recall):
raise ValueError('precision and recall must be of the same size.')
if not precision.size:
return 0.0
if np.amin(precision) < 0 or np.amax(precision) > 1:
raise ValueError('Precision must be in the range of [0, 1].')
if np.amin(recall) < 0 or np.amax(recall) > 1:
raise ValueError('recall must be in the range of [0, 1].')
if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
raise ValueError('recall must be a non-decreasing array')
recall = np.concatenate([[0], recall, [1]])
precision = np.concatenate([[0], precision, [0]])
# Preprocess precision to be a non-decreasing array
for i in range(len(precision) - 2, -1, -1):
precision[i] = np.maximum(precision[i], precision[i + 1])
indices = np.where(recall[1:] != recall[:-1])[0] + 1
average_precision = np.sum(
(recall[indices] - recall[indices - 1]) * precision[indices])
return average_precision
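
A quick check on toy arrays (values made up for illustration):

import numpy as np

precision = np.array([1.0, 1.0, 0.66, 0.5], dtype=np.float64)
recall = np.array([0.25, 0.5, 0.5, 0.75], dtype=np.float64)
ap = compute_average_precision(precision, recall)  # -> 0.625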
|
cffc91d4bab5fdaf7ae99d3babaf01348f3d1160
| 3,643,983
|
def minimize_experiment_document(document):
"""
Takes a document belonging to an experiment or to a library in an experiment
and strips it down to a subset of desired fields. This differs from other
    non-experiment documents in that the attachment is a dictionary rather than
    a simple string concatenation of document @id and href.
"""
minimized_document = {}
for key in ('document_type', 'urls', 'references', 'attachment'):
if key in document:
if key == 'attachment':
minimized_document[key] = minimize_attachment(document[key], document['@id'])
else:
minimized_document[key] = document[key]
return minimized_document
|
d06c7d7f80c6f6f1e6b7808a42e3d668fe877a46
| 3,643,984
|
import pathlib
def detect_container(path: pathlib.Path) -> type[containers.Container]:
"""Detect the container of a file"""
container_type = filetype.archive_match(path)
container_mime_type = container_type.mime if container_type else None
return containers.get_container_by_mime_type(container_mime_type)
|
f30d625b8e4933da20c7ce707494d78b0e8c286b
| 3,643,985
|
import math
def score_mod(mod, word_count, mod_count, mod_match_unlabel):
"""计算模式的评分"""
p = word_count[mod]
u = len(mod_match_unlabel[mod])
t = mod_count[mod]
return (p / t) * math.log(u + 1, 2) * math.log(p + 1, 2)
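
Illustrative call with tiny hypothetical count tables:

word_count = {"<X> said": 12}                      # labeled matches of the pattern
mod_count = {"<X> said": 20}                       # total occurrences of the pattern
mod_match_unlabel = {"<X> said": ["a", "b", "c"]}  # unlabeled matches
score = score_mod("<X> said", word_count, mod_count, mod_match_unlabel)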
|
1184800a2b6a2ebfbbbdcbcbf4a0d8f8cb261e98
| 3,643,986
|
def get_matrix(costs='direct'):
"""Table used to compare the most appropriate building class for DCs"""
health_care = eeio(['233210/health care buildings/us'], [1])
manu_bldg = eeio(['233230/manufacturing buildings/us'], [1])
util_bldg = eeio(['233240/utilities buildings and infrastructure/us'], [1])
_df = util_bldg.level_sectors()
_df.columns = ['util_bldg']
if costs == 'direct':
        _df['manu_bldg'], _df['health_care'] = manu_bldg.level_sectors('direct')['direct_costs'], health_care.level_sectors('direct')['direct_costs']
elif costs == 'total':
        _df['manu_bldg'], _df['health_care'] = manu_bldg.level_sectors()['total_costs'], health_care.level_sectors()['total_costs']
return _df
|
ba77546026f03cee45224c9b5338296db79db56d
| 3,643,987
|
def redshift_resource(context):
"""This resource enables connecting to a Redshift cluster and issuing queries against that
cluster.
Example:
.. code-block:: python
from dagster import ModeDefinition, execute_solid, solid
from dagster_aws.redshift import redshift_resource
@solid(required_resource_keys={'redshift'})
def example_redshift_solid(context):
return context.resources.redshift.execute_query('SELECT 1', fetch_results=True)
result = execute_solid(
example_redshift_solid,
run_config={
'resources': {
'redshift': {
'config': {
'host': 'my-redshift-cluster.us-east-1.redshift.amazonaws.com',
'port': 5439,
'user': 'dagster',
'password': 'dagster',
'database': 'dev',
}
}
}
},
mode_def=ModeDefinition(resource_defs={'redshift': redshift_resource}),
)
assert result.output_value() == [(1,)]
"""
return RedshiftResource(context)
|
55f59c34f2102c69b11a6e6a1fc3bd09a87f96af
| 3,643,988
|
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""Compute the two-dimensional inverse FFT.
Args:
a (cupy.ndarray): Array to be transform.
s (None or tuple of ints): Shape of the transformed axes of the
output. If ``s`` is not given, the lengths of the input along the
axes specified by ``axes`` are used.
axes (tuple of ints): Axes over which to compute the FFT.
norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
to specify the normalization mode. Default is ``None``, which is
an alias of ``"backward"``.
Returns:
cupy.ndarray:
            The transformed array whose shape is specified by ``s`` and whose
            dtype is converted to complex if the input has a different dtype.
.. seealso:: :func:`numpy.fft.ifft2`
"""
func = _default_fft_func(a, s, axes)
return func(a, s, axes, norm, cufft.CUFFT_INVERSE)
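
A hedged round-trip check through the public wrapper, assuming a CuPy-enabled GPU environment:

import cupy as cp

a = cp.random.random((4, 4)) + 1j * cp.random.random((4, 4))
roundtrip = cp.fft.ifft2(cp.fft.fft2(a))
assert cp.allclose(roundtrip, a)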
|
fc8c73fec7d8bd46f86cf5e3051747915bc5e2db
| 3,643,989
|
def get_displayable_story_summary_dicts(user_id, story_summaries):
"""Returns a displayable summary dict of the story summaries
given to it.
Args:
user_id: str. The id of the learner.
story_summaries: list(StorySummary). A list of the
summary domain objects.
Returns:
        list(dict). The summary dicts corresponding to the given summaries.
"""
summary_dicts = []
story_ids = [story_summary.id for story_summary in story_summaries]
stories = story_fetchers.get_stories_by_ids(story_ids)
topic_ids = [story.corresponding_topic_id for story in stories]
topics = topic_fetchers.get_topics_by_ids(topic_ids)
for index, story_summary in enumerate(story_summaries):
summary_dicts.append({
'id': story_summary.id,
'title': story_summary.title,
'node_titles': story_summary.node_titles,
'thumbnail_filename': story_summary.thumbnail_filename,
'thumbnail_bg_color': story_summary.thumbnail_bg_color,
'description': story_summary.description,
'url_fragment': story_summary.url_fragment,
'story_is_published': (
story_services.is_story_published_and_present_in_topic(
stories[index])),
'completed_node_titles': [
node.title for node in (
story_fetchers.get_completed_nodes_in_story(
user_id, story_summary.id))],
'all_node_dicts': [
node.to_dict() for node in stories[index].story_contents.nodes
],
'topic_name': topics[index].name,
'topic_url_fragment': topics[index].url_fragment,
'classroom_url_fragment': (
classroom_services.get_classroom_url_fragment_for_topic_id(
stories[index].corresponding_topic_id))
})
return summary_dicts
|
8ae157e824e42173674d3c15704e032a3d3c406d
| 3,643,990
|
from fuzzy.Exception import FuzzyException
def checkRange(value, ranges):
"""Checks if the value is in the defined range.
The range definition is a list/iterator from:
- float values belonging to the defined range M{x \in {a}}
- 2-tuples of two floats which define a range not including the tuple values itself M{x \in ]a,b[}
- 2-list of two floats which define a range including the list values M{x \in [a,b]}
    The order of elements is not important. So one could define the set of integer numbers by a
    generator returning the following sequence: M{0,1,-1,2,-2,-3,3,...} .
    It returns True if the value is in one of the defined ranges.
    Otherwise it returns False.
"""
for part in ranges:
if isinstance(part, float):
if part == value:
return True
elif isinstance(part, list) and len(part) == 2:
if part[0] <= value and value <= part[1]:
return True
elif isinstance(part, tuple) and len(part) == 2:
if part[0] < value and value < part[1]:
return True
else:
raise FuzzyException("Range definition is wrong")
return False
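
Illustrative range definition mixing all three element kinds (values are made up):

ranges = [0.0, [1.0, 2.0], (3.0, 4.0)]
checkRange(0.0, ranges)  # True  (exact value)
checkRange(1.0, ranges)  # True  (closed interval includes its endpoints)
checkRange(3.0, ranges)  # False (open interval excludes its endpoints)
checkRange(5.0, ranges)  # False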
|
49907dfe7a18054eb710bac9e3ec38e459d85e9b
| 3,643,991
|
import json
def read_anno_content(anno_file: str):
"""Read anno content."""
with open(anno_file) as opened:
content = json.load(opened)
return content
|
208d5f92d479ebfc0aa1e93d26ca68d3ce2a1e7e
| 3,643,992
|
def openRotatorPort(portNum=0, timeout=5):
"""
Open a serial port for the rotator
Open commport ``portNum`` with a timeout of ``timeout``.
Parameters
----------
portNum : integer, default: 0
commport for the serial connection to the rotator
timeout : number, default: 5 sec
timeout for the commport
Returns
-------
    port : serial.Serial
        Serial port object used to communicate with the rotator.
"""
    ser = serial.Serial(portNum, timeout=timeout)
return ser
|
e479e41a11591964f1a7cf9ea88a4bbbc51e943c
| 3,643,993
|
from typing import List
from typing import Dict
def disable_poll_nodes_list(
nodes: List[str],
credentials: HTTPBasicCredentials = Depends(
check_credentials
), # pylint: disable=unused-argument
) -> Dict[str, str]:
"""Disable (snmp) polling on a list of nodes.
Exple of simplest call :
curl -X GET --user u:p -H "Content-type: application/json" \
http://127.0.0.1/api/disable_poll_nodes_list \
-d '["node1", "node2", "node3"]'"""
for node in nodes:
disable_node(node)
return {"response": "Ok"}
|
18900b0458c2a5b358d82ffdb4ed4ab9e382410d
| 3,643,994
|
import re
import json
def twitter_sp(name):
"""
    This function extracts location names from Twitter data (.txt file)
:param name: str, filename
:return: None
"""
def twitter_profile(user):
"""
        This function fetches the Twitter user's profile for the location in it
:param user: str, user name
:return: str, user location
"""
consumer_key = credentials.consumer_key
consumer_secret = credentials.consumer_secret
access_token = credentials.access_token
access_secret = credentials.access_secret
api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_token, \
access_token_secret=access_secret)
# print(user)
try:
user_id = api.UsersLookup(screen_name=user)
stat = user_id[0].location
except:
stat = ''
# print(stat)
return stat
tmp = open(name, 'r')
text = tmp.read()
text = text.split('\n')[:-1]
# print(len(set(text)))
output = defaultdict(list)
p1 = re.compile(r'[<](.*?)[>]') # pattern for getting the user name in the text
for tweet in text:
# username = re.findall(p1, tweet)[0]
geo_stat = ''
# geo_stat = twitter_profile(username)
time = tweet[20:44]
msg = tweet[54::]
if geo_stat:
msg += geo_stat
res = token_ex(msg)
if res:
output[time].append(res)
with open('loca_' + name + '.json', 'w') as outfile:
json.dump(output, outfile)
|
4bff0e862d9d7be250687363693dcb8f33713b03
| 3,643,995
|
import numpy as np
from copy import deepcopy
def extrudePoints(points, disp):
"""
Return a list of points including the initial points and extruded end
"""
farEnd=deepcopy(points)
farEnd[:,0]+=disp[0]
farEnd[:,1]+=disp[1]
farEnd[:,2]+=disp[2]
return np.vstack( (points,farEnd) )
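
Illustrative usage: extrude a two-point segment along z (numbers made up):

import numpy as np

points = np.array([[0.0, 0.0, 0.0],
                   [1.0, 0.0, 0.0]])
extruded = extrudePoints(points, disp=(0.0, 0.0, 1.0))
# extruded has shape (4, 3): the original points plus copies shifted by 1 in z.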
|
c544ef372bb621bd6a96f9e91147bec6b1be800d
| 3,643,996
|
def hex_form(hash):
"""Returns the hash formatted in hexadecimal form"""
final_hash = ''
for i in range(len(hash)):
final_hash += format(hash[i], '02x')
return final_hash
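
Illustrative usage with a SHA-1 digest (any iterable of byte values works):

import hashlib

digest = hashlib.sha1(b"hello").digest()
print(hex_form(digest))  # same result as digest.hex()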
|
67c1d376352517a9f368dfc56f03f1af3d45e128
| 3,643,997
|
import itertools
def rangeFromString(commaString):
""" Convert a comma string like "1,5-7" into a list [1,5,6,7]
Returns
--------
myList : list of integers
Reference
-------
http://stackoverflow.com/questions/6405208/\
how-to-convert-numeric-string-ranges-to-a-list-in-python
"""
listOfLists = [rangeFromHyphen(r) for r in commaString.split(',')]
flatList = itertools.chain(*listOfLists)
return flatList
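
rangeFromHyphen is not part of the snippet; a minimal sketch of it under the assumption that hyphen ranges are inclusive, followed by a sample call:

def rangeFromHyphen(hyphenString):
    # Hypothetical helper: "5-7" -> [5, 6, 7], "1" -> [1].
    start, _, end = hyphenString.partition('-')
    start = int(start)
    end = int(end) if end else start
    return list(range(start, end + 1))

list(rangeFromString("1,5-7"))  # -> [1, 5, 6, 7]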
|
f0a176e35c71882edbc64a515022346e199c08cb
| 3,643,998
|
def wup(synset1: Synset, synset2: Synset) -> float:
"""Return the Wu-Palmer similarity of *synset1* and *synset2*."""
lch = synset1.lowest_common_hypernyms(synset2, simulate_root=True)[0]
n = lch.max_depth() + 1
n1 = len(synset1.shortest_path(lch, simulate_root=True))
n2 = len(synset2.shortest_path(lch, simulate_root=True))
return (2 * n) / (n1 + n2 + 2 * n)
|
8e49126143fec99211abcafe04aa1f6fdb275e3a
| 3,643,999
|
from typing import List
def sum_per_agent(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame:
"""Calculates summed values per agent for each given column individually"""
all_values_per_agent = pd.DataFrame(columns=columns)
for column in columns:
function = calc_sum(column)
value_per_agent = call_function_per_agent(df, function)
for agent_id, value in value_per_agent.items():
all_values_per_agent.at[agent_id, column] = value
return all_values_per_agent
|
b828e68a2f2555b9b12f4c17376a7f88211611d4
| 3,644,000
|
def calc_one_sample_metric(sample):
""" 计算 V1 数据一个样本的 rouge-l 和 bleu4 分数 """
if len(sample['best_match_scores']) == 0: # bad case
return -1, -1
pred_answers, ref_answers = [], []
pred_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
                         # use the gold fake answer as the predicted answer
'answers': [''.join(sample['fake_answers'][sample['best_match_scores'].index(max(sample['best_match_scores']))])],
'entity_answers': [[]],
'yesno_answers': []})
ref_answers.append({'question_id': sample['question_id'],
'question_type': sample['question_type'],
'segmented_question': sample['segmented_question'],
'answers': [''.join(seg_ans) for seg_ans in sample['segmented_answers']],
'entity_answers': [[]],
'yesno_answers': [],
'documents': sample['documents']})
pred_dict = read_data_to_dict(pred_answers)
ref_dict = read_data_to_dict(ref_answers, is_ref=True)
metrics = compute_bleu_rouge(pred_dict, ref_dict)
rouge_l, bleu4 = metrics['ROUGE-L'], metrics['BLEU-4']
return rouge_l, bleu4
|
37ce17d36e2d6b31e0fdae29172de442d87fd676
| 3,644,002
|
def ta_1d(x, a, w_0, w_1):
"""1d tanh function."""
return a * np.tanh(w_0 + (w_1 * x))
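
An illustrative fit of this model to synthetic data with SciPy (an assumed use case; the snippet itself only defines the function):

import numpy as np
from scipy.optimize import curve_fit

x = np.linspace(-3, 3, 100)
y = ta_1d(x, a=2.0, w_0=0.1, w_1=1.5) + np.random.normal(0, 0.05, x.size)
popt, _ = curve_fit(ta_1d, x, y, p0=(1.0, 0.0, 1.0))
# popt should recover roughly (2.0, 0.1, 1.5)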
|
ce062d87f3040d95d8bc5360a58b0b7c4625e877
| 3,644,003
|