| content (string, length 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
|---|---|---|
def _combine_by_cluster(ad, clust_key='leiden'):
"""
Given an AnnData object, create a new AnnData object where each
observation is a cluster rather than a cell (one row per cluster).
"""
clusters = []
X_mean_clust = []
for clust in sorted(set(ad.obs[clust_key])):
cells = ad.obs.loc[ad.obs[clust_key] == clust].index
X_clust = ad[cells,:].X
x_clust = _aggregate_expression(X_clust)
X_mean_clust.append(x_clust)
clusters.append(str(clust))
X_mean_clust = np.array(X_mean_clust)
ad_mean_clust = AnnData(
X=X_mean_clust,
var=ad.var,
obs=pd.DataFrame(
data=clusters,
index=clusters
)
)
return ad_mean_clust
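# Usage sketch (illustrative only): assumes anndata, numpy and pandas are
# available, and uses a hypothetical `_aggregate_expression` that simply
# averages expression over the cluster's cells (the real helper is not shown).
import numpy as np
import pandas as pd
from anndata import AnnData

def _aggregate_expression(X):
    # hypothetical stand-in: mean expression per gene across the cluster's cells
    return np.asarray(X).mean(axis=0)

ad = AnnData(
    X=np.random.rand(6, 3),
    obs=pd.DataFrame({'leiden': ['0', '0', '1', '1', '2', '2']},
                     index=[f'cell{i}' for i in range(6)]),
    var=pd.DataFrame(index=['geneA', 'geneB', 'geneC']),
)
ad_clust = _combine_by_cluster(ad, clust_key='leiden')
print(ad_clust.X.shape)  # (3, 3): one row per cluster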
|
2d48a9f504050604a679f35c06916c175e813ffe
| 3,642,021
|
def subsample_data(features, scaled_features, labels, subsamp): # This is only for poker dataset
""" Subsample the data. """
# k is class, will iterate from class 0 to class 1
# v is fraction to sample, i.e. 0.1, sample 10% of the current class being iterated
for k, v in subsamp.items():
ix = np.where(labels == k)[0]
ix_rest = np.where(labels != k)[0]
sample_ix = np.random.choice(ix, int(v * len(ix)), replace=False)
keep_ix = np.union1d(ix_rest, sample_ix)
# subsample
features = features[keep_ix, :]
scaled_features = scaled_features[keep_ix, :]
labels = labels[keep_ix]
return features, scaled_features, labels
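# Usage sketch (illustrative only): toy data with two classes, keeping half
# of the class-0 rows; assumes numpy is imported as np.
import numpy as np

features = np.arange(20, dtype=float).reshape(10, 2)
scaled_features = features / features.max()
labels = np.array([0] * 6 + [1] * 4)
f, sf, y = subsample_data(features, scaled_features, labels, subsamp={0: 0.5})
print(len(y))  # 7: three of the six class-0 rows were dropped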
|
3981fb0c793f64d52fc21cf2bbf87975bf97c786
| 3,642,022
|
def anomary_scores_ae(df_original, df_reduced):
"""AEで再生成された特徴量から異常度を計算する関数"""
"""再構成誤差を計算する異常スコア関数
Args:
df_original(array-like): training data of shape (n_samples, n_features)
df_reduced(array-like): prediction of shape (n_samples, n_features)
Returns:
pd.Series: 各データごとの異常スコア(二乗誤差をMinMaxScalingしたもの)
"""
# サンプルごとの予測値との二乗誤差を計算
loss = np.sum((np.array(df_original) - np.array(df_reduced)) ** 2, axis=1)
# lossをpd.Seriesに変換
loss = pd.Series(data=loss, index=df_original.index)
# 二乗誤差をMinMaxScalingして0~1のスコアに変換
min_max_normalized_loss = (loss - np.min(loss)) / (np.max(loss) - np.min(loss))
return min_max_normalized_loss
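# Usage sketch (illustrative only): random data standing in for an
# autoencoder reconstruction; assumes numpy as np and pandas as pd.
import numpy as np
import pandas as pd

df_original = pd.DataFrame(np.random.rand(5, 3))
df_reduced = df_original + np.random.normal(scale=0.1, size=df_original.shape)
scores = anomary_scores_ae(df_original, df_reduced)
print(scores.min(), scores.max())  # 0.0 and 1.0 after min-max scaling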
|
f6fe2dca7c10e19ee1e0a03a8d94c663ef5d77fd
| 3,642,023
|
import logging
def latest_res_ords():
"""Get last decade from reso and ords table"""
filename = 'documentum_scs_council_reso_ordinance_v.csv'
save_path = f"{conf['prod_data_dir']}/documentum_scs_council_reso_ordinance_v"
df = pd.read_csv(f"{conf['prod_data_dir']}/{filename}",
low_memory=False)
df['DOC_DATE'] = pd.to_datetime(df['DOC_DATE'],errors='coerce')
df_current = df.loc[df['DOC_DATE'] >= "01/01/2016"]
general.pos_write_csv(df_current, f"{save_path}_2016_current.csv")
logging.info("Wrote 2016_current")
return "Successfully extracted this decade of resos and ords"
|
c23b26e878887758c6822164bcac33eb7c28f765
| 3,642,025
|
def langevin_coefficients(temperature, dt, friction, masses):
"""
Compute coefficients for langevin dynamics
Parameters
----------
temperature: float
units of Kelvin
dt: float
units of picoseconds
friction: float
collision rate in 1 / picoseconds
masses: array
mass of each atom in standard mass units. np.inf masses will
effectively freeze the particles.
Returns
-------
tuple (ca, cb, cc)
ca is scalar, and cb and cc are n length arrays
that are used during langevin dynamics as follows:
during heat-bath update
v -> ca * v + cc * gaussian
during force update
v -> v + cb * force
"""
kT = BOLTZ * temperature
nscale = np.sqrt(kT / masses)
ca = np.exp(-friction * dt)
cb = dt / masses
cc = np.sqrt(1 - np.exp(-2 * friction * dt)) * nscale
return ca, cb, cc
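# Usage sketch (illustrative only): BOLTZ is a module-level constant not
# shown in this snippet; the value below (kJ/mol/K) is an assumption made
# purely for illustration. Assumes numpy is imported as np.
import numpy as np

BOLTZ = 0.008314462618  # assumed Boltzmann constant in the code's unit system
masses = np.array([12.011, 1.008, 1.008, 1.008])
ca, cb, cc = langevin_coefficients(temperature=300.0, dt=0.0015, friction=1.0, masses=masses)
v = np.zeros_like(masses)
force = np.ones_like(masses)
v = ca * v + cc * np.random.normal(size=masses.shape)  # heat-bath update
v = v + cb * force                                      # force update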
|
a95ba22bda908fdd10171ed63eba1dc7906c0c1f
| 3,642,026
|
def get_description():
"""
Read full description from 'README.md'
:return: description
:rtype: str
"""
with open('README.md', 'r', encoding='utf-8') as f:
return f.read()
|
9a73c9dbaf88977f8c96eee056f92a7d5ff938fd
| 3,642,027
|
def qt_matrices(matrix_dim, selected_pp_indices=[0, 5, 10, 11, 1, 2, 3, 6, 7]):
"""
Get the elements of a special basis spanning the density-matrix space of
a qutrit.
The returned matrices are given in the standard basis of the
density matrix space. These matrices form an orthonormal basis
under the trace inner product, i.e. Tr( dot(Mi,Mj) ) == delta_ij.
Parameters
----------
matrix_dim : int
Matrix-dimension of the density-matrix space. Must equal 3
(present just to maintain consistency with other routines)
Returns
-------
list
A list of 9 numpy arrays each of shape (3, 3).
"""
if matrix_dim == 1: # special case of just identity mx
return [_np.identity(1, 'd')]
assert(matrix_dim == 3)
A = _np.array([[1, 0, 0, 0],
[0, 1. / _np.sqrt(2), 1. / _np.sqrt(2), 0],
[0, 0, 0, 1]], 'd') # projector onto symmetric space
def _toQutritSpace(inputMat):
return _np.dot(A, _np.dot(inputMat, A.transpose()))
qt_mxs = []
pp_mxs = pp_matrices(4)
#selected_pp_indices = [0,5,10,11,1,2,3,6,7] #which pp mxs to project
# labels = ['II', 'XX', 'YY', 'YZ', 'IX', 'IY', 'IZ', 'XY', 'XZ']
qt_mxs = [_toQutritSpace(pp_mxs[i]) for i in selected_pp_indices]
# Normalize so Tr(BiBj) = delta_ij (done by hand, since only 3x3 mxs)
qt_mxs[0] *= 1 / _np.sqrt(0.75)
#TAKE 2 (more symmetric = better?)
q1 = qt_mxs[1] - qt_mxs[0] * _np.sqrt(0.75) / 3
q2 = qt_mxs[2] - qt_mxs[0] * _np.sqrt(0.75) / 3
qt_mxs[1] = (q1 + q2) / _np.sqrt(2. / 3.)
qt_mxs[2] = (q1 - q2) / _np.sqrt(2)
#TAKE 1 (XX-II and YY-XX-II terms... not symmetric):
#qt_mxs[1] = (qt_mxs[1] - qt_mxs[0]*_np.sqrt(0.75)/3) / _np.sqrt(2.0/3.0)
#qt_mxs[2] = (qt_mxs[2] - qt_mxs[0]*_np.sqrt(0.75)/3 + qt_mxs[1]*_np.sqrt(2.0/3.0)/2) / _np.sqrt(0.5)
for i in range(3, 9): qt_mxs[i] *= 1 / _np.sqrt(0.5)
return qt_mxs
|
8e444fae5b936f4e20f615404712c91a5bbe3f4c
| 3,642,028
|
def get_signed_value(bit_vector):
"""
This function will generate the signed value for a given bit vector
bit_vector : dict mapping bit positions (int) to bit values
"""
signed_value = 0
for i in sorted(bit_vector.keys()):
if i == 0:
signed_value = int(bit_vector[i])
else:
signed_value += ((2 << 7) << (int(i) - 1)) * int(bit_vector[i])
return signed_value
|
6b2b9a968576256738f396eeefba844561e2d2c7
| 3,642,029
|
def get_number_from_user_input(prompt: str, min_value: int, max_value: int) -> int:
"""gets a int integer from user input"""
# input loop
user_input = None
while user_input is None or user_input < min_value or user_input > max_value:
raw_input = input(prompt + f" ({min_value}-{max_value})? ")
try:
user_input = int(raw_input)
if user_input < min_value or user_input > max_value:
print("Invalid input, please try again")
except ValueError:
print("Invalid input, please try again")
return user_input
|
c9df4ac604b3bf8f0f9c2a35added1f23e88048e
| 3,642,032
|
def word_saliency(topic_word_distrib, doc_topic_distrib, doc_lengths):
"""
Calculate word saliency according to [Chuang2012]_ as ``saliency(w) = p(w) * distinctiveness(w)`` for a word ``w``.
.. [Chuang2012] J. Chuang, C. Manning, J. Heer. 2012. Termite: Visualization Techniques for Assessing Textual Topic
Models
:param topic_word_distrib: topic-word distribution; shape KxM, where K is number of topics, M is vocabulary size
:param doc_topic_distrib: document-topic distribution; shape NxK, where N is the number of documents, K is the
number of topics
:param doc_lengths: array of size N (number of docs) with integers indicating the number of terms per document
:return: array of size M (vocabulary size) with word saliency
"""
p_t = marginal_topic_distrib(doc_topic_distrib, doc_lengths)
p_w = marginal_word_distrib(topic_word_distrib, p_t)
return p_w * word_distinctiveness(topic_word_distrib, p_t)
|
47acaa848601192837eceef210389ada090b1fec
| 3,642,033
|
import json
def parse_cl_items(s):
"""Take a json string of checklist items and make a dict of item objects keyed on
item name (id)"""
dispatch = {"floating":Floating,
"weekly":Weekly,
"monthly": Monthly,
"daily":Daily
}
if len(s) == 0:
return []
raw = json.loads(s)
il = []
for d in raw:
t = d.pop('type')
t = t.lower()
il.append(dispatch[t](**d))
return il
|
274374f8ad3048f7bf9f17ed7d43740a83900a63
| 3,642,034
|
def get_queue(queue, flags=FLAGS.ALL, **conn):
"""
Orchestrates all the calls required to fully fetch details about an SQS Queue:
{
"Arn": ...,
"Region": ...,
"Name": ...,
"Url": ...,
"Attributes": ...,
"Tags": ...,
"DeadLetterSourceQueues": ...,
"_version": 1
}
:param queue: Either the queue name OR the queue url
:param flags: By default, set to ALL fields.
:param conn: dict containing enough information to make a connection to the desired account. Must at least have
'assume_role' key.
:return: dict containing a fully built out SQS queue.
"""
# Check if this is a Queue URL or a queue name:
if queue.startswith("https://") or queue.startswith("http://"):
queue_name = queue
else:
queue_name = get_queue_url(QueueName=queue, **conn)
sqs_queue = {"QueueUrl": queue_name}
return registry.build_out(flags, sqs_queue, **conn)
|
b39ea959835fc3ae32042cabac4bc4f9b5f1c425
| 3,642,035
|
def mixed_float_frame():
"""
Fixture for DataFrame of different float types with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
"""
df = DataFrame(tm.getSeriesData())
df.A = df.A.astype('float32')
df.B = df.B.astype('float32')
df.C = df.C.astype('float16')
df.D = df.D.astype('float64')
return df
|
aaef420666cf714c45bb87bf7e1eb484a4c06f69
| 3,642,036
|
import unittest
def create_parsetestcase(durationstring, expectation, format, altstr):
"""
Create a TestCase class for a specific test.
This allows having a separate TestCase for each test tuple from the
PARSE_TEST_CASES list, so that a failed test won't stop other tests.
"""
class TestParseDuration(unittest.TestCase):
'''
A test case template to parse an ISO duration string into a
timedelta or Duration object.
'''
def test_parse(self):
'''
Parse an ISO duration string and compare it to the expected value.
'''
result = parse_duration(durationstring)
self.assertEqual(result, expectation)
def test_format(self):
'''
Take duration/timedelta object and create ISO string from it.
This is the reverse test to test_parse.
'''
if altstr:
self.assertEqual(duration_isoformat(expectation, format),
altstr)
else:
# if durationstring == '-P2W':
# import pdb; pdb.set_trace()
self.assertEqual(duration_isoformat(expectation, format),
durationstring)
return unittest.TestLoader().loadTestsFromTestCase(TestParseDuration)
|
b74dbb969743bc98e22bfd80677da7f02891391e
| 3,642,037
|
import time
import traceback
def draw_data_from_db(host, port=None, pid=None, startTime=None, endTime=None, system=None, disk=None):
"""
Get data from InfluxDB, and visualize
:param host: client IP, required
:param port: port, visualize port data; optional, choose one from port, pid and system
:param pid: pid, visualize pid data; optional, choose one from port, pid and system
:param startTime: Start time; optional
:param endTime: end time; optional
:param system: visualize system data; optional, choose one from port, pid and system
:param disk: disk number; optional
:return:
"""
post_data = {
'types': 'system',
'cpu_time': [],
'cpu': [],
'iowait': [],
'usr_cpu': [],
'mem': [],
'mem_available': [],
'jvm': [],
'io_time': [],
'io': [],
'disk_r': [],
'disk_w': [],
'disk_d': [],
'rec': [],
'trans': [],
'nic': [],
'tcp': [],
'close_wait': [],
'time_wait': [],
'retrans': [],
'disk': disk}
res = {'code': 1, 'flag': 1, 'message': 'Successful!'}
connection = influxdb.InfluxDBClient(cfg.getInflux('host'), cfg.getInflux('port'), cfg.getInflux('username'),
cfg.getInflux('password'), cfg.getInflux('database'))
try:
if startTime and endTime: # If there is a start time and an end time
pass
elif startTime is None and endTime is None: # If the start time and end time do not exist, use the default time.
startTime = '2020-05-20 20:20:20'
endTime = time.strftime('%Y-%m-%d %H:%M:%S')
else: # If the end time does not exist, the current time is used
endTime = time.strftime('%Y-%m-%d %H:%M:%S')
s_time = time.time()
if port:
sql = f"select cpu, wait_cpu, mem, tcp, jvm, rKbs, wKbs, iodelay, close_wait, time_wait from \"{host}\" " \
f"where time>'{startTime}' and time<'{endTime}' and type='{port}' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
post_data['types'] = 'port'
for data in datas.get_points():
post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
post_data['cpu'].append(data['cpu'])
post_data['iowait'].append(data['wait_cpu'])
post_data['mem'].append(data['mem'])
post_data['tcp'].append(data['tcp'])
post_data['jvm'].append(data['jvm'])
post_data['io'].append(data['iodelay'])
post_data['disk_r'].append(data['rKbs'])
post_data['disk_w'].append(data['wKbs'])
post_data['close_wait'].append(data['close_wait'])
post_data['time_wait'].append(data['time_wait'])
else:
res['message'] = f'No monitoring data of the port {port} is found, ' \
f'please check the port or time setting.'
res['code'] = 0
if disk:
sql = f"select rec, trans, net from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
f"type='system' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
for data in datas.get_points():
post_data['nic'].append(data['net'])
post_data['rec'].append(data['rec'])
post_data['trans'].append(data['trans'])
else:
res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
res['code'] = 0
if pid:
pass
if system and disk:
disk_n = disk.replace('-', '')
disk_r = disk_n + '_r'
disk_w = disk_n + '_w'
disk_d = disk_n + '_d'
sql = f"select cpu, iowait, usr_cpu, mem, mem_available, {disk_n}, {disk_r}, {disk_w}, {disk_d}, rec, trans, " \
f"net, tcp, retrans from \"{host}\" where time>'{startTime}' and time<'{endTime}' and " \
f"type='system' tz('Asia/Shanghai')"
logger.info(f'Execute sql: {sql}')
datas = connection.query(sql)
if datas:
post_data['types'] = 'system'
for data in datas.get_points():
post_data['cpu_time'].append(data['time'][:19].replace('T', ' '))
post_data['cpu'].append(data['cpu'])
post_data['iowait'].append(data['iowait'])
post_data['usr_cpu'].append(data['usr_cpu'])
post_data['mem'].append(data['mem'])
post_data['mem_available'].append(data['mem_available'])
post_data['rec'].append(data['rec'])
post_data['trans'].append(data['trans'])
post_data['nic'].append(data['net'])
post_data['io'].append(data[disk_n])
post_data['disk_r'].append(data[disk_r])
post_data['disk_w'].append(data[disk_w])
post_data['disk_d'].append(data[disk_d])
post_data['tcp'].append(data['tcp'])
post_data['retrans'].append(data['retrans'])
else:
res['message'] = 'No monitoring data is found, please check the disk number or time setting.'
res['code'] = 0
res.update({'post_data': post_data})
logger.info(f'Time consuming to query is {time.time() - s_time}')
# lines = get_lines(post_data) # Calculate percentile, 75%, 90%, 95%, 99%
# res.update(lines)
except Exception as err:
logger.error(traceback.format_exc())
res['message'] = str(err)
res['code'] = 0
del connection, post_data
return res
|
73aa86b18dff59fdf88eff0b173e32fa4f3ed3ed
| 3,642,038
|
def get_sample_generator(filenames, batch_size, model_config):
"""Set data loader generator according to different tasks.
Args:
filenames(list): filenames of the input data.
batch_size(int): size of the each batch.
model_config(dict): the dictionary containing model configuration.
Raises:
NameError: if key ``task`` in ``model_config`` is invalid.
Returns:
reader(func): data reader.
"""
task = model_config['task']
if task == 'pretrain':
return pretrain_sample_reader(filenames, batch_size)
elif task == 'seq_classification':
label_name = model_config.get('label_name', 'labels')
return sequence_sample_reader(filenames, batch_size, label_name)
elif task in ['classification', 'regression']:
label_name = model_config.get('label_name', 'labels')
return normal_sample_reader(filenames, batch_size, label_name)
else:
raise NameError('Task %s is not supported.' % task)
|
2033e081addf26a8f9074591b2f8992f39ed86c1
| 3,642,039
|
def execute_batch(table_type, bulk, count, topic_id, topic_name):
"""
Execute bulk operation. Return True if errors occurred during the bulk
execute, False otherwise.
"""
errors = False
try:
result = bulk.execute()
if result['nModified'] != count:
print(
"bulk execute of {} data for {}:{}.\nnumber of op sent to "
"bulk execute ({}) does not match nModified count".format(
table_type, topic_id, topic_name, count))
print ("bulk execute result {}".format(result))
errors = True
except BulkWriteError as ex:
print(str(ex.details))
errors = True
return errors
|
954de6b5bfefcea7a7bfdebdc7cb7b1b1ba1dd95
| 3,642,040
|
def process_domain_assoc(url, domain_map):
"""
Replace domain name with a more fitting tag for that domain.
User defined. Mapping comes from provided config file
Mapping in yml file is as follows:
tag:
- url to map to tag
- ...
A small example domain_assoc.yml is included
"""
if not domain_map:
return url
for key in domain_map:
if url in domain_map[key]:
return key
return url
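# Usage sketch (illustrative only): a hypothetical mapping mirroring the
# structure of domain_assoc.yml described above.
domain_map = {
    'news': ['bbc.com', 'nytimes.com'],
    'video': ['youtube.com'],
}
print(process_domain_assoc('youtube.com', domain_map))   # 'video'
print(process_domain_assoc('example.org', domain_map))   # unchanged: 'example.org'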
|
29c0f81a4959d97cd91f839cbe511eb46872b5ec
| 3,642,041
|
def process_auc(gt_list, pred_list):
"""
Process AUC (AUROC) over lists.
:param gt_list: Ground truth list
:type gt_list: np.array
:param pred_list: Predictions list
:type pred_list: np.array
:return: Mean score over the lists (as written, the mean F1 score; the AUC call is commented out)
:rtype: float
"""
res = []
for i, gt in enumerate(gt_list):
if np.amax(gt) != 0:
pred = pred_list[i].flatten()
gt = gt.flatten()
# res.append(roc_auc_score(gt, pred))
res.append(f1_score(gt, pred))
return np.mean(res)
|
2f1de3ba0d5f1154ef4a5888d01d398fd8533793
| 3,642,042
|
def transform(
Y,
transform_type=None,
dtype=np.float32):
""" Transform STFT feature
Args:
Y: STFT
(n_frames, n_bins)-shaped np.complex array
transform_type:
None, "log"
dtype: output data type
np.float32 is expected
Returns:
Y (numpy.array): transformed feature
"""
Y = np.abs(Y)
if transform_type == 'log':
Y = np.log(np.maximum(Y, 1e-06))
return Y.astype(dtype)
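# Usage sketch (illustrative only): a random complex array standing in for
# an STFT; assumes numpy is imported as np.
import numpy as np

Y = np.random.randn(100, 257) + 1j * np.random.randn(100, 257)
feat = transform(Y, transform_type='log')
print(feat.dtype, feat.shape)  # float32 (100, 257)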
|
96613eb77c20c1a09a3e41af176a36d2ce4d8080
| 3,642,043
|
def tol_vif_table(df, n = 5):
"""
:param df: dataframe
:param n: number of pairs to show
:return: table of correlations, tolerances, and VIF
"""
cor = get_top_abs_correlations(df, n)
tol = 1 - cor ** 2
vif = 1 / tol
cor_table = pd.concat([cor, tol, vif], axis=1)
cor_table.columns = ['Correlation', 'Tolerance', 'VIF']
return cor_table
|
94cf5715951892375e92ada019476b9ae3d09577
| 3,642,044
|
import random
def shuffled(iterable):
"""Randomly shuffle a copy of iterable."""
items = list(iterable)
random.shuffle(items)
return items
|
cd554d4a31e042dc1d2b4c7b246528a5184d558e
| 3,642,045
|
def test_rule(rule_d, ipv6=False):
""" Return True if the rule is a well-formed dictionary, False otherwise """
try:
_encode_iptc_rule(rule_d, ipv6=ipv6)
return True
except Exception:
return False
|
7435cb900117e4c273b4157b2f25ba56aefd1355
| 3,642,047
|
def intersects(hp, sphere):
"""
The closed, upper halfspace intersects the sphere
(i.e. there exists a spatial relation between the two)
"""
return signed_distance(sphere.center, hp) + sphere.radius >= 0.0
|
9366824f03a269d0fa9e96f34260c621ad610d16
| 3,642,048
|
def invert_center_scale(X_cs, X_center, X_scale):
"""
This function inverts whatever centering and scaling was done by
``center_scale`` function:
.. math::
\mathbf{X} = \mathbf{X_{cs}} \\cdot \mathbf{D} + \mathbf{C}
**Example:**
.. code:: python
from PCAfold import center_scale, invert_center_scale
import numpy as np
# Generate dummy data set:
X = np.random.rand(100,20)
# Center and scale:
(X_cs, X_center, X_scale) = center_scale(X, 'range', nocenter=False)
# Uncenter and unscale:
X = invert_center_scale(X_cs, X_center, X_scale)
:param X_cs:
centered and scaled data set :math:`\mathbf{X_{cs}}`.
:param X_center:
vector of centers :math:`\mathbf{C}` applied on the original data set :math:`\mathbf{X}`.
:param X_scale:
vector of scales :math:`\mathbf{D}` applied on the original data set :math:`\mathbf{X}`.
:return:
- **X** - original data set :math:`\mathbf{X}`.
"""
try:
(_, n_variables) = np.shape(X_cs)
except:
n_variables = 1
if n_variables == 1:
X = X_cs * X_scale + X_center
else:
X = np.zeros_like(X_cs, dtype=float)
for i in range(0, n_variables):
X[:, i] = X_cs[:, i] * X_scale[i] + X_center[i]
return(X)
|
e37d82e7da932ea760981bb5a472799cd5a4d3d9
| 3,642,049
|
def weighted_smoothing(image, diffusion_weight=1e-4, data_weight=1.0,
weight_function_parameters={}):
"""Weighted smoothing of images: smooth regions, preserve sharp edges.
Parameters
----------
image : NumPy array
diffusion_weight : float or NumPy array, optional
The weight of the diffusion for smoothing. It can be provided
as an array the same shape as the image, or as a scalar that
will multiply the default weight matrix obtained from the edge
indicator function.
data_weight : float or NumPy array, optional
The weight of the image data to preserve fidelity to the image.
It can be provided as an array the same shape as the image, or
as a scalar that will multiply the default weight matrix obtained
by subtracting the edge indicator function from 1.0.
weight_function_parameters : dict, optional
The parameters sigma and rho for the edge indicator function.
Sigma is the standard deviation for the gaussian gradient
applied to the image: dI = N * gaussian_gradient_magnitude(image),
where N = min(image.shape) - 1, and rho is the scaling weight in
the definition of the edge indicator function: 1/(1 + (dI/rho)**2)
The default values are: {'sigma': 3.0, 'rho': None}.
If rho is None, it is calculated by rho = 0.23 * dI.max().
Returns
-------
smoothed_image : NumPy array
Raises
------
ValueError
If diffusion_weight or data_weight have the wrong type.
"""
if type( diffusion_weight ) is np.ndarray:
beta = diffusion_weight
elif not np.isscalar( diffusion_weight ):
raise ValueError("diffusion_weight can only be a scalar number "\
"or a NumPy array the same shape as the image.")
else:
sigma = weight_function_parameters.get('sigma', 3.0)
rho = weight_function_parameters.get('rho', None)
if rho is None:
N = min(image.shape) - 1.0
dI = N * gaussian_gradient_magnitude( image, sigma, mode='nearest' )
rho = 0.23 * dI.max()
g = EdgeIndicatorFunction( image, rho, sigma )
G = g._g
beta = (G - G.min()) / (G.max() - G.min())
beta *= diffusion_weight
if type(data_weight) is np.ndarray:
alpha = data_weight
elif np.isscalar(data_weight):
alpha = data_weight * (beta.max() - beta) / (beta.max() - beta.min())
else:
raise ValueError("data_weight can only be None or a scalar number "\
"or a NumPy array the same shape as the image.")
rhs = alpha * image
grid = Grid2d( image.shape )
smooth_image = fem.solve_elliptic_pde( grid, alpha, beta, rhs )
return smooth_image
|
3c861b9a8878f2d85d581d8f8d1f62cf017a7920
| 3,642,050
|
import random
def get_random_tablature(tablature : Tablature, constants : Constants):
"""make a copy of the tablature under inspection and generate new random tablatures"""
new_tab = deepcopy(tablature)
for tab_instance, new_tab_instance in zip(tablature.tablature, new_tab.tablature):
if tab_instance.string == 6:
string, fret = random.choice(determine_combinations(tab_instance.fundamental, constants))
new_tab_instance.string, new_tab_instance.fret = string, fret
elif constants.init_mutation_rate > random.random():
string, fret = get_random_position(tab_instance.string, tab_instance.fret, constants)
new_tab_instance.string, new_tab_instance.fret = string, fret
return new_tab
|
befa3a488e2ca53e37032ed102a12b250594bd90
| 3,642,051
|
def _staticfy(value):
"""
Allows keeping backward compatibility with OpenWISP instances which
were using the previous implementation of OPENWISP_ADMIN_THEME_LINKS
and OPENWISP_ADMIN_THEME_JS, which didn't automatically pre-process
those lists of static files with django.templatetags.static.static()
and hence were not configured to allow those files to be found
by the staticfile loaders. If static() raises ValueError, we assume
one of two cases:
1. An old instance has upgraded and we keep returning the old value,
so the file will continue being found although unprocessed by
django's static file machinery.
2. The value passed is wrong; instead of failing loudly we fail silently.
"""
try:
return static(value)
# maintain backward compatibility
except ValueError:
return value
|
2ac932a178a86d301dbb15602f3b59edb39cf3c1
| 3,642,052
|
def compare_rep(topic, replication_factor):
# type: (str, int) -> bool
"""Compare replication-factor in the playbook with the one actually set.
Keyword arguments:
topic -- topicname
replication_factor -- number of replications
Return:
bool -- True if change is needed, else False
"""
try:
metadata = admin.list_topics() # type(metadata.topics) = dict
except KafkaException as e:
msg = (
"Can not get metadata of topic %s: %s"
% (topic, e)
)
fail_module(msg)
old_rep = len(metadata.topics[topic].partitions[0].replicas) #type(partitions) = dict, access replicas with partition-id as key over .replicas-func
if replication_factor != old_rep:
if module.params['zookeeper'] is None:
msg = (
"For modifying the replication_factor of a topic,"
" you also need to set the zookeeper-parameter."
" At the moment, replication_factor is set to %s"
" and you tried to set it to %s."
% (old_rep, replication_factor)
)
fail_module(msg)
diff['before']['replication_factor'] = old_rep
diff['after']['replication_factor'] = replication_factor
return True
# if replication_factor == old_rep:
return False
|
882b028d078e507f9673e3ead6549da100a83226
| 3,642,053
|
def verbosity_option_parser() -> ArgumentParser:
"""
Creates a parser suitable to parse the verbosity option in different subparsers
"""
parser = ArgumentParser(add_help=False)
parser.add_argument('--verbosity', dest=VERBOSITY_ARGNAME, type=str.upper,
choices=ALLOWED_VERBOSITY,
help='verbosity level to use for this command and subsequent ones.')
return parser
|
c24f0704f1632cc0af416cf2c0a3e65c6845566f
| 3,642,054
|
import snappi
def b2b_config(api):
"""Demonstrates creating a back to back configuration of tx and rx
ports, devices and a single flow using those ports as endpoints for
transmit and receive.
"""
config = api.config()
config.options.port_options.location_preemption = True
tx_port, rx_port = config.ports \
.port(name='Tx Port', location='10.36.74.26;02;13') \
.port(name='Rx Port', location='10.36.74.26;02;14')
tx_device, rx_device = (config.devices \
.device(name='Tx Devices')
.device(name='Rx Devices')
)
tx_device.ethernets.ethernet(port_name=tx_port.name)
rx_device.ethernets.ethernet(port_name=rx_port.name)
tx_device.ethernets[-1].name = 'Tx Eth'
tx_device.ethernets[-1].mac = '00:00:01:00:00:01'
tx_device.ethernets[-1].ipv4_addresses.ipv4()
tx_device.ethernets[-1].ipv4_addresses[-1].name = 'Tx Ipv4'
tx_device.ethernets[-1].ipv4_addresses[-1].address = '1.1.1.1'
tx_device.ethernets[-1].ipv4_addresses[-1].gateway = '1.1.2.1'
tx_device.ethernets[-1].ipv4_addresses[-1].prefix = 16
vlan1, vlan2 = tx_device.ethernets[-1].vlans.vlan(name='v1').vlan(name='v2')
vlan1.id = 1
vlan2.id = 2
rx_device.ethernets[-1].name = 'Rx Eth'
rx_device.ethernets[-1].mac = '00:00:01:00:00:02'
flow = config.flows.flow(name='Tx -> Rx Flow')[0]
flow.tx_rx.port.tx_name = tx_port.name
flow.tx_rx.port.rx_name = rx_port.name
flow.size.fixed = 128
flow.rate.pps = 1000
flow.duration.fixed_packets.packets = 10000
eth, vlan, ip, tcp = flow.packet.ethernet().vlan().ipv4().tcp()
eth.src.value = '00:00:01:00:00:01'
eth.dst.values = ['00:00:02:00:00:01', '00:00:02:00:00:01']
eth.dst.metric_group = 'eth dst mac'
ip.src.increment.start = '1.1.1.1'
ip.src.increment.step = '0.0.0.1'
ip.src.increment.count = 10
ip.dst.decrement.start = '1.1.2.200'
ip.dst.decrement.step = '0.0.0.1'
ip.dst.decrement.count = 10
ip.priority.dscp.phb.values = [8, 16, 32]
ip.priority.dscp.ecn.value = 1
tcp.src_port.increment.start = 10
tcp.dst_port.increment.start = 1
return config
|
60a838885c058f5c65d3b331082f525b2e04b5c7
| 3,642,055
|
def fib(n):
"""Return the n'th Fibonacci number."""
if n < 0:
raise ValueError("Fibonacci number are only defined for n >= 0")
return _fib(n)
|
4aee5fbb4c9a497ffc4f63529b226ad3b08c0ef4
| 3,642,057
|
def gen(n):
"""
Compute the n-th generator polynomial.
That is, compute (x + 2 ** 1) * (x + 2 ** 2) * ... * (x + 2 ** n).
"""
p = Poly([GF(1)])
two = GF(1)
for i in range(1, n + 1):
two *= GF(2)
p *= Poly([two, GF(1)])
return p
|
29e6b1f164d93b21a2d98352ff60c8cf7a8d5864
| 3,642,058
|
def get_prefix(bot, message):
"""A callable Prefix for our bot. This could be edited to allow per server prefixes."""
# Notice how you can use spaces in prefixes. Try to keep them simple though.
prefixes = ['!']
# If we are in a guild, we allow for the user to mention us or use any of the prefixes in our list.
return commands.when_mentioned_or(*prefixes)(bot, message)
|
35567d49b747f51961fad861e00d9f9524126641
| 3,642,059
|
def parse_discontinuous_phrase(phrase: str) -> str:
"""
Transform discontinuous phrase into a regular expression. Discontinuity is
interpreted as taking place at any whitespace outside of terms grouped by
parentheses. That is, the whitespace indicates that anything can be in between
the left side and right side.
Example 1: x1 (x2 (x3"x4")) becomes x1.+(x2 (x3|x4))
"""
level = 0
parsed_phrase = ""
for index, char in enumerate(phrase):
if char == "(":
level += 1
elif char == ")":
level -= 1
elif char == " " and level == 0:
char = ".+"
parsed_phrase += char
return parsed_phrase
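# Usage sketch (illustrative only): whitespace at parenthesis level 0
# becomes ".+", whitespace inside parentheses is preserved.
import re

pattern = parse_discontinuous_phrase('alpha (beta gamma)')
print(pattern)  # 'alpha.+(beta gamma)'
print(bool(re.search(pattern, 'alpha something beta gamma')))  # True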
|
58fe394a08931e7e79afc00b9bb0e8e9981f3c81
| 3,642,060
|
def preprocess(frame):
"""
Preprocess the images before they are sent into the model
"""
#Read the image
bgr_img = frame.astype(np.float32)
#Opencv reads the picture as (N) HWC to get the HW value
orig_shape = bgr_img.shape[:2]
#Normalize the picture
bgr_img = bgr_img / 255.0
#Convert the picture to Lab space
lab_img = cv.cvtColor(bgr_img, cv.COLOR_BGR2Lab)
#Gets the L
orig_l = lab_img[:, :, 0]
if not orig_l.flags['C_CONTIGUOUS']:
orig_l = np.ascontiguousarray(orig_l)
#resize
lab_img = cv.resize(lab_img, (MODEL_WIDTH, MODEL_HEIGHT)).astype(np.float32)
l_data = lab_img[:, :, 0]
if not l_data.flags['C_CONTIGUOUS']:
l_data = np.ascontiguousarray(l_data)
#The L-part minus the average
l_data = l_data - 50
return orig_shape, orig_l, l_data
|
33a24a31ae9e25efb080037a807897f2762656c0
| 3,642,061
|
def draw_roc_curve(y_true, y_score, annot=True, name=None, ax=None):
"""Draws a ROC (Receiver Operating Characteristic) curve using class rankings predicted by a classifier.
Args:
y_true (array-like): True class labels (0: negative; 1: positive)
y_score (array-like): Predicted probability of positive-class membership
annot (bool, optional): Whether to create and add a label to the curve with the computed AUC
name (str, optional): Name of the curve to add to the AUC label
ax (Matplotlib.Axes, optional): The axes on which to draw the ROC curve
Returns:
ax (Matplotlib.Axes): The axes containing the ROC curve
"""
fpr, tpr, _ = roc_curve(y_true, y_score)
if ax is None:
ax = plt.gca()
# Add a label displaying the computed area under the curve
if annot:
roc_auc = auc(fpr, tpr)
if name is not None:
label = f'{name} AUC = {roc_auc:.3f}'
else:
label = f'AUC = {roc_auc:.3f}'
else:
label=None
ax.plot(fpr, tpr, label=label)
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.legend(loc='best')
return ax
|
cf59a02c5f72f728b0d9179a4eee3f012da564df
| 3,642,062
|
def make_links_absolute(soup, base_url):
"""
Replace relative links with absolute links.
This one modifies the soup object.
"""
assert base_url is not None
#
for tag in soup.findAll('a', href=True):
tag['href'] = urljoin(base_url, tag['href'])
return soup
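# Usage sketch (illustrative only): assumes BeautifulSoup and urljoin
# (used inside the function) are available in the same module.
from urllib.parse import urljoin
from bs4 import BeautifulSoup

soup = BeautifulSoup('<a href="/docs/index.html">docs</a>', 'html.parser')
make_links_absolute(soup, 'https://example.com/base/')
print(soup.a['href'])  # https://example.com/docs/index.html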
|
52d328c944d4a80b4f0a027a3b72f2fcebc152a9
| 3,642,063
|
def get_predictions(model, dataloader):
"""takes a trained model and validation or test dataloader
and applies the model on the data producing predictions
binary version
"""
model.eval()
all_y_hats = []
all_preds = []
all_true = []
all_attention = []
for batch_id, (data, label) in enumerate(dataloader):
label = label.squeeze()
bag_label = label[0]
bag_label = bag_label.cpu()
y_hat, preds, attention = model(data.to("cuda:0"))
y_hat = y_hat.squeeze(dim=0) # for binary setting
y_hat = y_hat.cpu()
preds = preds.squeeze(dim=0) # for binary setting
preds = preds.cpu()
all_y_hats.append(y_hat.numpy().item())
all_preds.append(preds.numpy().item())
all_true.append(bag_label.numpy().item())
attention_scores = np.round(attention.cpu().data.numpy()[0], decimals=3)
all_attention.append(attention_scores)
print("Bag Label:" + str(bag_label))
print("Predicted Label:" + str(preds.numpy().item()))
print("attention scores (unique ones):")
print(np.unique(attention_scores))
# print(attention_scores)
del data, bag_label, label
return all_y_hats, all_preds, all_true
|
f58a5863c211a24665db7348606d39232ad8af19
| 3,642,064
|
def _parse_objective(objective):
"""
Modified from deephyper/nas/run/util.py function compute_objective
"""
if isinstance(objective, str):
negate = (objective[0] == '-')
if negate:
objective = objective[1:]
split_objective = objective.split('__')
kind = split_objective[1] if len(split_objective) > 1 else 'last'
mname = split_objective[0]
# kind: min/max/last
if negate:
if kind == 'min':
kind = 'max'
elif kind == 'max':
kind = 'min'
return mname, kind
elif callable(objective):
logger.warning('objective is a callable, not a str, setting kind="last"')
return None, 'last'
else:
raise TypeError(f'unknown objective type {type(objective)}')
|
df8f23464cd04be9a3c61a2969200a0c98c4471e
| 3,642,066
|
def redirect_path_context_processor(request):
"""Procesador para generar el redirect_to para la localización en el selector de idiomas"""
return {'language_select_redirect_to': translate_url(request.path, settings.LANGUAGE_CODE)}
|
fdb62f3079079d63c280d2b887468c7893aafcf8
| 3,642,067
|
def RightCenter(cell=None):
"""Take up horizontal and vertical space, and place the cell on the right center of it."""
return FillSpace(cell, "right", "center")
|
ce64a346658813ab281168864e357cec1ba09c0b
| 3,642,068
|
def name_standard(name):
""" return the Standard version of the input word
:param name: the name that should be standard
:return name: the standard form of word
"""
reponse_name = name[0].upper() + name[1:].lower()
return reponse_name
|
65273cafaaa9aceb803877c2071dc043a0d598eb
| 3,642,069
|
def getChildElementsListWithTagAttribValueMatch(parent, tag, attrib, value):
"""
This method takes a parent element as input and finds all the sub elements (children)
containing specified tag and an attribute with the specified value.
Returns a list of child elements.
Arguments:
parent = parent element
tag = tag value of the sub-element(child) to be searched for.
attrib = attribute name for the sub-element with above given tag should have.
value = attribute value that the sub-element with above given tag, attribute should have.
"""
child_elements = parent.findall(".//%s[@%s='%s']" % (tag, attrib, value))
return child_elements
|
cae87e6548190ad0a675019b397eeb88289533ee
| 3,642,070
|
def f_engine(air_volume, energy_MJ):
"""Ramjet engine.
The incoming airflow enters the heater, where it expands,
and is then expelled from the nozzle as a jet.
"""
# The working substance is atmospheric air:
working_mass = air_volume * AIR_DENSITY
# Split the energy into a useful part and a wasted part:
useful_energy_MJ = energy_MJ * ENGINE_USEFUL_ENERGY
useless_energy_MJ = energy_MJ - useful_energy_MJ
useful_energy_KJ = useful_energy_MJ * 1000
# The useful energy goes into heating the air:
working_mass_heat = AIR_HEAT_CAPACITY * useful_energy_KJ / working_mass
# Adjust for the ambient air temperature and convert to the Kelvin scale:
working_mass_heat = AIR_TEMPERATURE + working_mass_heat + KELVIN_SCALE
# The pressure of the heated air increases:
working_mass_pressure = f_heated_gas_pressure(working_mass_heat)
# The air is fed into a de Laval nozzle, which gives the jet exhaust velocity:
reactive_speed = f_de_laval_nozzle(working_mass_heat,
working_mass_pressure, ATMOSPHERIC_PRESSURE)
# The maximum thrust is the working mass multiplied by its velocity:
max_engine_thrust = f_jet_force(working_mass, reactive_speed)
# The wasted thermal energy also has to be dissipated somewhere:
engine_output = (max_engine_thrust, working_mass, reactive_speed, useless_energy_MJ)
return engine_output
|
3d307622fca17f16f03e49bd7f8b5b742217ee37
| 3,642,071
|
def not_numbers():
"""Non-numbers for (i)count."""
return [None, [1, 2], {-3, 4}, (6, 9.7)]
|
31f935916c8463f6192d0b2770c1034ee70a4fc5
| 3,642,072
|
import requests
def get_agol_token():
"""requests and returns an ArcGIS Token for the pre-registered application.
Client id and secrets are managed through the ArcGIS Developer's console.
"""
params = {
'client_id': app.config['ESRI_APP_CLIENT_ID'],
'client_secret': app.config['ESRI_APP_CLIENT_SECRET'],
'grant_type': "client_credentials"
}
request = requests.get(
'https://www.arcgis.com/sharing/oauth2/token',
params=params
)
token = request.json()
print("AGOL token acquired: {0}".format(token))
return token
|
7b240ef57264c1a88f10f4c06c9492a71dac8c11
| 3,642,073
|
def default_validate(social_account):
"""
Default function for ONESOCIAL_VALIDATE_FUNC. Does nothing.
"""
return None
|
634382dbfe64eeed38225f8dca7e16105c40f7c2
| 3,642,074
|
import requests
def pull_early_late_by_stop(line_number,SWIFTLY_API_KEY, dateRange, timeRange):
"""
Pulls from the Swiftly APIS to get OTP.
Follow the docs: http://dashboard.goswift.ly/vta/api-guide/docs/otp
"""
line_table = pd.read_csv('line_table.csv')
line_table.rename(columns={"DirNum":"direction_id","DirectionName":"DIRECTION_NAME"},inplace=True)
line_table['direction_id'] = line_table['direction_id'].astype(str)
headers = {'Authorization': SWIFTLY_API_KEY}
payload = {'agency': 'vta', 'route': line_number, 'dateRange': dateRange,'timeRange': timeRange, 'onlyScheduleAdherenceStops':'True'}
url = 'https://api.goswift.ly/otp/by-stop'
r = requests.get(url, headers=headers, params=payload)
try:
swiftly_df = pd.DataFrame(r.json()['data'])
swiftly_df.rename(columns={"stop_id":"STOP_ID"},inplace=True)
swiftly_df = pd.merge(swiftly_df,line_table.query('lineabbr==%s'%line_number)[['direction_id','DIRECTION_NAME']])
swiftly_df['STOP_ID'] = swiftly_df['STOP_ID'].astype(int)
return swiftly_df
except KeyError:
print(r.json())
|
a64ad2e5fe84ee5ab5a49c8122f28b693382cf8e
| 3,642,075
|
def create_build_job(user, project, config, code_reference):
"""Get or Create a build job based on the params.
If a build job already exists, then we check if the build has already an image created.
If the image does not exists, and the job is already done we force create a new job.
Returns:
tuple: (build_job, image_exists[bool], build_status[bool])
"""
build_job, rebuild = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference)
if build_job.succeeded and not rebuild:
# Check if the image was built less than 6 hours ago
return build_job, True, False
if check_image(build_job=build_job):
# Check if image exists already
return build_job, True, False
if build_job.is_done:
build_job, _ = BuildJob.create(
user=user,
project=project,
config=config,
code_reference=code_reference,
nocache=True)
if not build_job.is_running:
# We need to build the image first
auditor.record(event_type=BUILD_JOB_STARTED_TRIGGERED,
instance=build_job,
actor_id=user.id,
actor_name=user.username)
build_status = start_dockerizer(build_job=build_job)
else:
build_status = True
return build_job, False, build_status
|
879ed02f142326b4a793bef2d8bcbc1de4faf64c
| 3,642,076
|
import json
def create(ranger_client: RangerClient, config: str):
"""
Creates a new Apache Ranger service repository.
"""
return ranger_client.create_service(json.loads(config))
|
a53fa80f94960f89410a60aae04a04417491f332
| 3,642,077
|
def populate_glue_catalogue_from_metadata(table_metadata, db_metadata, check_existence = True):
"""
Take metadata and make requisite calls to AWS API using boto3
"""
database_name = db_metadata["name"]
database_description = db_metadata["description"]
table_name = table_metadata["table_name"]
tbl_def = metadata_to_glue_table_definition(table_metadata, db_metadata)
if check_existence:
try:
glue_client.get_database(Name=database_name)
except glue_client.exceptions.EntityNotFoundException:
overwrite_or_create_database(database_name, db_metadata["description"])
try:
glue_client.delete_table(DatabaseName=database_name, Name=table_name)
except glue_client.exceptions.EntityNotFoundException:
pass
return glue_client.create_table(
DatabaseName=database_name,
TableInput=tbl_def)
|
c09af0344b523213010af8fdbcc0ed35328f165e
| 3,642,078
|
def choose_komoot_tour_live():
"""
Login with user credentials, download tour information,
choose a tour, and download it. Can be passed to
:func:`komoog.gpx.convert_tour_to_gpx_tracks`
afterwards.
"""
tours, session = get_tours_and_session()
for idx in range(len(tours)):
print(f"({idx+1}) {tours[idx]['name']}")
tour_id = int(input("Tour ID: "))
tour_id -= 1
tour = get_tour(tours,tour_id,session)
return tour
|
3be625643d6861c9aaff910506dce53e3f336e40
| 3,642,079
|
def root():
"""
The root stac page links to each collection (product) catalog
"""
return _stac_response(
dict(
**stac_endpoint_information(),
links=[
dict(
title="Collections",
description="All product collections",
rel="children",
type="application/json",
href=url_for(".collections"),
),
dict(
title="Arrivals",
description="Most recently added items",
rel="child",
type="application/json",
href=url_for(".arrivals"),
),
dict(
title="Item Search",
rel="search",
type="application/json",
href=url_for(".stac_search"),
),
dict(rel="self", href=request.url),
# Individual Product Collections
*(
dict(
title=product.name,
description=product.definition.get("description"),
rel="child",
href=url_for(".collection", collection=product.name),
)
for product, product_summary in _model.get_products_with_summaries()
),
],
conformsTo=[
"https://api.stacspec.org/v1.0.0-beta.1/core",
"https://api.stacspec.org/v1.0.0-beta.1/item-search",
],
)
)
|
0904122654a1ce71264489590a2a1813dad31689
| 3,642,080
|
def recursive_dict_of_lists(d, helper=None, prev_key=None):
"""
Builds dictionary of lists by recursively traversing a JSON-like
structure.
Arguments:
d (dict): JSON-like dictionary.
prev_key (str): Prefix used to create dictionary keys like: prefix_key.
Passed by recursive step, not intended to be used.
helper (dict): In case d contains nested dictionaries, you can specify
a helper dictionary with 'key' and 'value' keys to specify where to
look for keys and values instead of recursive step. It helps with
cases like: {'action': {'type': 'step', 'amount': 1}}, by passing
{'key': 'type', 'value': 'amount'} as a helper you'd get
{'action_step': [1]} as a result.
"""
d_o_l = {}
if helper is not None and helper['key'] in d.keys() and helper['value'] in d.keys():
if prev_key is not None:
key = f"{prev_key}_{helper['key']}"
else:
key = helper['key']
if key not in d_o_l.keys():
d_o_l[key] = []
d_o_l[key].append(d[helper['value']])
return d_o_l
for k, v in d.items():
if isinstance(v, dict):
d_o_l.update(recursive_dict_of_lists(v, helper=helper, prev_key=k))
else:
if prev_key is not None:
key = f'{prev_key}_{k}'
else:
key = k
if key not in d_o_l.keys():
d_o_l[key] = []
if isinstance(v, list):
d_o_l[key].extend(v)
else:
d_o_l[key].append(v)
return d_o_l
|
c615582febbd043adae6788585d004aabf1ac7e3
| 3,642,081
|
def same_shape(shape1, shape2):
"""
Checks if two shapes are the same
Parameters
----------
shape1 : tuple
First shape
shape2 : tuple
Second shape
Returns
-------
flag : bool
True if both shapes are the same (same length and dimensions)
"""
if len(shape1) != len(shape2):
return False
for i in range(len(shape1)):
if shape1[i] != shape2[i]:
return False
return True
|
9452f7973e510532cee587f2bf49a146fb8cc46e
| 3,642,082
|
def get_reference():
"""Get DrugBank references."""
return _get_model(drugbank.Reference)
|
b4d26c24559883253f3894a1c56151e1809e4050
| 3,642,083
|
def decode_matrix_fbs(fbs):
"""
Given an FBS-encoded Matrix, return a Pandas DataFrame the contains the data and indices.
"""
matrix = Matrix.Matrix.GetRootAsMatrix(fbs, 0)
n_rows = matrix.NRows()
n_cols = matrix.NCols()
if n_rows == 0 or n_cols == 0:
return pd.DataFrame()
if matrix.RowIndexType() is not TypedArray.TypedArray.NONE:
raise ValueError("row indexing not supported for FBS Matrix")
columns_length = matrix.ColumnsLength()
columns_index = deserialize_typed_array((matrix.ColIndexType(), matrix.ColIndex()))
if columns_index is None:
columns_index = range(0, n_cols)
# sanity checks
if len(columns_index) != n_cols or columns_length != n_cols:
raise ValueError("FBS column count does not match number of columns in underlying matrix")
columns_data = {}
columns_type = {}
for col_idx in range(0, columns_length):
col = matrix.Columns(col_idx)
tarr = (col.UType(), col.U())
data = deserialize_typed_array(tarr)
columns_data[columns_index[col_idx]] = data
if len(data) != n_rows:
raise ValueError("FBS column length does not match number of rows")
if col.UType() is TypedArray.TypedArray.JSONEncodedArray:
columns_type[columns_index[col_idx]] = "category"
df = pd.DataFrame.from_dict(data=columns_data).astype(columns_type, copy=False)
# more sanity checks
if not df.columns.is_unique or len(df.columns) != n_cols:
raise KeyError("FBS column indices are not unique")
return df
|
d3ffdd5f0d74a6e07b47fac175dae4ad6035cda8
| 3,642,085
|
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Track states and offer events for sensors."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
return True
|
eaeee6df6c8b632fb331884236f7b3b47d551683
| 3,642,086
|
def _all_lists_equal_lenght(values: t.List[t.List[str]]) -> bool:
"""
Tests to see if all the lengths of all the elements are the same
"""
for vn in values:
if len(values[0]) != len(vn):
return False
return True
|
9fd7db874658822d7a48d5f27340e0dfd5a2d177
| 3,642,087
|
def wind_shear(
shear: str, unit_alt: str = "ft", unit_wind: str = "kt", spoken: bool = False
) -> str:
"""Translate wind shear into a readable string
Ex: Wind shear 2000ft from 140 at 30kt
"""
if not shear or "WS" not in shear or "/" not in shear:
return ""
shear = shear[2:].rstrip(unit_wind.upper()).split("/")
wdir = core.spoken_number(shear[1][:3], True) if spoken else shear[1][:3]
return f"Wind shear {int(shear[0])*100}{unit_alt} from {wdir} at {shear[1][3:]}{unit_wind}"
|
b7aaf5253e251393a508de2d8080a5b3458c45f6
| 3,642,088
|
def root_hash(hashes):
"""
Compute the root hash of a merkle tree with the given list of leaf hashes
"""
# the number of hashes must be a power of two
assert len(hashes) & (len(hashes) - 1) == 0
while len(hashes) > 1:
hashes = [sha256(l + r).digest() for l, r in zip(*[iter(hashes)] * 2)]
return hashes[0]
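# Usage sketch (illustrative only): four leaf hashes (a power of two, as the
# assertion requires); sha256 from hashlib is what the function expects.
from hashlib import sha256

leaves = [sha256(bytes([i])).digest() for i in range(4)]
print(root_hash(leaves).hex())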
|
9036414c71e192a62968a9939d56f9523359c877
| 3,642,089
|
import json
def DumpStr(obj, pretty=False, newline=None, **json_dumps_kwargs):
"""Serialize a Python object to a JSON string.
Args:
obj: a Python object to be serialized.
pretty: True to output in human-friendly pretty format.
newline: True to append a newline in the end of result, default to the
previous argument ``pretty``.
json_dumps_kwargs: Any allowable arguments to json.dumps.
Returns:
The serialized JSON string.
"""
if newline is None:
newline = pretty
if pretty:
kwargs = dict(indent=2, separators=(',', ': '), sort_keys=True)
else:
kwargs = {}
kwargs.update(json_dumps_kwargs)
result = json.dumps(obj, **kwargs)
if newline:
result += '\n'
return result
|
97ed8c722d8d9e545f29214fbc8a817e6cf4ca1a
| 3,642,090
|
def snake_case(s: str):
"""
Transform into a lower case string with underscores between words.
Parameters
----------
s : str
Original string to transform.
Returns
-------
Transformed string.
"""
return _change_case(s, '_', str.lower)
|
c4dc65445e424101b3b5264c2f14e0aa0d7bcd22
| 3,642,091
|
def multiple_workers_thread(worker_fn, queue_capacity_input=1000, queue_capacity_output=1000, n_worker=3):
"""
:param worker_fn: lambda (tid, queue): pass
:param queue_capacity:
:param n_worker:
:return:
"""
threads = []
queue_input = Queue.Queue(queue_capacity_input)
queue_output = Queue.Queue(queue_capacity_output)
for i in range(n_worker):
t = threading.Thread(target=worker_fn, args=(i, queue_input, queue_output))
threads.append(t)
t.start()
print('multiple_workers_thread: started %s worker threads' % n_worker)
return queue_input, queue_output, threads
|
b9a989404b6f7e3aa6b028af5e55719364c62fd8
| 3,642,092
|
from typing import List
from typing import Any
def reorder(list_1: List[Any]) -> List[Any]:
"""This function takes a list and returns it in sorted order"""
new_list: list = []
for ele in list_1:
new_list.append(ele)
temp = new_list.index(ele)
while temp > 0:
if new_list[temp - 1] > new_list[temp]:
new_list[temp - 1], new_list[temp] = new_list[temp], new_list[temp-1]
else:
break
temp = temp - 1
return new_list
|
2e7dad8fa138b1a9a140deab4223eea4a09cdf91
| 3,642,093
|
def is_extended_markdown(view):
"""True if the view contains 'Markdown Extended'
syntax'ed text.
"""
return view.settings().get("syntax").endswith(
"Markdown Extended.sublime-syntax")
|
5c870fd277910f6fa48f2b8ae0dfd304fdbddff0
| 3,642,094
|
def match_command_to_alias(command, aliases, match_multiple=False):
"""
Match the text against an action and return the action reference.
"""
results = []
for alias in aliases:
formats = list_format_strings_from_aliases([alias], match_multiple)
for format_ in formats:
try:
extract_parameters(format_str=format_['representation'],
param_stream=command)
except ParseException:
continue
results.append(format_)
return results
|
74712b70cb5995c30c7948991d60954732e4bc16
| 3,642,096
|
def dan_acf(x, axis=0, fast=False):
"""
DFM's acf function
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
m[axis] = 0
return acf / acf[tuple(m)]
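# Usage sketch (illustrative only): the ACF of an AR(1)-like series should
# start at 1.0 and decay; assumes numpy is imported as np.
import numpy as np

rng = np.random.default_rng(0)
x = np.zeros(1000)
for t in range(1, len(x)):
    x[t] = 0.9 * x[t - 1] + rng.normal()
rho = dan_acf(x, fast=True)
print(rho[0], rho[1])  # 1.0, then roughly 0.9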
|
ec258af743184c09e1962f1445ec6971b4d0cfab
| 3,642,097
|
import re
def set_selenium_local_session(proxy_address,
proxy_port,
proxy_username,
proxy_password,
proxy_chrome_extension,
headless_browser,
use_firefox,
browser_profile_path,
disable_image_load,
page_delay,
logger):
"""Starts local session for a selenium server.
Default case scenario."""
browser = None
err_msg = ''
if use_firefox:
firefox_options = Firefox_Options()
if headless_browser:
firefox_options.add_argument('-headless')
if browser_profile_path is not None:
firefox_profile = webdriver.FirefoxProfile(
browser_profile_path)
else:
firefox_profile = webdriver.FirefoxProfile()
# set English language
firefox_profile.set_preference('intl.accept_languages', 'en')
if disable_image_load:
# permissions.default.image = 2: Disable images load,
# this setting can improve pageload & save bandwidth
firefox_profile.set_preference('permissions.default.image', 2)
if proxy_address and proxy_port:
firefox_profile.set_preference('network.proxy.type', 1)
firefox_profile.set_preference('network.proxy.http',
proxy_address)
firefox_profile.set_preference('network.proxy.http_port',
proxy_port)
firefox_profile.set_preference('network.proxy.ssl',
proxy_address)
firefox_profile.set_preference('network.proxy.ssl_port',
proxy_port)
browser = webdriver.Firefox(firefox_profile=firefox_profile,
options=firefox_options)
# converts to custom browser
# browser = convert_selenium_browser(browser)
# authenticate with popup alert window
if (proxy_username and proxy_password):
proxy_authentication(browser,
logger,
proxy_username,
proxy_password)
else:
chromedriver_location = get_chromedriver_location()
chrome_options = Options()
chrome_options.add_argument('--mute-audio')
chrome_options.add_argument('--dns-prefetch-disable')
chrome_options.add_argument('--lang=en-US')
chrome_options.add_argument('--disable-setuid-sandbox')
chrome_options.add_argument('--no-sandbox')
# this option implements Chrome Headless, a new (late 2017)
# GUI-less browser. chromedriver 2.9 and above required
if headless_browser:
chrome_options.add_argument('--headless')
if disable_image_load:
chrome_options.add_argument(
'--blink-settings=imagesEnabled=false')
# replaces browser User Agent from "HeadlessChrome".
user_agent = "Chrome"
chrome_options.add_argument('user-agent={user_agent}'
.format(user_agent=user_agent))
capabilities = DesiredCapabilities.CHROME
# Proxy for chrome
if proxy_address and proxy_port:
prox = Proxy()
proxy = ":".join([proxy_address, str(proxy_port)])
if headless_browser:
chrome_options.add_argument(
'--proxy-server=http://{}'.format(proxy))
else:
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = proxy
prox.socks_proxy = proxy
prox.ssl_proxy = proxy
prox.add_to_capabilities(capabilities)
# add proxy extension
if proxy_chrome_extension and not headless_browser:
chrome_options.add_extension(proxy_chrome_extension)
# using saved profile for chrome
if browser_profile_path is not None:
chrome_options.add_argument(
'user-data-dir={}'.format(browser_profile_path))
chrome_prefs = {
'intl.accept_languages': 'en-US',
}
if disable_image_load:
chrome_prefs['profile.managed_default_content_settings.images'] = 2
chrome_options.add_experimental_option('prefs', chrome_prefs)
try:
browser = webdriver.Chrome(chromedriver_location,
desired_capabilities=capabilities,
chrome_options=chrome_options)
# gets custom instance
# browser = convert_selenium_browser(browser)
except WebDriverException as exc:
logger.exception(exc)
err_msg = 'ensure chromedriver is installed at {}'.format(
Settings.chromedriver_location)
return browser, err_msg
# prevent: Message: unknown error: call function result missing 'value'
matches = re.match(r'^(\d+\.\d+)',
browser.capabilities['chrome'][
'chromedriverVersion'])
if float(matches.groups()[0]) < Settings.chromedriver_min_version:
err_msg = 'chromedriver {} is not supported, expects {}+'.format(
float(matches.groups()[0]), Settings.chromedriver_min_version)
return browser, err_msg
browser.implicitly_wait(page_delay)
message = "Session started!"
highlight_print('browser', message, "initialization", "info", logger)
print('')
return browser, err_msg
|
4f70baeea220c8e4e21097a5a9d9d54a47f55c2e
| 3,642,098
|
def match():
"""Show a timer of the match length and an upload button"""
player_west = request.form['player_west']
player_east = request.form['player_east']
start_time = dt.datetime.now().strftime('%Y%m%d%H%M')
# generate filename to save video to
filename = '{}_vs_{}_{}.h264'.format(player_west, player_east, start_time)
GAMESAVER.filename = filename
GAMESAVER.start_recording(filename)
return render_template('match.html',
player_west=player_west,
player_east=player_east,
filename=filename)
|
7d2e51ddfaafdff903a31b5662093ab423d8f3a1
| 3,642,099
|
def start_end_epoch(graph):
"""
Start epoch of graph.
:return: (start epoch, end epoch).
"""
start = 0
end = 0
for e in graph.edges_iter():
for _, p in graph[e[0]][e[1]].items():
end = max(end, p['etime_epoch_secs'])
if start == 0:
start = p['stime_epoch_secs']
else:
start = min(start, p['stime_epoch_secs'])
return (start, end)
|
724726ec83d3a98539eed859ec584c6f1adb8567
| 3,642,100
|
def distance_metric(seg_A, seg_B, dx):
"""
Measure the distance errors between the contours of two segmentations.
The manual contours are drawn on 2D slices.
We calculate contour to contour distance for each slice.
"""
table_md = []
table_hd = []
X, Y, Z = seg_A.shape
for z in range(Z):
# Binary mask at this slice
slice_A = seg_A[:, :, z].astype(np.uint8)
slice_B = seg_B[:, :, z].astype(np.uint8)
# The distance is defined only when both contours exist on this slice
if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:
# Find contours and retrieve all the points
_, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
pts_A = contours[0]
for i in range(1, len(contours)):
pts_A = np.vstack((pts_A, contours[i]))
_, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
pts_B = contours[0]
for i in range(1, len(contours)):
pts_B = np.vstack((pts_B, contours[i]))
# Distance matrix between point sets
M = np.zeros((len(pts_A), len(pts_B)))
for i in range(len(pts_A)):
for j in range(len(pts_B)):
M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])
# Mean distance and hausdorff distance
md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx
hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx
table_md += [md]
table_hd += [hd]
# Return the mean distance and Hausdorff distance across 2D slices
mean_md = np.mean(table_md) if table_md else None
mean_hd = np.mean(table_hd) if table_hd else None
return mean_md, mean_hd
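# Hedged usage sketch (assumes OpenCV 3.x, where findContours returns three
# values as used above; the toy masks below are illustrative only):
#
#   seg_a = np.zeros((32, 32, 1)); seg_a[8:20, 8:20, 0] = 1
#   seg_b = np.zeros((32, 32, 1)); seg_b[10:22, 10:22, 0] = 1
#   md, hd = distance_metric(seg_a, seg_b, dx=1.0)   # mean and Hausdorff contour distances, scaled by dx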
|
4ae5de6428914c8352ae1c6cdd9e94183d9ea3f8
| 3,642,101
|
def tomographic_redshift_bin(z_s, version=default_version):
"""DES analyses work in pre-defined tomographic redshift bins. This
function returns the photometric redshift bin as a function of photometric
redshift.
Parameters
----------
z_s : numpy array
Photometric redshifts.
version : string
Which catalog version to use.
Returns
-------
z_bin : numpy array
The tomographic redshift bin corresponding to each photometric
redshift. Returns -1 in case a redshift does not fall into any bin.
"""
if version == 'Y1':
z_bins = [0.2, 0.43, 0.63, 0.9, 1.3]
else:
raise RuntimeError(
"Unkown version of DES. Supported versions are {}.".format(
known_versions))
z_bin = np.digitize(z_s, z_bins) - 1
z_bin = np.where((z_s < np.amin(z_bins)) | (z_s >= np.amax(z_bins)) |
np.isnan(z_s), -1, z_bin)
return z_bin
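# Hedged usage sketch (Y1 bin edges are those hard-coded above):
#
#   z_s = np.array([0.1, 0.3, 0.5, 1.0, 1.5])
#   tomographic_redshift_bin(z_s, version='Y1')   # -> array([-1, 0, 1, 3, -1])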
|
b4a21c111b8d5b5a34c018315f95cf18deb356af
| 3,642,102
|
def is_point_in_rect(point, rect):
"""Checks whether is coordinate point inside the rectangle or not.
Rectangle is defined by bounding box.
:type point: list
:param point: testing coordinate point
:type rect: list
:param rect: bounding box
:rtype: boolean
:return: boolean check result
"""
x0, y0, x1, y1 = rect
x, y = point
if x0 <= x <= x1 and y0 <= y <= y1:
return True
return False
|
d0c7a64138899f4e50b42dc75ea6030616d4dfec
| 3,642,103
|
def chinese_theorem_inv(modulo_list):
"""
    Returns (x, n1*...*nk) such that
    x mod nk = ak for all k, with
    modulo_list = [(a1, n1), ..., (ak, nk)].
    n1, ..., nk must be pairwise coprime.
"""
a, n = modulo_list[0]
for a2, n2 in modulo_list[1:]:
u, v = bezout(n, n2)
a, n = a*v*n2+a2*u*n, n*n2
for (a1, n1) in modulo_list:
assert a % n1 == a1
return ((n+a % n) % n, n)
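# Hedged usage sketch (assumes bezout(a, b) returns (u, v) with a*u + b*v == 1,
# which is what the recombination step above requires):
#
#   chinese_theorem_inv([(2, 3), (3, 5), (2, 7)])   # -> (23, 105): 23 % 3 == 2, 23 % 5 == 3, 23 % 7 == 2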
|
3d1398901b75ca8b21fb97af0acdfbd65fec0a3e
| 3,642,104
|
def compute_segment_cores(split_lines_of_utt):
"""
This function returns a list of pairs (start-index, end-index) representing
the cores of segments (so if a pair is (s, e), then the core of a segment
would span (s, s+1, ... e-1).
The argument 'split_lines_of_utt' is list of lines from a ctm-edits file
corresponding to a single utterance.
By the 'core of a segment', we mean a sequence of ctm-edits lines including
at least one 'cor' line and a contiguous sequence of other lines of the
type 'cor', 'fix' and 'sil' that must be not tainted. The segment core
excludes any tainted lines at the edge of a segment, which will be added
later.
We only initiate segments when it contains something correct and not
realized as unk (i.e. ref==hyp); and we extend it with anything that is
'sil' or 'fix' or 'cor' that is not tainted. Contiguous regions of 'true'
in the resulting boolean array will then become the cores of prototype
segments, and we'll add any adjacent tainted words (or parts of them).
"""
num_lines = len(split_lines_of_utt)
line_is_in_segment_core = [False] * num_lines
# include only the correct lines
for i in range(num_lines):
if (split_lines_of_utt[i][7] == 'cor'
and split_lines_of_utt[i][4] == split_lines_of_utt[i][6]):
line_is_in_segment_core[i] = True
# extend each proto-segment forwards as far as we can:
for i in range(1, num_lines):
if line_is_in_segment_core[i - 1] and not line_is_in_segment_core[i]:
edit_type = split_lines_of_utt[i][7]
if (not is_tainted(split_lines_of_utt[i])
and (edit_type == 'cor' or edit_type == 'sil'
or edit_type == 'fix')):
line_is_in_segment_core[i] = True
# extend each proto-segment backwards as far as we can:
for i in reversed(range(0, num_lines - 1)):
if line_is_in_segment_core[i + 1] and not line_is_in_segment_core[i]:
edit_type = split_lines_of_utt[i][7]
if (not is_tainted(split_lines_of_utt[i])
and (edit_type == 'cor' or edit_type == 'sil'
or edit_type == 'fix')):
line_is_in_segment_core[i] = True
# Get contiguous regions of line in the form of a list
# of (start_index, end_index)
segment_ranges = []
cur_segment_start = None
for i in range(0, num_lines):
if line_is_in_segment_core[i]:
if cur_segment_start is None:
cur_segment_start = i
else:
if cur_segment_start is not None:
segment_ranges.append((cur_segment_start, i))
cur_segment_start = None
if cur_segment_start is not None:
segment_ranges.append((cur_segment_start, num_lines))
return segment_ranges
|
0d054d1f891127f0a27b20bfbd82ad6ce85dec39
| 3,642,105
|
def _classification(dataset='iris',k_range=[1,31],dist_metric='l1'):
"""
    knn on classification dataset
    Inputs:
        dataset: (str) name of dataset
        k_range: (list) k_range[0]: lower bound of number of nearest neighbours; k_range[1]: upper bound of number of nearest neighbours
        dist_metric: (str) 'l1' or 'l2'
    Outputs:
        validation accuracy for each k in the range
"""
print ('------Processing Dataset '+dataset+' ------')
x_train, x_valid, x_test, y_train, y_valid, y_test = load_dataset(dataset)
if y_train.dtype==np.dtype('bool'):
y_train = _cast_TF(y_train)
y_valid = _cast_TF(y_valid)
y_test = _cast_TF(y_test)
acc = []
predicted = _eval_knn(k_range,x_train,y_train,x_valid,y_valid,dist_metric,compute_loss=False)
for k in range(k_range[0],k_range[1]):
#print(k)
curr_predict = predicted['k='+str(k)]
#print(curr_predict)
result = np.argmax(curr_predict,axis=1)
#print(result)
gt = np.where(y_valid==True,1,0)
gt = np.argmax(gt,axis=1)
unique, counts = np.unique(result-gt, return_counts=True)
        correct = dict(zip(unique, counts)).get(0, 0)
#print(correct)
acc.append(correct/y_valid.shape[0])
return acc
|
ce5d0516cffcb545787abe15c46fe086ff8e4991
| 3,642,107
|
def make_move(board, max_rows, max_cols, col, player):
"""Put player's piece in column COL of the board, if it is a valid move.
Return a tuple of two values:
1. If the move is valid, make_move returns the index of the row the
piece is placed in. Otherwise, it returns -1.
2. The updated board
>>> rows, columns = 2, 2
>>> board = create_board(rows, columns)
>>> row, board = make_move(board, rows, columns, 0, 'X')
>>> row
1
>>> get_piece(board, 1, 0)
'X'
>>> row, board = make_move(board, rows, columns, 0, 'O')
>>> row
0
>>> row, board = make_move(board, rows, columns, 0, 'X')
>>> row
-1
>>> row, board = make_move(board, rows, columns, -4, '0')
>>> row
-1
"""
if -1 < col and col < max_cols:
return put_piece(board, max_rows, col, player)
return (-1, board)
|
62ecffbabb83e0ee4119b8b8dbead6bdaeb24fb6
| 3,642,109
|
def convert_timestamp(ts):
"""Converts the timestamp to a format suitable for Billing.
Examples of a good timestamp for startTime, endTime, and eventTime:
'2016-05-20T00:00:00Z'
Note the trailing 'Z'. Python does not add the 'Z' so we tack it on
ourselves.
"""
return ts.isoformat() + 'Z'
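# Hedged usage sketch:
#
#   import datetime
#   convert_timestamp(datetime.datetime(2016, 5, 20))   # -> '2016-05-20T00:00:00Z'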
|
6b8d19671cbeab69c398508fa942e36689802cdd
| 3,642,110
|
def object_id(obj, clazz=None):
"""Turn a given object into an ID that can be stored in with
the notification."""
clazz = clazz or type(obj)
if isinstance(obj, clazz):
obj = obj.id
elif is_mapping(obj):
obj = obj.get('id')
return obj
|
617ae362af894c2f27cc6e032aad7f8df4c33a7c
| 3,642,111
|
def get_email_from_request(request):
"""
Get 'Authorization' from request header,
and parse the email address using cpg-util
"""
auth_header = request.headers.get('Authorization')
if auth_header is None:
raise web.HTTPUnauthorized(reason='Missing authorization header')
try:
id_token = auth_header[7:] # Strip the 'bearer' / 'Bearer' prefix.
return email_from_id_token(id_token)
except ValueError as e:
raise web.HTTPForbidden(reason='Invalid authorization header') from e
|
353604d8021948f4cb6ed80d4fb8a9000b8457ce
| 3,642,112
|
def play(url, offset, text, card_data, response_builder):
"""Function to play audio.
Using the function to begin playing audio when:
- Play Audio Intent is invoked.
- Resuming audio when stopped / paused.
- Next / Previous commands issues.
https://developer.amazon.com/docs/custom-skills/audioplayer-interface-reference.html#play
REPLACE_ALL: Immediately begin playback of the specified stream,
and replace current and enqueued streams.
"""
# type: (str, int, str, Dict, ResponseFactory) -> Response
logger.info("play : 52 v2")
logger.info(url)
logger.info(offset)
logger.info(text)
logger.info(card_data)
if card_data:
logger.info("play : 60")
response_builder.set_card(
StandardCard(
title=card_data["title"], text=card_data["text"],
image=Image(
small_image_url=card_data["small_image_url"],
large_image_url=card_data["large_image_url"])
)
)
# Using URL as token as they are all unique
logger.info("play : 71")
response_builder.add_directive(
PlayDirective(
play_behavior=PlayBehavior.REPLACE_ALL,
audio_item=AudioItem(
stream=Stream(
token=url,
url=url,
offset_in_milliseconds=offset,
expected_previous_token=None),
metadata=add_screen_background(card_data) if card_data else None
)
)
).set_should_end_session(True)
logger.info("play : 85")
if text:
logger.info("play : 87")
response_builder.speak(text)
logger.info("play : 90")
return response_builder.response
|
1a7159adc481d86c35c9206cf8525940b6d1ece3
| 3,642,113
|
from typing import List
from typing import Dict
def all_flags_match_bombs(cells: List[List[Dict]]) -> bool:
"""
Checks whether all flags are placed correctly
and there are no flags over regular cells (not bombs)
:param cells: array of array of cells dicts
:return: True if all flags are placed correctly
"""
for row in cells:
for cell in row:
if cell["mask"] == CellMask.FLAG and cell["value"] != "*":
return False
return True
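# Hedged usage sketch (assumes CellMask is an enum defined elsewhere in this
# module; CellMask.OPEN below is a hypothetical member standing for any
# non-flag state):
#
#   cells = [[{"mask": CellMask.FLAG, "value": "*"},
#             {"mask": CellMask.OPEN, "value": 1}]]
#   all_flags_match_bombs(cells)   # -> True: the only flag sits on a bomb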
|
67cc53d8b2ea3541112245192763a6c5f8593b86
| 3,642,114
|
def household_id_list(filelist, pidp):
""" For a set of waves, obtain a list of household IDs belonging to the same individual. """
hidp_list = []
wave_list = []
wn = {1:'a', 2:'b', 3:'c', 4:'d', 5:'e', 6:'f', 7:'g'}
c=1
for name in filelist:
print("Loading wave %d data..." % c)
df = pd.read_csv(name, sep='\t')
if pidp in df['pidp'].values:
kword = wn[c]+'_hidp'
hidp = df.loc[df['pidp'] == pidp, kword].values
hidp_list.append(hidp)
wave_list.append(c)
c+=1
print("\nIndividual %d present in waves {}".format(wave_list) % pidp)
return hidp_list
|
8fd7b271034eb953c708ea38bd75ab4671f420cb
| 3,642,115
|
from typing import Tuple
def biggest_labelizer_arbitrary(metrics: dict, choice: str, *args, **kwargs) -> Tuple[str, float]:
"""Given dict of metrics result, returns (key, metrics[key]) whose value is maximal."""
metric_values = list(metrics.values())
metric_keys = list(metrics.keys())
# print(items)
big = metric_values[0]
draws = [0]
for idx, val in enumerate(metric_values[1:], start=1):
if val > big:
big = val
draws = [idx]
elif val == big:
draws.append(idx)
if len(draws) > 1 and choice in (metric_keys[idx] for idx in draws):
return choice, big
return metric_keys[draws[0]], big
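# Hedged usage sketch:
#
#   biggest_labelizer_arbitrary({"a": 3, "b": 5, "c": 5}, choice="c")   # -> ('c', 5): choice wins the tie
#   biggest_labelizer_arbitrary({"a": 3, "b": 5, "c": 5}, choice="a")   # -> ('b', 5): choice is not among the tied keys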
|
99f4a0f5233f33d80a328cef4e43f339813371a1
| 3,642,116
|
from typing import Optional
from typing import Dict
from typing import Any
def _seaborn_viz_histogram(data, x: str, contrast: Optional[str] = None, **kwargs):
"""Plot a single histogram.
Args:
data (DataFrame): The data
x (str): The name of the column to plot.
contrast (str, optional): The name of the categorical column to use for multiple contrasts.
        **kwargs: Keyword arguments passed to seaborn.histplot
Raises:
ValueError: Not a numeric column.
Returns:
Seaborn Axis Object
"""
if x not in data.select_dtypes("number").columns:
raise ValueError("x must be numeric column")
default_hist_kwargs: Dict[str, Any] = {}
hist_kwargs = {**default_hist_kwargs, **(kwargs or {})}
if contrast:
data[contrast] = data[contrast].astype("category")
ax = sns.histplot(x=x, hue=contrast, data=data, **hist_kwargs)
else:
ax = sns.histplot(data[x], **hist_kwargs)
ax.set_title(f"Histogram of {x}")
return ax
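# Hedged usage sketch (assumes seaborn as sns and pandas as pd are already
# imported, matching the calls above; the column names are made up):
#
#   df = pd.DataFrame({"age": [23, 35, 41, 29, 52], "group": ["a", "b", "a", "b", "a"]})
#   ax = _seaborn_viz_histogram(df, x="age", contrast="group", bins=5)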
|
27aed8280c372273e02e7b49647b4e2285a81fa7
| 3,642,117
|
def str2bool( s ):
"""
Description:
----------
Converting an input string to a boolean
Arguments:
----------
[NAME] [TYPE] [DESCRIPTION]
(1) s dict, str The string which
Returns:
----------
True/False depending on the given input strin gv
"""
if isinstance( s, dict ):
for key, _ in s.items():
s[ key ] = str2bool( s[ key ] )
else:
return v.lower() in ( "yes", "true", "t", "1" )
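# Hedged usage sketch for the helper above:
#
#   str2bool("Yes")                                  # -> True
#   str2bool("0")                                    # -> False
#   str2bool({"verbose": "true", "debug": "no"})     # -> {"verbose": True, "debug": False}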
|
cb68fe0382561d69fb332b75c99c01c5a338196f
| 3,642,118
|
def im_detect(net, im, boxes=None):
"""Detect object classes in an image given object proposals.
Arguments:
net (caffe.Net): Fast R-CNN network to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals or None (for RPN)
Returns:
scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
blobs, im_scales = _get_blobs(im, boxes)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# v = np.array([1, 1e3, 1e6, 1e9, 1e12])
# hashes = np.round(blobs['rois'] * cfg.DEDUP_BOXES).dot(v)
# _, index, inv_index = np.unique(hashes, return_index=True,
# return_inverse=True)
# blobs['rois'] = blobs['rois'][index, :]
# boxes = boxes[index, :]
if cfg.TEST.HAS_RPN:
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# reshape network inputs
net.blobs['data'].reshape(*(blobs['data'].shape))
if cfg.TEST.HAS_RPN:
net.blobs['im_info'].reshape(*(blobs['im_info'].shape))
else:
net.blobs['rois'].reshape(*(blobs['rois'].shape))
# do forward
forward_kwargs = {'data': blobs['data'].astype(np.float32, copy=False)}
if cfg.TEST.HAS_RPN:
forward_kwargs['im_info'] = blobs['im_info'].astype(np.float32, copy=False)
else:
forward_kwargs['rois'] = blobs['rois'].astype(np.float32, copy=False)
blobs_out = net.forward(**forward_kwargs)
if cfg.TEST.HAS_RPN:
assert len(im_scales) == 1, "Only single-image batch implemented"
rois = net.blobs['rois'].data.copy()
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
if cfg.TEST.SVM:
# use the raw scores before softmax under the assumption they
# were trained as linear SVMs
scores = net.blobs['cls_score'].data
else:
# use softmax estimated probabilities
scores = blobs_out['cls_prob']
# if cfg.TEST.BBOX_REG:
if False:
# Apply bounding-box regression deltas
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes = clip_boxes(pred_boxes, im.shape)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
# if cfg.DEDUP_BOXES > 0 and not cfg.TEST.HAS_RPN:
# # Map scores and predictions back to the original set of boxes
# scores = scores[inv_index, :]
# pred_boxes = pred_boxes[inv_index, :]
fc7 = net.blobs['fc7'].data
return net.blobs['cls_score'].data[:, :], scores, fc7, pred_boxes
|
f37cb46375f39b6baf839be5ec68388c79f47e16
| 3,642,119
|
def dispatch_error_adaptor(func):
"""Construct a signature isomorphic to dispatch_error.
The actual handler will receive only arguments explicitly
declared, and a possible tg_format parameter.
"""
def adaptor(controller, tg_source,
tg_errors, tg_exceptions, *args, **kw):
tg_format = kw.pop('tg_format', None)
args, kw = inject_args(func, {"tg_source": tg_source,
"tg_errors": tg_errors, "tg_exceptions": tg_exceptions},
args, kw, 1)
args, kw = adapt_call(func, args, kw, 1)
if tg_format is not None:
kw['tg_format'] = tg_format
return func(controller, *args, **kw)
return adaptor
|
67be23f01c11d668d86f5e2b1afcfb76db79ea6c
| 3,642,121
|
import re
def address_split(address, env=None):
"""The address_split() function splits an address into its four
components. Address strings are on the form
detector-detectorID|device-deviceID, where the detectors must be in
dir(xtc.DetInfo.Detector) and device must be in
(xtc.DetInfo.Device).
@param address Full data source address of the DAQ device
@param env Optional env to dereference an alias into an address
@return Four-tuple of detector name, detector ID, device, and
device ID
"""
# pyana
m = re.match(
r"^(?P<det>\S+)\-(?P<det_id>\d+)\|(?P<dev>\S+)\-(?P<dev_id>\d+)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
# psana
m = re.match(
r"^(?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
# psana DetInfo string
m = re.match(
r"^DetInfo\((?P<det>\S+)\.(?P<det_id>\d+)\:(?P<dev>\S+)\.(?P<dev_id>\d+)\)$", address)
if m is not None:
return (m.group('det'), m.group('det_id'), m.group('dev'), m.group('dev_id'))
if env is not None:
# Try to see if this is a detector alias, and if so, dereference it. Code from psana's Detector/PyDetector.py
amap = env.aliasMap()
alias_src = amap.src(address) # string --> DAQ-style psana.Src
# if it is an alias, look up the full name
if amap.alias(alias_src) != '': # alias found
address = str(alias_src)
return address_split(address)
return (None, None, None, None)
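# Hedged usage sketch (the detector/device names below are illustrative only):
#
#   address_split("CxiDs1-0|Cspad-0")            # pyana style   -> ('CxiDs1', '0', 'Cspad', '0')
#   address_split("CxiDs1.0:Cspad.0")            # psana style   -> ('CxiDs1', '0', 'Cspad', '0')
#   address_split("DetInfo(CxiDs1.0:Cspad.0)")   # DetInfo style -> ('CxiDs1', '0', 'Cspad', '0')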
|
c5d362c7fc6121d64ec6a660bcdb7a9b4b532553
| 3,642,122
|
import time
def solveGroth(A, n, init_val=None):
"""
...
Parameters
----------
A: np.matrix
dfajdslkf
n: int
ddddddd
init_val: float, optional
dsfdsafdasfd
Returns
-------
list of
float:
float:
float:
float:
"""
eps=0.5
eta=0.05
threshold = 1.1
N = n
Ap = A
if init_val is not None:
w=init_val
else:
w = np.ones(N)
min_val = np.sum(np.abs(Ap))
curr_y = np.zeros(N)
curr_alpha = np.zeros(N)
avg_y_val = np.zeros(N)
avg_X=np.zeros((N,N))
avg_alpha=np.zeros(N)
T=4*N
schedule_size = round(T/8) #We change eps in epochs
print("iteration bound:",T)
z = np.zeros(N)
vals = 0
g = np.random.standard_normal(T)
for i in range(T):
if (i+1)%(T/2)==0:
eps=0.01
if i%schedule_size ==0 and eps>0.01 :
eps=eps/2
if i%schedule_size == 0 and i>T/2:
eps /= 2
wtil=(1-eta)*w+eta*np.ones(N)
w1 = np.array([1/np.sqrt(j) for j in wtil])
start_time = time.time()
d = np.tile(w1, (N, 1))
M = np.multiply(Ap,d)
d = np.tile(np.array([w1]).transpose(), (1,N))
M = np.multiply(M,d)
start_time = time.time()
eigval, eigvec = lasp.eigsh(M, k=1, which='LA', tol=0.00001)
y = eigvec[:,0]
y *= np.sqrt(N)
y = np.multiply(y,w1)
avg_y_val += y**2
val = np.matmul(np.transpose(y), np.matmul(Ap,y))
avg_alpha+=val*w
if val < min_val:
min_val = val
curr_y = y
curr_alpha = w
vals += val
print("iterate", i, "val = ", val, " minval=", min_val, " linf of curr y=", np.max(np.abs(y**2)) , " infinity norm avg X =", np.max((1.0/(i+1))*avg_y_val), "SDP sol val:", vals/(i+1), "eps,eta=", eps, " , ", eta)
if checkCondition(y,threshold):
print(y,"Current iterate Condition satisfied, i : ",i)
print("min val = ", min_val)
print("curr_y = ", curr_y)
print("curr_alpha = ", curr_alpha)
print("inf norm of curr_y=", max(abs(curr_y)))
return [np.matmul(curr_y,curr_y.T),min_val, curr_alpha, avg_y_val]
elif checkCondition((1.0/(i+1))*avg_y_val, threshold):
avg_y_val=(1.0/(i+1))*avg_y_val
avg_val = vals/(i+1)
print(avg_y_val," Avg Condition satisfied, i : ",i)
print("min val = ", min_val)
print("curr val=", avg_val)
print("curr_alpha = ", (1.0/i)*avg_alpha)
print("inf norm of avg_y=", max(abs(avg_y_val)))
return [(1.0/(i+1))*avg_X,min_val, curr_alpha, avg_y_val]
if i < T/2:
w = updateWeights_2(w,y,threshold, eps, N)
else:
w = updateWeights(w,y,threshold, 2*eps, N)
u = y*g[i]
z += u
print("min val = ", min_val)
print("sum of curr_alpha = ", sum(curr_alpha))
print("sum weights at end = ", sum(w))
print("inf norm of curr_y=", max(abs(curr_y)))
return [np.matmul(curr_y, curr_y.T), min_val, curr_alpha, avg_y_val]
|
a8f4a3ea2274bd1a81565500683dea84378ccddc
| 3,642,123
|
def verify_any(func, *args, **kwargs):
"""
Assert that any of `func(*args, **kwargs)` are true.
"""
return _verify(func, 'any', *args, **kwargs)
|
618165e6a9f252ac2ddeffdb9defa34f2d281900
| 3,642,124
|
def can_create_election(user_id, user_info):
""" for now, just let it be"""
return True
|
06c8290b41b38a840b7826173fd65130d38260a7
| 3,642,125
|
from .path import Path2D
def circle_pattern(pattern_radius,
circle_radius,
count,
center=[0.0, 0.0],
angle=None,
**kwargs):
"""
Create a Path2D representing a circle pattern.
Parameters
------------
pattern_radius : float
Radius of circle centers
circle_radius : float
The radius of each circle
count : int
Number of circles in the pattern
center : (2,) float
Center of pattern
angle : float
If defined pattern will span this angle
If None, pattern will be evenly spaced
Returns
-------------
pattern : trimesh.path.Path2D
Path containing circular pattern
"""
if angle is None:
angles = np.linspace(0.0, np.pi * 2.0, count + 1)[:-1]
elif isinstance(angle, float) or isinstance(angle, int):
angles = np.linspace(0.0, angle, count)
else:
raise ValueError('angle must be float or int!')
# centers of circles
centers = np.column_stack((
np.cos(angles), np.sin(angles))) * pattern_radius
vert = []
ents = []
for circle_center in centers:
# (3,3) center points of arc
three = arc.to_threepoint(angles=[0, np.pi],
center=circle_center,
radius=circle_radius)
# add a single circle entity
ents.append(
Arc(
points=np.arange(3) + len(vert),
closed=True))
# keep flat array by extend instead of append
vert.extend(three)
# translate vertices to pattern center
vert = np.array(vert) + center
pattern = Path2D(entities=ents,
vertices=vert,
**kwargs)
return pattern
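# Hedged usage sketch (assumes this sits next to trimesh's path creation
# helpers, so the arc module and the Arc entity used above are importable):
#
#   pattern = circle_pattern(pattern_radius=10.0, circle_radius=1.0, count=8)
#   # -> Path2D of eight unit circles evenly spaced on a radius-10 ring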
|
b82d60c7a76f12349605191b16bf04d7899c3a3a
| 3,642,127
|
def boolean(input):
"""Convert the given input to a boolean value.
Intelligently handles boolean and non-string values, returning
as-is and passing to the bool builtin respectively.
This process is case-insensitive.
Acceptable values:
True
* yes
* y
* on
* true
* t
* 1
False
* no
* n
* off
* false
* f
* 0
:param input: the value to convert to a boolean
:type input: any
:returns: converted boolean value
:rtype: bool
"""
try:
input = input.strip().lower()
except AttributeError:
return bool(input)
if input in ('yes', 'y', 'on', 'true', 't', '1'):
return True
if input in ('no', 'n', 'off', 'false', 'f', '0'):
return False
raise ValueError("Unable to convert {0!r} to a boolean value.".format(input))
|
09c09206d5487bf02e3271403e2ba67358e1d148
| 3,642,128
|
def find_horizontal_up_down_links(tc, u, out_up=None, out_down=None):
"""Find indices of nodes that locate
at horizontally upcurrent and downcurrent directions
"""
if out_up is None:
out_up = np.zeros(u.shape[0], dtype=np.int)
if out_down is None:
out_down = np.zeros(u.shape[0], dtype=np.int)
out_up[:] = tc.link_west[:]
out_down[:] = tc.link_east[:]
negative_u_index = np.where(u < 0)[0]
out_up[negative_u_index] = tc.link_east[negative_u_index]
out_down[negative_u_index] = tc.link_west[negative_u_index]
return out_up, out_down
|
b61976a57d8dd850c26c7a9baff11483ccdb306f
| 3,642,129
|
def _compute_composite_beta(model, robo, j, i):
"""
Compute the composite beta wrench for link i.
Args:
model: An instance of DynModel
robo: An instance of Robot
j: link number
i: antecedent value
Returns:
An instance of DynModel that contains all the new values.
"""
i_beta_i_c = Screw()
# local variables
j_s_i = robo.geos[j].tmat.s_i_wrt_j
i_beta_i = model.composite_betas[i].val
j_beta_j_c = model.composite_betas[j].val
j_inertia_j_c = model.composite_inertias[j].val
j_zeta_j = model.zetas[j].val
# actual computation
i_beta_i_c.val = i_beta_i + (j_s_i.transpose() * j_beta_j_c) - \
(j_s_i.transpose() * j_inertia_j_c * j_zeta_j)
# store computed beta in model
model.composite_betas[i] = i_beta_i_c
return model
|
0fa80859787a4e523402d10237b63e33ca0082f4
| 3,642,130
|
def pos(x, y):
"""Returns floored and camera-offset x,y tuple.
Setting out of bounds is possible, but getting is not; mod in callers for get_at.
"""
return (flr(xo + x), flr(yo + y))
|
1a17648c074157c6164856f44cfa309923ca2226
| 3,642,131
|
from typing import List
from typing import Dict
from typing import Tuple
def _check_blockstream_for_transactions(
accounts: List[BTCAddress],
) -> Dict[BTCAddress, Tuple[bool, FVal]]:
"""May raise connection errors or KeyError"""
have_transactions = {}
for account in accounts:
url = f'https://blockstream.info/api/address/{account}'
response_data = request_get_dict(url=url, handle_429=True, backoff_in_seconds=4)
stats = response_data['chain_stats']
balance = satoshis_to_btc(int(stats['funded_txo_sum']) - int(stats['spent_txo_sum']))
have_txs = stats['tx_count'] != 0
have_transactions[account] = (have_txs, balance)
return have_transactions
|
a17a9204dc0d5f11b8c0352d15c871141e7bb09b
| 3,642,132
|