| id | content |
|---|---|
79106
|
import pandas as pd
import numpy as np
from catboost import CatBoostClassifier
import xgboost as xgb
import lightgbm as lgb
from sklearn.utils import class_weight
from abc import ABC, abstractmethod
class predict_model(ABC):
"""
Abstract class for working with classifiers.
"""
@abstractmethod
def __init__(self, name='predict_model', categ_conv=True):
self.params = {}
self.exclude_list = []
self.name = name
self.random = 1
self.classifier = None
self.categ_conv = categ_conv
self.data_df = {}
def set_params(self, params=None):
if not params:
self.params = {}
else:
self.params = params
def set_random_seed(self, random=1):
self.random = random
@abstractmethod
def load_data(self, data, balance=False):
self.data = data
self.data_df['train'], self.data_df['y'] = self.data.get_train(
balance=balance
)
self.data_df['test'] = self.data.get_test()
self.category_cols = self.data.get_cat_list()
for header in self.category_cols:
self.data_df['train'].loc[:, header] = self.data_df['train'][header].astype('category').cat.codes
self.data_df['test'].loc[:, header] = self.data_df['test'][header].astype('category').cat.codes
return True
def get_train(self):
return self.data_df['train']
def get_y(self):
return self.data_df['y']
def get_test(self):
return self.data_df['test']
def set_exclude_list(self, exclude_list):
self.exclude_list = exclude_list.copy()
@abstractmethod
def get_feature_importances(self):
pass
@abstractmethod
def train(self, x_train=None, y_train=None):
pass
def predict(self, test=None):
if self.classifier:
if not isinstance(test, pd.DataFrame):
test = self.get_test()
elif self.categ_conv:
cols = [x for x in self.category_cols if x in test.columns]
for header in cols:
test.loc[:, header] = test[header].astype('category').cat.codes
test = test.drop(
[x for x in self.exclude_list if x in test.columns], axis=1
)
res = pd.DataFrame(index=test.index)
res['country'] = self.data.country
res['poor'] = self.classifier.predict_proba(test)[:, 1]
return res
else:
print('error: classifier not defined')
return None
class CB_model(predict_model):
"""
Class for a CatBoost classifier.
"""
    def __init__(self, name='cat_boost', categ_conv=True):
        super().__init__(name=name, categ_conv=categ_conv)
def load_data(self, data, balance=False):
if super().load_data(data, balance):
c_w = class_weight.compute_class_weight(
class_weight='balanced',
classes=np.unique(self.data_df['y']),
y=self.data_df['y']
)
self.classifier = CatBoostClassifier(**self.params,
class_weights=c_w)
return True
else:
return False
def train(self, x_train=None, y_train=None):
if not isinstance(x_train, pd.DataFrame):
x_train = self.get_train()
elif self.categ_conv:
cols = [x for x in self.category_cols if x in x_train.columns]
for header in cols:
x_train.loc[:, header] = x_train[header].astype('category').cat.codes
if not isinstance(y_train, pd.Series):
y_train = self.get_y()
x_train = x_train.drop([x for x in self.exclude_list
if x in x_train.columns], axis=1)
self.category_cols = [x for x in self.category_cols
if x not in self.exclude_list]
cat_dims = [x_train.columns.get_loc(i) for i in self.category_cols]
print(x_train.shape, y_train.shape, len(self.category_cols))
self.classifier.fit(x_train, y_train, cat_features=cat_dims)
return self.classifier
def get_feature_importances(self):
        return self.classifier.feature_importances_
class XGB_model(predict_model):
"""
    Class for an XGBoost classifier.
"""
    def __init__(self, name='xg_boost', categ_conv=True):
        super().__init__(name=name, categ_conv=categ_conv)
def load_data(self, data, balance=False):
if super().load_data(data, balance):
self.params['scale_pos_weight'] = (
(self.data_df['y'].shape[0] - self.data_df['y'].sum()) /
self.data_df['y'].sum()
)
self.classifier = xgb.XGBClassifier(**self.params)
return True
else:
return False
def train(self, x_train=None, y_train=None):
if not isinstance(x_train, pd.DataFrame):
x_train = self.get_train()
elif self.categ_conv:
cols = [x for x in self.category_cols if x in x_train.columns]
for header in cols:
x_train.loc[:, header] = x_train[header].astype('category').cat.codes
if not isinstance(y_train, pd.Series):
y_train = self.get_y()
x_train = x_train.drop([x for x in self.exclude_list
if x in x_train.columns], axis=1)
print('x_train shape: ', x_train.shape)
self.classifier.fit(x_train, y_train)
return self.classifier
def get_feature_importances(self):
return self.classifier.feature_importances_
class LGBM_model(predict_model):
"""
Class for LightGBM classifier.
"""
def __init__(self, name='lgbm', categ_conv=True):
super().__init__(name='lgbm', categ_conv=categ_conv)
self.name = name
def load_data(self, data, balance=False):
if super().load_data(data, balance):
self.classifier = lgb.LGBMClassifier(**self.params)
return True
else:
return False
def train(self, x_train=None, y_train=None):
if not isinstance(x_train, pd.DataFrame):
x_train = self.get_train()
elif self.categ_conv:
cols = [x for x in self.category_cols if x in x_train.columns]
for header in cols:
x_train.loc[:, header] = x_train[header].astype('category').cat.codes
if not isinstance(y_train, pd.Series):
y_train = self.get_y()
x_train = x_train.drop([x for x in self.exclude_list
if x in x_train.columns], axis=1)
print('x_train shape: ', x_train.shape)
self.category_cols = [x for x in self.category_cols
if x not in self.exclude_list]
self.classifier.fit(x_train, y_train, verbose=False)
return self.classifier
def get_feature_importances(self):
return self.classifier.feature_importances_
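# Minimal usage sketch (illustrative only): `data` stands for a dataset
# wrapper that provides get_train(), get_test(), get_cat_list() and a
# `country` attribute, as assumed by predict_model.load_data/predict above.
# model = LGBM_model(name='lgbm')
# model.set_params({'n_estimators': 200, 'learning_rate': 0.05})
# model.load_data(data, balance=True)
# model.train()
# submission = model.predict()  # DataFrame with 'country' and 'poor' columns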
|
79135
|
from bokeh.resources import CDN
from flask import request, current_app
from flask_security import login_required
from flask_security.core import current_user
from flexmeasures.data.config import db
from flexmeasures.ui.views import flexmeasures_ui
from flexmeasures.ui.utils.view_utils import render_flexmeasures_template, clear_session
from flexmeasures.data.services.resources import (
get_asset_group_queries,
Resource,
get_center_location,
)
# Dashboard (default root view, see utils/app_utils.py)
@flexmeasures_ui.route("/dashboard")
@login_required
def dashboard_view():
"""Dashboard view.
This is the default landing page for the platform user.
It shows a map with the location and status of all of the user's assets,
as well as a breakdown of the asset types in the user's portfolio.
Assets for which the platform has identified upcoming balancing opportunities are highlighted.
"""
msg = ""
if "clear-session" in request.values:
clear_session()
msg = "Your session was cleared."
aggregate_groups = ["renewables", "EVSE"]
asset_groups = get_asset_group_queries(custom_additional_groups=aggregate_groups)
map_asset_groups = {}
for asset_group_name in asset_groups:
asset_group = Resource(asset_group_name)
map_asset_groups[asset_group_name] = asset_group
# Pack CDN resources (from pandas_bokeh/base.py)
bokeh_html_embedded = ""
for css in CDN.css_files:
bokeh_html_embedded += (
"""<link href="%s" rel="stylesheet" type="text/css">\n""" % css
)
for js in CDN.js_files:
bokeh_html_embedded += """<script src="%s"></script>\n""" % js
return render_flexmeasures_template(
"views/dashboard.html",
message=msg,
bokeh_html_embedded=bokeh_html_embedded,
mapboxAccessToken=current_app.config.get("MAPBOX_ACCESS_TOKEN", ""),
map_center=get_center_location(db, user=current_user),
asset_groups=map_asset_groups,
aggregate_groups=aggregate_groups,
)
|
79146
|
from keras import backend as K
from keras.layers import LSTM, time_distributed_dense
from keras import initializations, activations, regularizers
from keras.engine import InputSpec
# LSTM with Layer Normalization as described in:
# https://arxiv.org/pdf/1607.06450v1.pdf
# page 13, equation (20), (21), and (22)
class LSTM_LN(LSTM):
def __init__(self, output_dim, **kwargs):
super(LSTM_LN, self).__init__(output_dim, **kwargs)
def norm(self, xs, norm_id):
mu = K.mean(xs, axis=-1, keepdims=True)
sigma = K.sqrt(K.var(xs, axis=-1, keepdims=True) + 1e-3)
xs = self.gs[norm_id] * (xs - mu) / (sigma + 1e-3) + self.bs[norm_id]
return xs
def build(self, input_shape):
super(LSTM_LN, self).build(input_shape)
self.gs, self.bs = [], []
        for i in range(3):
            f = 1 if i == 2 else 4
            self.gs += [K.ones((f*self.output_dim,), name='{}_g{}'.format(self.name, i))]
            self.bs += [K.zeros((f*self.output_dim,), name='{}_b{}'.format(self.name, i))]
self.trainable_weights += self.gs + self.bs
def step(self, x, states):
h_tm1 = states[0]
c_tm1 = states[1]
B_U = states[2]
B_W = states[3]
if self.consume_less == 'gpu':
z = self.norm(K.dot(x * B_W[0], self.W), 0) + self.norm(K.dot(h_tm1 * B_U[0], self.U), 1) + self.b
z0 = z[:, :self.output_dim]
z1 = z[:, self.output_dim: 2 * self.output_dim]
z2 = z[:, 2 * self.output_dim: 3 * self.output_dim]
z3 = z[:, 3 * self.output_dim:]
i = self.inner_activation(z0)
f = self.inner_activation(z1)
c = f * c_tm1 + i * self.activation(z2)
o = self.inner_activation(z3)
        else:
            # Layer normalization is only implemented for the 'gpu' mode;
            # the cpu/mem branches below are inherited from the base Keras
            # LSTM and are unreachable here.
            assert False, "LSTM_LN only supports consume_less='gpu'"
if self.consume_less == 'cpu':
x_i = x[:, :self.output_dim]
x_f = x[:, self.output_dim: 2 * self.output_dim]
x_c = x[:, 2 * self.output_dim: 3 * self.output_dim]
x_o = x[:, 3 * self.output_dim:]
elif self.consume_less == 'mem':
x_i = K.dot(x * B_W[0], self.W_i) + self.b_i
x_f = K.dot(x * B_W[1], self.W_f) + self.b_f
x_c = K.dot(x * B_W[2], self.W_c) + self.b_c
x_o = K.dot(x * B_W[3], self.W_o) + self.b_o
else:
raise Exception('Unknown `consume_less` mode.')
i = self.inner_activation(x_i + K.dot(h_tm1 * B_U[0], self.U_i))
f = self.inner_activation(x_f + K.dot(h_tm1 * B_U[1], self.U_f))
c = f * c_tm1 + i * self.activation(x_c + K.dot(h_tm1 * B_U[2], self.U_c))
o = self.inner_activation(x_o + K.dot(h_tm1 * B_U[3], self.U_o))
h = o * self.activation(self.norm(c, 2))
return h, [h, c]
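# Minimal usage sketch (assumes the Keras 1.x API matching the imports above;
# `timesteps` and `features` are placeholders):
# from keras.models import Sequential
# model = Sequential()
# model.add(LSTM_LN(128, input_shape=(timesteps, features), consume_less='gpu'))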
|
79161
|
import subprocess
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=4)
parser.add_argument("--epochs", default=10)
args = parser.parse_args()
cl = " ".join([
"python train.py",
"--dataset", os.path.join("data", "voc2012_train.tfrecord"),
"--val_dataset", os.path.join("data", "voc2012_val.tfrecord"),
"--classes", os.path.join("data", "voc2012.names"),
"--num_classes 20",
"--mode eager_tf",
"--transfer none",
f"--batch_size {args.batch_size}",
f"--epochs {args.epochs}"
])
subprocess.run(cl, shell=True, check=True)
|
79269
|
import astropy.units as u
from astropy.coordinates import Distance
from agnpy.emission_regions import Blob
import matplotlib.pyplot as plt
from agnpy.utils.plot import load_mpl_rc
# matplotlib adjustments
load_mpl_rc()
# set the spectrum normalisation (total energy in electrons in this case)
spectrum_norm = 1e48 * u.Unit("erg")
# define the spectral function parametrisation through a dictionary
spectrum_dict = {
"type": "PowerLaw",
"parameters": {"p": 2.8, "gamma_min": 1e2, "gamma_max": 1e7},
}
# set the remaining quantities defining the blob
R_b = 1e16 * u.cm
B = 1 * u.G
z = Distance(1e27, unit=u.cm).z
delta_D = 10
Gamma = 10
blob = Blob(R_b, z, delta_D, Gamma, B, spectrum_norm, spectrum_dict)
# plot the electron distribution
blob.plot_n_e(gamma_power=2)
plt.show()
|
79277
|
import matplotlib.pyplot as plt
import numpy
from PIL import Image
import sys
import cv2
import time
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
import glob
import os
method = 'threshold'
#method = 'threshold_adp'
#method = 'backSub'
#method = 'kmeans'
dataset = 'beach'
save_frame = -1  # frame id whose outputs are saved to results/ (e.g. 98, 130); -1 disables
min_cluster = 10
for i in range(len(sys.argv)-1):
if sys.argv[i]=='--method':
method = sys.argv[i+1]
elif sys.argv[i]=='--dataset':
dataset = sys.argv[i+1]
elif sys.argv[i]=='--save_frame':
save_frame = int(sys.argv[i+1])
elif sys.argv[i]=='--min_cluster':
min_cluster = int(sys.argv[i+1])
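# Example invocation (the script name is illustrative):
#   python segment_eval.py --method kmeans --dataset beach --save_frame 98 --viz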
backSub = cv2.createBackgroundSubtractorMOG2()
#backSub = cv2.createBackgroundSubtractorKNN()
image_id = 1
fig = plt.figure(figsize=(20,30))
try:
xbound, ybound, imwidth, imheight = [int(t) for t in open('dataset/%s/params.txt'%dataset).readline().split()]
except ValueError:
xbound, ybound, imscale = [int(t) for t in open('dataset/%s/params.txt'%dataset).readline().split()]
imwidth = imheight = imscale
num_samples = len(glob.glob('dataset/%s/label*.png'%dataset))
num_test = num_samples - int(num_samples*0.8)
test_idx = num_samples - num_test + 1
tp = 0
fp = 0
fn = 0
obj_tp = 0
obj_fp = 0
obj_fn = 0
viz = '--viz' in sys.argv
zoomed_in = True
comp_time = []
while True:
if method!='backSub' and image_id < test_idx:
image_id += 1
continue
image_filename = 'dataset/%s/%d.png' % (dataset,image_id)
label_filename = 'dataset/%s/label%d.png'%(dataset,image_id)
if os.path.exists(image_filename) and os.path.exists(label_filename):
I = numpy.array(Image.open(image_filename))
if len(I.shape)>2:
I = numpy.mean(I, axis=2)
else:
break
gt = numpy.array(Image.open(label_filename))
gt = gt > 0
dt = numpy.zeros(I.shape, dtype=bool)
image_np = I[ybound:ybound+imheight, xbound:xbound+imwidth]
t1 = time.time()
    if method=='threshold':
        Isub = image_np.astype(numpy.uint8)
        thresh = 75 if dataset=='beach' else 85 if dataset=='shore' else 120
        val, mask = cv2.threshold(Isub, thresh, 255, cv2.THRESH_BINARY)
elif method=='threshold_adp':
Isub = image_np.astype(numpy.uint8)
blur = cv2.medianBlur(Isub,5)
if dataset=='beach':
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-5)
elif dataset=='shore':
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-8)
else:
mask = cv2.adaptiveThreshold(blur,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,15,-10)
elif method=='backSub':
if dataset=='combined' and image_id in [97, 225, 249]:
backSub = cv2.createBackgroundSubtractorMOG2()
mask = backSub.apply(image_np)
Image.fromarray(mask.astype(numpy.uint8), mode='L').save('dataset/%s/backSub/%d.png'%(dataset,image_id))
elif method=='kmeans':
window_size = 15 if dataset=='beach' or dataset=='shore' else 100
margin = 10 if dataset=='beach' or dataset=='shore' else 100
Isub = image_np.copy()
#start with mean shift
centerX = 0
centerY = 0
centerVal = Isub[centerY, centerX]
peaks = []
peakVal = []
while True:
while True:
x1 = max(0,centerX-window_size)
x2 = min(Isub.shape[1],centerX+window_size)
y1 = max(0,centerY-window_size)
y2 = min(Isub.shape[0],centerY+window_size)
Itmp = Isub[y1:y2,x1:x2]
maxVal = Itmp.max()
# print(centerX,centerY,centerVal,maxVal)
if maxVal > centerVal:
dy, dx = numpy.unravel_index(numpy.argmax(Itmp), Itmp.shape)
centerY = y1+dy
centerX = x1+dx
centerVal = maxVal
Isub[y1:y2,x1:x2] = 0
else:
peaks.append([centerX,centerY])
peakVal.append(centerVal)
Isub[y1:y2,x1:x2] = 0
# print('Found peak (%d,%d) at %d'%(centerX,centerY,centerVal))
break
valid_idx = numpy.array(numpy.nonzero(Isub)).T
if len(valid_idx) > 0:
centerY, centerX = valid_idx[0]
centerVal = Isub[centerY, centerX]
else:
break
kmeans = KMeans(n_clusters=2).fit(numpy.array(peakVal).reshape(-1,1))
# print(kmeans.cluster_centers_, numpy.sum(kmeans.labels_==0), numpy.sum(kmeans.labels_==1))
target_label = numpy.argmax(kmeans.cluster_centers_)
if dataset=='beach':
peaks = numpy.array(peaks)[numpy.array(peakVal)>100]
elif dataset=='shore':
peaks = numpy.array(peaks)[numpy.array(peakVal)>85]
else:
peaks = numpy.array(peaks)[kmeans.labels_ == target_label]
Isub = image_np.copy()
mask = numpy.zeros(Isub.shape, dtype=bool)
for x,y in peaks:
xl = max(0,x-margin)
xr = min(Isub.shape[1],x+margin)
yl = max(0,y-margin)
yr = min(Isub.shape[0],y+margin)
cropped = Isub[yl:yr, xl:xr]
kmeans = KMeans(n_clusters=2).fit(cropped.reshape(-1,1))
# print('kmeans %.2f (%d) %.2f (%d)'%(kmeans.cluster_centers_[0], numpy.sum(kmeans.labels_==0), kmeans.cluster_centers_[1], numpy.sum(kmeans.labels_==1)))
target_label = numpy.argmax(kmeans.cluster_centers_)
M = kmeans.labels_.reshape(cropped.shape)==target_label
ym, xm = numpy.nonzero(M)
ym += yl
xm += xl
mask[ym,xm] = True
t2 = time.time()
dt[ybound:ybound+imheight,xbound:xbound+imwidth] = mask
err_viz = numpy.zeros((image_np.shape[0], image_np.shape[1], 3), dtype=numpy.uint8)
if image_id < test_idx:
image_id += 1
continue
gt_sub = gt[ybound:ybound+imheight, xbound:xbound+imwidth] > 0
dt_sub = dt[ybound:ybound+imheight, xbound:xbound+imwidth]
current_tp = numpy.logical_and(gt_sub,dt_sub)
current_fp = numpy.logical_and(numpy.logical_not(gt_sub),dt_sub)
current_fn = numpy.logical_and(gt_sub,numpy.logical_not(dt_sub))
err_viz[current_tp] = [0,255,0]
err_viz[current_fp] = [0,0,255]
err_viz[current_fn] = [255,0,0]
current_tp = numpy.sum(current_tp)
current_fp = numpy.sum(current_fp)
current_fn = numpy.sum(current_fn)
prc = 1.0*current_tp/(current_tp+current_fp+1)
rcl = 1.0*current_tp/(current_tp+current_fn+1)
tp += current_tp
fp += current_fp
fn += current_fn
ret, gt_com = cv2.connectedComponents(gt_sub.astype(numpy.uint8))
ret, dt_com = cv2.connectedComponents(dt_sub.astype(numpy.uint8))
num_gt = 0
num_dt = 0
for i in range(1, gt_com.max()+1):
if numpy.sum(gt_com==i) > min_cluster:
num_gt += 1
gt_com[gt_com==i] = num_gt
else:
gt_com[gt_com==i] = 0
for i in range(1, dt_com.max()+1):
if numpy.sum(dt_com==i) > min_cluster:
num_dt += 1
dt_com[dt_com==i] = num_dt
else:
dt_com[dt_com==i] = 0
current_tp = 0
dt_matched = numpy.zeros(num_dt, dtype=bool)
for i in range(1, gt_com.max()+1):
for j in range(1, dt_com.max()+1):
if dt_matched[j-1]:
continue
m1 = gt_com==i
m2 = dt_com==j
iou = 1.0 * numpy.sum(numpy.logical_and(m1, m2)) / numpy.sum(numpy.logical_or(m1, m2))
if iou > 0:
current_tp += 1
dt_matched[j-1] = True
break
current_fp = numpy.sum(dt_matched==0)
current_fn = num_gt - current_tp
obj_tp += current_tp
obj_fp += current_fp
obj_fn += current_fn
obj_prc = 1.0 * current_tp / (current_tp + current_fp) if current_tp > 0 else 0
obj_rcl = 1.0 * current_tp / (current_tp + current_fn) if current_tp > 0 else 0
gt_viz = numpy.zeros((gt_sub.shape[0], gt_sub.shape[1], 3), dtype=numpy.uint8)
for i in range(1, gt_com.max()+1):
c = numpy.random.randint(0,255,3)
gt_viz[gt_com==i] = c
my, mx = numpy.nonzero(gt_com==i)
x1 = max(mx.min() - 5, 0)
x2 = min(mx.max() + 5, gt_viz.shape[1] - 1)
y1 = max(my.min() - 5, 0)
y2 = min(my.max() + 5, gt_viz.shape[0] - 1)
gt_viz[y1, x1:x2, :] = [255,255,0]
gt_viz[y2, x1:x2, :] = [255,255,0]
gt_viz[y1:y2, x1, :] = [255,255,0]
gt_viz[y1:y2, x2, :] = [255,255,0]
dt_viz = numpy.zeros((dt_sub.shape[0], dt_sub.shape[1], 3), dtype=numpy.uint8)
for i in range(1, dt_com.max()+1):
c = numpy.random.randint(0,255,3)
dt_viz[dt_com==i] = c
my, mx = numpy.nonzero(dt_com==i)
x1 = max(mx.min() - 5, 0)
x2 = min(mx.max() + 5, dt_viz.shape[1] - 1)
y1 = max(my.min() - 5, 0)
y2 = min(my.max() + 5, dt_viz.shape[0] - 1)
dt_viz[y1, x1:x2, :] = [255,255,0]
dt_viz[y2, x1:x2, :] = [255,255,0]
dt_viz[y1:y2, x1, :] = [255,255,0]
dt_viz[y1:y2, x2, :] = [255,255,0]
comp_time.append(t2 - t1)
print('Image #%d Precision:%.2f/%.2f Recall:%.2f/%.2f (%.2fs)'%(image_id, prc,obj_prc,rcl,obj_rcl, t2-t1))
if image_id == save_frame:
Image.fromarray(image_np.astype(numpy.uint8), mode='L').save('results/original_%d.png'%save_frame)
Image.fromarray(dt_viz, mode='RGB').save('results/detected_%s_%d.png'%(method, save_frame))
Image.fromarray(gt_viz, mode='RGB').save('results/ground_truth_%d.png'%save_frame)
print('save_frame',save_frame)
sys.exit(1)
if viz:
plt.clf()
plt.subplot(2,2,1)
plt.imshow(image_np if zoomed_in else I, cmap='gray')
plt.title('Image #%d'%image_id)
plt.subplot(2,2,2)
plt.imshow(gt_sub if zoomed_in else gt, cmap='gray')
plt.subplot(2,2,3)
plt.imshow(dt_viz if zoomed_in else dt, cmap='gray')
plt.subplot(2,2,4)
plt.imshow(gt_viz, cmap='gray')
plt.pause(0.5)
image_id += 1
P = 1.0 * tp / (tp + fp)
R = 1.0 * tp / (tp + fn)
F = 2.0 * P * R / (P + R)
oP = 1.0 * obj_tp / (obj_tp + obj_fp)
oR = 1.0 * obj_tp / (obj_tp + obj_fn)
oF = 2.0 * oP * oR / (oP + oR)
print('Overall Precision:%.3f/%.3f Recall:%.3f/%.3f Fscore:%.3f/%.3f (t=%.6fs)'%(P, oP, R, oR, F, oF, numpy.mean(comp_time)))
|
79298
|
import numpy as np
from supervised.algorithms.knn import KNeighborsAlgorithm, KNeighborsRegressorAlgorithm
import optuna
from supervised.utils.metric import Metric
from supervised.algorithms.registry import BINARY_CLASSIFICATION
from supervised.algorithms.registry import MULTICLASS_CLASSIFICATION
from supervised.algorithms.registry import REGRESSION
class KNNObjective:
def __init__(
self,
ml_task,
X_train,
y_train,
sample_weight,
X_validation,
y_validation,
sample_weight_validation,
eval_metric,
n_jobs,
random_state,
):
self.ml_task = ml_task
self.X_train = X_train
self.y_train = y_train
self.sample_weight = sample_weight
self.X_validation = X_validation
self.y_validation = y_validation
        self.sample_weight_validation = sample_weight_validation
        self.eval_metric = eval_metric
self.n_jobs = n_jobs
self.seed = random_state
def __call__(self, trial):
try:
params = {
"n_neighbors": trial.suggest_int("n_neighbors", 1, 128),
"weights": trial.suggest_categorical(
"weights", ["uniform", "distance"]
),
"n_jobs": self.n_jobs,
"rows_limit": 100000,
"ml_task": self.ml_task,
}
Algorithm = (
KNeighborsRegressorAlgorithm
if self.ml_task == REGRESSION
else KNeighborsAlgorithm
)
model = Algorithm(params)
model.fit(self.X_train, self.y_train, sample_weight=self.sample_weight)
preds = model.predict(self.X_validation)
score = self.eval_metric(self.y_validation, preds)
if Metric.optimize_negative(self.eval_metric.name):
score *= -1.0
except optuna.exceptions.TrialPruned as e:
raise e
except Exception as e:
print("Exception in KNNObjective", str(e))
return None
return score
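# Minimal usage sketch (illustrative; assumes mljar-supervised's Metric is
# constructed from a params dict, and that studies minimize the objective,
# matching the sign flip above):
# objective = KNNObjective(
#     BINARY_CLASSIFICATION, X_train, y_train, None,
#     X_validation, y_validation, None,
#     Metric({"name": "logloss"}), n_jobs=-1, random_state=42,
# )
# study = optuna.create_study(direction="minimize")
# study.optimize(objective, n_trials=25)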
|
79318
|
import os
from gtts import gTTS
import playsound
def speak(text):  # https://pypi.org/project/gtts/
    tts = gTTS(text=text, lang='en')
    tts.save("speech.mp3")
    playsound.playsound("speech.mp3", True)
    os.remove("speech.mp3")
'''
import pyttsx3 #https://pypi.org/project/pyttsx3/
def speak(text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
'''
# pyttsx3 was showing an error, so I switched to gTTS.
# Still, I have not found any better alternative to pyttsx3 and gTTS,
# so I will keep them both for now; you decide which one you want to use.
|
79327
|
from __future__ import annotations
import pytest
from testing.runner import and_exit
@pytest.mark.parametrize('key', ('^C', 'Enter'))
def test_replace_cancel(run, key):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press(key)
h.await_text('cancelled')
def test_replace_invalid_regex(run):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(')
h.await_text("invalid regex: '('")
def test_replace_invalid_replacement(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_0')
h.await_text('replace with:')
h.press_and_enter('\\')
h.await_text('invalid replacement string')
def test_replace_cancel_at_replace_string(run):
with run() as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('hello')
h.await_text('replace with:')
h.press('^C')
h.await_text('cancelled')
@pytest.mark.parametrize('key', ('y', 'Y'))
def test_replace_actual_contents(run, ten_lines, key):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_0')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press(key)
h.await_text_missing('line_0')
h.await_text('ohai')
h.await_text(' *')
h.await_text('replaced 1 occurrence')
def test_replace_sets_x_hint_properly(run, tmpdir):
f = tmpdir.join('f')
contents = '''\
beginning_line
match me!
'''
f.write(contents)
with run(str(f)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('me!')
h.await_text('replace with:')
h.press_and_enter('youuuu')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_cursor_position(x=6, y=3)
h.press('Up')
h.press('Up')
h.await_cursor_position(x=6, y=1)
def test_replace_cancel_at_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_\d')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('^C')
h.await_text('cancelled')
def test_replace_unknown_characters_at_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_\d')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('?')
h.press('^C')
h.await_text('cancelled')
def test_replace_say_no_to_individual_replace(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[135]')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_1')
h.press('n')
h.await_text('line_3')
h.press('y')
h.await_text_missing('line_5')
h.await_text('replaced 2 occurrences')
def test_replace_all(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter(r'line_(\d)')
h.await_text('replace with:')
h.press_and_enter(r'ohai+\1')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line')
h.await_text('ohai+1')
h.await_text('replaced 10 occurrences')
def test_replace_with_empty_string(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_1')
h.await_text('replace with:')
h.press('Enter')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_1')
def test_replace_search_not_found(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('wat')
# TODO: would be nice to not prompt for a replace string in this case
h.await_text('replace with:')
h.press('Enter')
h.await_text('no matches')
def test_replace_small_window_size(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line')
h.await_text('replace with:')
h.press_and_enter('wat')
h.await_text('replace [yes, no, all]?')
with h.resize(width=8, height=24):
h.await_text('replace…')
h.press('^C')
def test_replace_height_1_highlight(run, tmpdir):
f = tmpdir.join('f')
f.write('x' * 90)
with run(str(f)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('^x+$')
h.await_text('replace with:')
h.press('Enter')
h.await_text('replace [yes, no, all]?')
with h.resize(width=80, height=1):
h.await_text_missing('xxxxx')
h.await_text('xxxxx')
h.press('^C')
def test_replace_line_goes_off_screen(run):
with run() as h, and_exit(h):
h.press(f'{"a" * 20}{"b" * 90}')
h.press('^A')
h.await_text(f'{"a" * 20}{"b" * 59}»')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('b+')
h.await_text('replace with:')
h.press_and_enter('wat')
h.await_text('replace [yes, no, all]?')
h.await_text(f'{"a" * 20}{"b" * 59}»')
h.press('y')
h.await_text(f'{"a" * 20}wat')
h.await_text('replaced 1 occurrence')
def test_replace_undo_undoes_only_one(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line')
h.await_text('replace with:')
h.press_and_enter('wat')
h.press('y')
h.await_text_missing('line_0')
h.press('y')
h.await_text_missing('line_1')
h.press('^C')
h.press('M-u')
h.await_text('line_1')
h.await_text_missing('line_0')
def test_replace_multiple_occurrences_in_line(run):
with run() as h, and_exit(h):
h.press('baaaaabaaaaa')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('a+')
h.await_text('replace with:')
h.press_and_enter('q')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text('bqbq')
def test_replace_after_wrapping(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('Down')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[02]')
h.await_text('replace with:')
h.press_and_enter('ohai')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_2')
h.press('y')
h.await_text_missing('line_0')
h.await_text('replaced 2 occurrences')
def test_replace_after_cursor_after_wrapping(run):
with run() as h, and_exit(h):
h.press('baaab')
h.press('Left')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('b')
h.await_text('replace with:')
h.press_and_enter('q')
h.await_text('replace [yes, no, all]?')
h.press('n')
h.press('y')
h.await_text('replaced 1 occurrence')
h.await_text('qaaab')
def test_replace_separate_line_after_wrapping(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('Down')
h.press('Down')
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('line_[01]')
h.await_text('replace with:')
h.press_and_enter('_')
h.await_text('replace [yes, no, all]?')
h.press('y')
h.await_text_missing('line_0')
h.press('y')
h.await_text_missing('line_1')
def test_replace_with_newline_characters(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(line)_([01])')
h.await_text('replace with:')
h.press_and_enter(r'\1\n\2')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line_0')
h.await_text_missing('line_1')
h.await_text('line\n0\nline\n1\n')
def test_replace_with_multiple_newline_characters(run, ten_lines):
with run(str(ten_lines)) as h, and_exit(h):
h.press('^\\')
h.await_text('search (to replace):')
h.press_and_enter('(li)(ne)_(1)')
h.await_text('replace with:')
h.press_and_enter(r'\1\n\2\n\3\n')
h.await_text('replace [yes, no, all]?')
h.press('a')
h.await_text_missing('line_1')
h.await_text('li\nne\n1\n\nline_2')
|
79369
|
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Category_Handler, COCO_Category
from annotation_utils.coco.structs import COCO_Dataset
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from typing import cast
from annotation_utils.coco.structs import COCO_Image, COCO_Annotation
from common_utils.path_utils import get_rootname_from_filename, get_extension_from_filename
from common_utils.common_types.point import Point2D
from tqdm import tqdm
import cv2
from logger import logger
from typing import List
target_src_dir = '/home/clayton/workspace/prj/data_keep/data/ndds/bolt_markMap_2020.08.18-13.03.35'
target_dst_dir = 'bolt_kpt'
make_dir_if_not_exists(target_dst_dir)
delete_all_files_in_dir(target_dst_dir)
# Load NDDS Dataset
logger.info('Loading NDDS Dataset')
ndds_dataset = NDDS_Dataset.load_from_dir(
json_dir=target_src_dir,
show_pbar=True
)
delete_idx_list = []
# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for i, frame in enumerate(ndds_dataset.frames):
for ann_obj in frame.ndds_ann.objects:
if ann_obj.class_name.startswith('bolt'):
            if ann_obj.visibility == 0:
delete_idx_list.append(i)
obj_type, obj_name = 'seg', 'bolt-roi'
instance_name = ann_obj.class_name.replace('bolt', '')
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
elif ann_obj.class_name=='mark-inner':
obj_type, obj_name = 'seg', 'mark-inner'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
elif ann_obj.class_name=='mark-middle':
obj_type, obj_name = 'seg', 'mark-middle'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
elif ann_obj.class_name=='mark-outer':
obj_type, obj_name = 'seg', 'mark-outer'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
# keypoints
elif ann_obj.class_name.startswith('kpt-ia'):
obj_type, obj_name = 'kpt', 'mark-inner'
contained_name = 'ia'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
elif ann_obj.class_name.startswith('kpt-ib'):
obj_type, obj_name = 'kpt', 'mark-inner'
contained_name = 'ib'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
elif ann_obj.class_name.startswith('kpt-oa'):
obj_type, obj_name = 'kpt', 'mark-outer'
contained_name = 'oa'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
elif ann_obj.class_name.startswith('kpt-ob'):
obj_type, obj_name = 'kpt', 'mark-outer'
contained_name = 'ob'
instance_name = str(0)
ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
for idx in sorted(set(delete_idx_list), reverse=True):
    del ndds_dataset.frames[idx]
    print(f"Deleted frame index {idx}")
# Bolt ROI Dataset Creation
logger.info('Creating Bolt ROI Dataset')
bolt_roi_categories = COCO_Category_Handler()
print(f"Bolt_roi_categories :{bolt_roi_categories}")
bolt_roi_categories.append(
COCO_Category(
id=len(bolt_roi_categories),
name='bolt-roi'
)
)
print(f"Bolt_roi_categories :{bolt_roi_categories}")
bolt_roi_dataset = COCO_Dataset.from_ndds(
ndds_dataset=ndds_dataset,
categories=bolt_roi_categories,
naming_rule='type_object_instance_contained', delimiter='_',
ignore_unspecified_categories=True,
show_pbar=True,
bbox_area_threshold=1,
default_visibility_threshold=0.01,
visibility_threshold_dict={'bolt-roi': 0.01},
allow_unfound_seg=False,
class_merge_map={
'seg_mark-inner_0': 'seg_bolt-roi_0',
'seg_mark-middle_0': 'seg_bolt-roi_0',
'seg_mark-outer_0': 'seg_bolt-roi_0'
}
)
bolt_roi_dst_dir = f'{target_dst_dir}/bolt_roi'
make_dir_if_not_exists(bolt_roi_dst_dir)
bolt_roi_dataset.move_images(
dst_img_dir=bolt_roi_dst_dir,
preserve_filenames=True, overwrite_duplicates=False, update_img_paths=True, overwrite=True,
show_pbar=True
)
bolt_roi_dataset.save_to_path(f'{bolt_roi_dst_dir}/output.json', overwrite=True)
#preview
# bolt_roi_dataset.display_preview(show_details=True, window_name='Bolt ROI')
# Mark (Not Cropped) Dataset Creation
logger.info('Creating Mark Dataset (Not Cropped Version)')
mark_categories = COCO_Category_Handler()
mark_categories.append(
COCO_Category(
id=len(mark_categories),
name='mark-inner',
keypoints=["ia","ib"],
skeleton=[[0,1]]
)
)
mark_categories.append(
COCO_Category(
id=len(mark_categories),
name='mark-outer',
keypoints=["oa","ob"],
skeleton=[[0,1]]
)
)
mark_dataset = COCO_Dataset.from_ndds(
ndds_dataset=ndds_dataset,
categories=mark_categories,
naming_rule='type_object_instance_contained', delimiter='_',
ignore_unspecified_categories=True,
show_pbar=True,
bbox_area_threshold=1,
default_visibility_threshold=0.10,
visibility_threshold_dict={'bolt-roi': 0.01},
allow_unfound_seg=False,
class_merge_map={
'seg_mark-middle_0': 'seg_mark-inner_0',
#'seg_mark_2': 'seg_mark_0'
}
)
# mark_dataset.save_to_path('uncropped_mark.json', overwrite=True)
mark_dataset.display_preview(show_details=True, window_name='Mark')
# Mark (Cropped) Dataset Creation
logger.info('Creating Mark Dataset (Cropped Version)')
marker_dst_dir = f'{target_dst_dir}/marker'
make_dir_if_not_exists(marker_dst_dir)
cropped_mark_dataset = COCO_Dataset.new()
cropped_mark_dataset.categories = mark_categories.copy()
cropped_mark_dataset.licenses = mark_dataset.licenses.copy()
crop_pbar = tqdm(total=len(bolt_roi_dataset.images), unit='image(s)')
crop_pbar.set_description('Cropping')
for roi_image in bolt_roi_dataset.images:
orig_img = cv2.imread(roi_image.coco_url)
roi_anns = bolt_roi_dataset.annotations.get_annotations_from_imgIds([roi_image.id])
mark_images = mark_dataset.images.get_images_from_file_name(roi_image.file_name)
assert len(mark_images) == 1
mark_image = mark_images[0]
    mark_anns = mark_dataset.annotations.get_annotations_from_imgIds([mark_image.id])
#assert len(roi_anns) == len(mark_anns)
for i, roi_ann in enumerate(roi_anns):
roi_img = roi_ann.bbox.crop_from(orig_img)
img_rootname = get_rootname_from_filename(roi_image.file_name)
img_extension = get_extension_from_filename(roi_image.file_name)
save_filename = f'{img_rootname}_{i}.{img_extension}'
save_path = f'{marker_dst_dir}/{save_filename}'
cv2.imwrite(save_path, roi_img)
cropped_coco_image = COCO_Image.from_img_path(
img_path=save_path,
license_id=cropped_mark_dataset.licenses[0].id,
image_id=len(cropped_mark_dataset.images)
)
cropped_mark_dataset.images.append(cropped_coco_image)
mark_ann_found = False
mark_ann_list = cast(List[COCO_Annotation], [])
        for j in list(range(len(mark_anns)))[::-1]:
            if roi_ann.bbox.contains(mark_anns[j].bbox):
                mark_ann_found = True
                mark_ann_list.append(mark_anns[j].copy())
                del mark_anns[j]
        if not mark_ann_found:
            raise Exception(f'No mark annotation found inside ROI bbox of {roi_image.file_name}')
for mark_ann in mark_ann_list:
mark_ann.segmentation = mark_ann.segmentation - Point2D(x=roi_ann.bbox.xmin, y=roi_ann.bbox.ymin)
mark_ann.bbox = mark_ann.segmentation.to_bbox()
mark_ann.keypoints = mark_ann.keypoints - Point2D(x=roi_ann.bbox.xmin, y=roi_ann.bbox.ymin)
cropped_coco_ann = COCO_Annotation(
id=len(cropped_mark_dataset.annotations),
category_id=mark_ann.category_id,
image_id=cropped_coco_image.id,
segmentation=mark_ann.segmentation,
bbox=mark_ann.bbox,
area=mark_ann.bbox.area(),
keypoints=mark_ann.keypoints,
num_keypoints=len(mark_ann.keypoints),
keypoints_3d=mark_ann.keypoints_3d
)
cropped_mark_dataset.annotations.append(cropped_coco_ann)
crop_pbar.update()
cropped_mark_dataset.save_to_path(f'{marker_dst_dir}/output.json')
cropped_mark_dataset.display_preview(show_details=True)
cropped_mark_dataset.save_video(
save_path=f'{marker_dst_dir}/preview.mp4',
fps=5,
show_details=True
)
|
79377
|
import asyncio
from abc import ABC, abstractmethod
from .colors import BLACK_ON_BLACK
from .io import KeyPressEvent, MouseEvent, PasteEvent, io
from .widgets._root import _Root
RESIZE_POLL_INTERVAL = 0.5 # Seconds between polling for resize events.
RENDER_INTERVAL = 0 # Seconds between screen renders.
class App(ABC):
"""
Base for creating terminal applications.
Parameters
----------
exit_key : KeyPressEvent | None, default: KeyPressEvent.ESCAPE
Quit the app when this key is pressed.
default_char : str, default: " "
Default background character for root widget.
default_color_pair : ColorPair, default: BLACK_ON_BLACK
Default background color pair for root widget.
title : str | None, default: None
Set terminal title (if supported).
"""
def __init__(
self,
*,
exit_key=KeyPressEvent.ESCAPE,
default_char=" ",
default_color_pair=BLACK_ON_BLACK,
title=None
):
self.exit_key = exit_key
self.default_char = default_char
self.default_color_pair = default_color_pair
self.title = title
@abstractmethod
async def on_start(self):
"""
Coroutine scheduled when app is run.
"""
def run(self):
"""
Run the app.
"""
try:
asyncio.run(self._run_async())
except asyncio.CancelledError:
pass
def exit(self):
for task in asyncio.all_tasks():
task.cancel()
async def _run_async(self):
"""
Build environment, create root, and schedule app-specific tasks.
"""
with io() as (env_in, env_out):
self.root = root = _Root(
app=self,
env_out=env_out,
default_char=self.default_char,
default_color_pair=self.default_color_pair,
)
if self.title:
env_out.set_title(self.title)
dispatch_press = root.dispatch_press
dispatch_click = root.dispatch_click
dispatch_paste = root.dispatch_paste
def read_from_input():
"""
Read and process input.
"""
for key in env_in.read_keys():
match key:
case self.exit_key:
return self.exit()
case MouseEvent():
dispatch_click(key)
case KeyPressEvent():
dispatch_press(key)
case PasteEvent():
dispatch_paste(key)
async def poll_size():
"""
Poll terminal size every `RESIZE_POLL_INTERVAL` seconds.
"""
size = env_out.get_size()
resize = root.resize
while True:
await asyncio.sleep(RESIZE_POLL_INTERVAL)
new_size = env_out.get_size()
if size != new_size:
resize(new_size)
size = new_size
async def auto_render():
"""
Render screen every `RENDER_INTERVAL` seconds.
"""
render = root.render
while True:
await asyncio.sleep(RENDER_INTERVAL)
render()
with env_in.raw_mode(), env_in.attach(read_from_input):
await asyncio.gather(
poll_size(),
auto_render(),
self.on_start(),
)
def add_widget(self, widget):
self.root.add_widget(widget)
def add_widgets(self, *widgets):
self.root.add_widgets(*widgets)
@property
def children(self):
return self.root.children
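# Minimal usage sketch (illustrative): subclass App and implement on_start,
# then call run(); exit_key defaults to escape as documented above.
# class MyApp(App):
#     async def on_start(self):
#         pass  # schedule app-specific tasks here
# MyApp(title="demo").run()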
|
79435
|
import unittest
import numpy as np
import logging
from dsbox.ml.neural_networks.keras_factory.text_models import LSTMFactory
from dsbox.ml.neural_networks.processing.workflow import TextNeuralNetPipeline, ImageNeuralNetPipeline
logging.getLogger("tensorflow").setLevel(logging.WARNING)
np.random.seed(42)
class TestPipeline(unittest.TestCase):
def test_fit_predict_text_nn_pipeline_should_return_some_result(self):
# given
x_train = np.array(['this is really really awesome !',
'it is so awesome !',
'that sucks']
)
y_train = np.array([1, 1, 0])
# when
model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
model.fit(x_train, y_train, verbose=0)
x_test = np.array(['it is really awesome !'])
y_pred = model.predict(x_test)
# then
self.assertIsNotNone(y_pred)
def test_fit_predict_proba_text_nn_pipeline_should_return_some_result(self):
# given
x_train = np.array(['this is really really awesome !',
'it is so awesome !',
'that sucks']
)
y_train = np.array([1, 1, 0])
# when
model = TextNeuralNetPipeline(factory_class=LSTMFactory, num_labels=2)
model.fit(x_train, y_train, verbose=0)
x_test = np.array(['it is really awesome !'])
y_pred = model.predict_proba(x_test)[0]
# then
self.assertIsNotNone(y_pred)
def test_fit_image_nn_workflow_should_set_params_automatically(self):
# given
workflow = ImageNeuralNetPipeline(weights="imagenet")
# when
workflow.fit()
# then
self.assertTupleEqual((299, 299), workflow.img_size_)
self.assertEqual("block14_sepconv2_act", workflow.last_conv_layer_name_)
self.assertListEqual(["avg_pool", "predictions"], workflow.classifier_layer_names_)
|
79439
|
from kaa.keyboard import *
# TODO: Splitting the key bind tables does not make sense;
# put them together.
# application commands
app_keys = {
(alt, 'm'): 'app.mainmenu',
(alt, '/'): 'app.mainmenu',
f1: 'app.mainmenu',
f9: 'app.global.prev',
f10: 'app.global.next',
(alt, 'w'): 'menu.window',
(alt, 'z'): 'tools.suspend',
}
# default cursor commands
cursor_keys = {
left: 'cursor.left',
right: 'cursor.right',
up: 'cursor.up',
down: 'cursor.down',
(ctrl, left): 'cursor.word-left',
(ctrl, right): 'cursor.word-right',
pagedown: 'cursor.pagedown',
pageup: 'cursor.pageup',
(shift, left): 'cursor.left.select',
(shift, right): 'cursor.right.select',
(shift, up): 'cursor.up.select',
(shift, down): 'cursor.down.select',
(shift, ctrl, left): 'cursor.word-left.select',
(shift, ctrl, right): 'cursor.word-right.select',
    home: 'cursor.home',
    end: 'cursor.end',
(shift, home): 'cursor.home.select',
(shift, end): 'cursor.end.select',
(ctrl, home): 'cursor.top-of-file',
(ctrl, end): 'cursor.end-of-file',
(shift, ctrl, home): 'cursor.top-of-file.select',
(shift, ctrl, end): 'cursor.end-of-file.select',
(alt, 'a'): 'selection.all',
(alt, 'c'): 'selection.expand-sel',
(ctrl, '@'): 'selection.set-mark',
(alt, '#'): 'selection.set-rectangle-mark',
}
# edit commands
edit_command_keys = {
backspace: 'edit.backspace',
(ctrl, 'h'): 'edit.backspace',
(alt, 'h'): 'edit.backspace.word',
delete: 'edit.delete',
(ctrl, 'd'): 'edit.delete',
(alt, 'd'): 'edit.delete.word',
(ctrl, 'k'): 'edit.delete.line',
(alt, 'k'): 'edit.delete.currentline',
'\r': 'edit.newline',
'\n': 'edit.newline',
(ctrl, 'x'): 'edit.cut',
(ctrl, 'c'): 'edit.copy',
(ctrl, 'v'): 'edit.paste',
(ctrl, 'z'): 'edit.undo',
(ctrl, 'y'): 'edit.redo',
(alt, 'v'): 'edit.clipboard-history',
}
addtional_edit_command_keys = {
(ctrl, 'g'): 'cursor.go-to-line',
tab: 'edit.indent',
(shift, tab): 'edit.dedent',
((ctrl, 'u'), (alt, '!')): 'tools.execute-shell-command',
(ctrl, 'o'): 'edit.word-complete',
}
# macro commands
macro_command_keys = {
f6: 'macro.toggle-record',
f5: 'macro.run',
}
# rerun commands
rerun_keys = {
(alt, '.'): 'command.rerun',
}
# search commands
search_command_keys = {
(ctrl, 's'): 'search.showsearch',
(alt, 's'): 'search.showreplace',
f2: 'search.prev',
f3: 'search.next',
}
# emacs like keys
emacs_keys = {
(ctrl, 'b'): 'cursor.left',
(ctrl, 'f'): 'cursor.right',
(ctrl, 'p'): 'cursor.prev-line',
(ctrl, 'n'): 'cursor.next-line',
(alt, 'b'): 'cursor.word-left',
(alt, 'f'): 'cursor.word-right',
(ctrl, 'a'): 'cursor.top-of-line',
(ctrl, 'e'): 'cursor.end-of-line',
(ctrl, '^'): 'cursor.first-letter-of-line',
(alt, 'n'): 'cursor.pagedown',
(alt, 'p'): 'cursor.pageup',
(alt, '<'): 'cursor.top-of-file',
(alt, '>'): 'cursor.end-of-file',
}
# vi like commands
command_mode_keys = {
# editmode change
'i': 'editmode.insert',
'R': 'editmode.replace',
'A': ('editmode.insert', 'cursor.end-of-line'),
'v': ('editmode.visual', 'selection.set-mark'),
'V': ('editmode.visual-linewise', 'selection.set-linewise-mark'),
# cursor command
'h': 'cursor.left',
'l': 'cursor.right',
'k': 'cursor.up',
'j': 'cursor.down',
'w': 'cursor.word-right',
'b': 'cursor.word-left',
'0': 'cursor.top-of-line',
'^': 'cursor.first-letter-of-line',
'$': 'cursor.end-of-line',
'gg': 'cursor.top-of-file',
'G': 'cursor.end-of-file',
(ctrl, 'b'): 'cursor.pageup',
(ctrl, 'f'): 'cursor.pagedown',
# edit
'r': 'edit.replace-next-char',
'x': 'edit.delete',
'd': 'edit.delete-next-move',
# undo/redo
'u': 'edit.undo',
(ctrl, 'r'): 'edit.redo',
# clipboard
'y': ('edit.copy', 'editmode.command'),
}
visual_mode_keys = {
left: 'cursor.left',
right: 'cursor.right',
up: 'cursor.up',
down: 'cursor.down',
pagedown: 'cursor.pagedown',
pageup: 'cursor.pageup',
'h': 'cursor.left',
'l': 'cursor.right',
'k': 'cursor.up',
'j': 'cursor.down',
'gg': 'cursor.top-of-file',
'G': 'cursor.end-of-file',
'y': ('edit.copy', 'selection.end-cursor', 'editmode.command'),
}
visual_linewise_mode_keys = {
up: 'cursor.prev-line',
down: 'cursor.next-line',
'k': 'cursor.prev-line',
'j': 'cursor.next-line',
'y': ('edit.copy', 'selection.end-cursor', 'editmode.command'),
}
|
79443
|
from rest_framework import authentication, exceptions
from rest_framework.authentication import get_authorization_header
from django.utils.translation import ugettext_lazy as _
from .models import Token
class TokenAuthentication(authentication.BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
    Authorization: Token <token-key>
"""
model = Token
"""
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = get_authorization_header(request).split()
if not auth or auth[0].lower() != b'token':
return None
if len(auth) == 1:
msg = _('Invalid token header. No credentials provided.')
raise exceptions.AuthenticationFailed(msg)
elif len(auth) > 2:
msg = _('Invalid token header. Token string should not contain '
'spaces.')
raise exceptions.AuthenticationFailed(msg)
try:
token = auth[1].decode()
except UnicodeError:
msg = _('Invalid token header. Token string should not contain '
'invalid characters.')
raise exceptions.AuthenticationFailed(msg)
return self.authenticate_credentials(token)
def authenticate_credentials(self, key):
try:
token = self.model.objects.get(key=key)
except self.model.DoesNotExist:
raise exceptions.AuthenticationFailed(_('Invalid token.'))
return (None, token)
def authenticate_header(self, request):
return 'Token'
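# Minimal usage sketch (standard DRF settings; the dotted path is hypothetical):
# REST_FRAMEWORK = {
#     'DEFAULT_AUTHENTICATION_CLASSES': (
#         'myapp.authentication.TokenAuthentication',
#     ),
# }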
|
79447
|
from genmod.vcf_tools.header_parser import HeaderParser
def test_parse_info():
## GIVEN a header object
head = HeaderParser()
assert 'MQ' not in head.info_dict
info_line = '##INFO=<ID=MQ,Number=1,Type=Float,Description="RMS Mapping Quality">'
## WHEN parsing a correct info line
head.parse_meta_data(info_line)
## THEN assert it is added to the parser
assert 'MQ' in head.info_dict
def test_parse_contig():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1,length=249250621,assembly=b37>'
    ## WHEN parsing a correct contig line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
def test_parse_contig_no_length():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1,assembly=b37>'
    ## WHEN parsing a correct contig line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
def test_parse_minimal_contig():
## GIVEN a header object
head = HeaderParser()
assert '1' not in head.contig_dict
contig_line = '##contig=<ID=1>'
    ## WHEN parsing a correct contig line
head.parse_meta_data(contig_line)
## THEN assert it is added to the parser
assert '1' in head.contig_dict
|
79450
|
import dynet as dy
from .layer_norm import LayerNorm
from . import dy_model
@dy_model
class SublayerConnection:
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, model, size, p):
pc = model.add_subcollection()
self.norm = LayerNorm(pc, size)
self.p = p
self.spec = (size, p)
def __call__(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + dy.dropout(sublayer(self.norm(x)), self.p)
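# Minimal usage sketch (illustrative; assumes a DyNet expression `x` and any
# callable sublayer of matching size, e.g. a feed-forward block):
# out = sublayer_connection(x, lambda h: feed_forward(h))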
|
79476
|
import IPython.lib.demo as ipd
# To use, run ipython, then
#
# In [1]: %run Demos.py
# In [2]: d = ImageDemo()
# In [3]: d()
# In [4]: d()
def ImageDemo():
    return ipd.ClearIPDemo('BasicTutorial1/Image.py')
def InputOutputDemo():
    return ipd.ClearIPDemo('BasicTutorial1/InputOutput.py')
def MemoryManagementDemo():
    return ipd.ClearIPDemo('BasicTutorial1/MemoryManagement.py')
def FiltersDemo():
    return ipd.ClearIPDemo('BasicTutorial2/Filters.py')
def MorphologyDemo():
    return ipd.ClearIPDemo('BasicTutorial2/Morphology.py')
def MeasureRegionsDemo():
    return ipd.ClearIPDemo('InteractiveTutorial/MeasureRegions.py')
def BorderChangeDemo():
    return ipd.ClearIPDemo('InteractiveTutorial/05-01-BorderChange.py')
def NumpyDemo():
    return ipd.ClearIPDemo('InteractiveTutorial/05-02-Numpy.py')
def RidgeDetectionDemo():
    return ipd.ClearIPDemo('InteractiveTutorial/05-04-RidgeDetection.py')
|
79489
|
def extractMichilunWordpressCom(item):
'''
Parser for 'michilun.wordpress.com'
'''
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
79500
|
from itertools import tee
import numpy as np
import scipy.interpolate as intp
from scipy.signal import savgol_filter
def get_edge_bin(array):
"""Detect the edge indcies of a binary 1-D array.
Args:
array (:class:`numpy.ndarray`): A list or Numpy 1d array, with binary
(0/1) or boolean (True/False) values.
Returns:
list: A list containing starting and ending indices of the non-zero
blocks.
Examples:
.. code-block:: python
>>> a = [0,1,1,0,0,0,1,0,1]
>>> get_edge_bin(a)
[(1, 3), (6, 7), (8, 9)]
>>> b = [True, False, True, True, False, False]
>>> get_edge_bin(b)
[(0, 1), (2, 4)]
"""
array1 = np.int64(array)
array1 = np.insert(array1, 0, 0)
array1 = np.append(array1, 0)
tmp = array1 - np.roll(array1, 1)
i1_lst = np.nonzero(tmp == 1)[0] - 1
i2_lst = np.nonzero(tmp ==-1)[0] - 1
return list(zip(i1_lst, i2_lst))
def get_local_minima(x, window=None):
"""Get the local minima of a 1d array in a window.
Args:
x (:class:`numpy.ndarray`): A list or Numpy 1d array.
        window (*int* or :class:`numpy.ndarray`): An odd integer or a list of
            odd integers as the lengths of the searching windows.
Returns:
tuple: A tuple containing:
* **index** (:class:`numpy.ndarray`): A numpy 1d array containing
indices of all local minima.
* **x[index]** (:class:`numpy.ndarray`): A numpy 1d array containing
values of all local minima.
"""
x = np.array(x)
dif = np.diff(x)
ind = dif > 0
tmp = np.logical_xor(ind, np.roll(ind,1))
idx = np.logical_and(tmp,ind)
index = np.where(idx)[0]
if window is None:
# window is not given
return index, x[index]
else:
# window is given
if isinstance(window, int):
# window is an integer
window = np.repeat(window, len(x))
elif isinstance(window, np.ndarray):
# window is a numpy array
#if np.issubdtype(window.dtype, int):
if window.dtype.type in [np.int16, np.int32, np.int64]:
pass
else:
# window are not integers
print('window array are not integers')
raise ValueError
else:
raise ValueError
if 0 in window%2:
# not all of the windows are odd
raise ValueError
halfwin_lst = (window-1)//2
index_lst = []
for i in index:
halfwin = halfwin_lst[i]
i1 = max(0, i-halfwin)
i2 = min(i+halfwin+1, len(x))
if i == x[i1:i2].argmin() + i1:
index_lst.append(i)
if len(index_lst)>0:
index_lst = np.array(index_lst)
return index_lst, x[index_lst]
else:
return np.array([]), np.array([])
def implete_none(lst):
"""Replace the None elemnets at the beginning and the end of list by auto
increment integers.
Convert the first and last few `None` elements to auto increment integers.
These integers are determined by the first and last integers in the input
array.
While the `None` elements between two integers in the input list will
remain.
Args:
        lst (list): A list containing None values.
Returns:
newlst (list): A list containing auto increment integers.
Examples:
.. code-block:: python
>>> a = [None,None,3,4,None,5,6,None,None]
>>> implete_none(a)
[1, 2, 3, 4, None, 5, 6, 7, 8]
"""
# filter the None values
notnone_lst = [v for v in lst if v is not None]
for i, v in enumerate(lst):
if v == notnone_lst[0]:
# first not-None element and its index
notnone1 = i
value1 = v
if v == notnone_lst[-1]:
# last not-None element and its index
notnone2 = i
value2 = v
newlst = []
for i,v in enumerate(lst):
if i < notnone1:
newlst.append(value1-(notnone1-i))
elif i > notnone2:
newlst.append(value2+(i-notnone2))
else:
newlst.append(v)
return newlst
def derivative(*args, **kwargs):
"""Get the first derivative of data arrays (*x*, *y*).
If **y** is not given, the first argument will be taken as **y**, and the
differential of the input array will be returned.
Args:
x (list or :class:`numpy.ndarray`): X-values of the input array (optional).
y (list or :class:`numpy.ndarray`): Y-values of the input array.
points (int): Number of points used to calculate derivative
(optional, default is 3).
Returns:
:class:`numpy.ndarray`: Derivative of the input array.
"""
if len(args) == 1:
y = np.array(args[0], dtype=np.float64)
x = np.arange(y.size)
elif len(args) == 2:
x = np.array(args[0], dtype=np.float64)
y = np.array(args[1], dtype=np.float64)
else:
raise ValueError
npts = x.size
points = kwargs.pop('points', 3)
if points == 3:
der = (np.roll(y,-1) - np.roll(y,1))/(np.roll(x,-1) - np.roll(x,1))
a = np.array([-3., 4., -1.])
der[0] = (a*y[0:3]).sum() / (a*x[0:3]).sum()
der[-1] = (-a[::-1]*y[-3:]).sum() / (-a[::-1]*x[-3:]).sum()
return der
else:
raise ValueError
def pairwise(array):
"""Return pairwises of an iterable arrary.
Args:
array (list or :class:`numpy.ndarray`): The input iterable array.
Returns:
:class:`zip`: zip objects.
"""
a, b = tee(array)
next(b, None)
return zip(a, b)
def smooth(array, points, deg):
"""Smooth an array.
Args:
array (:class:`numpy.ndarray`): Input array.
points (int): Points of smoothing.
deg (int): Degree of smoothing.
Returns:
:class:`numpy.ndarray`: smoothed array
"""
    n = array.size
    if points == 5:
        if deg == 2:
            w_2 = np.array([31., 9., -3., -5., 3.])/35.
            w_1 = np.array([ 9., 13., 12., 6., -5.])/35.
            w_0 = np.array([-3., 12., 17., 12., -3.])/35.
        elif deg == 3:
            w_2 = np.array([69., 4., -6., 4., -1.])/70.
            w_1 = np.array([ 2., 27., 12., -8., 2.])/35.
            w_0 = np.array([-3., 12., 17., 12., -3.])/35.
        else:
            raise ValueError('deg must be 2 or 3 when points == 5')
    else:
        raise ValueError('only points == 5 is supported')
    a = np.zeros((n, n))
    a[0, 0:5] = w_2
    a[1, 0:5] = w_1
    for i in np.arange(2, n-2):
        a[i, i-2:i+3] = w_0
    a[-2, -5:] = w_1[::-1]
    a[-1, -5:] = w_2[::-1]
    return a.dot(array)
def iterative_savgol_filter(y, winlen=5, order=3, maxiter=10,
upper_clip=None, lower_clip=None):
"""Smooth the input array with Savitzky-Golay filter with lower and/or
upper clippings.
Args:
y (:class:`numpy.ndarray`): Input array.
winlen (int): Window length of Savitzky-Golay filter.
        order (int): Order of Savitzky-Golay filter.
maxiter (int): Maximum number of iterations.
lower_clip (float): Lower sigma-clipping value.
upper_clip (float): Upper sigma-clipping value.
Returns:
tuple: A tuple containing:
* **ysmooth** (:class:`numpy.ndarray`) – Smoothed y values.
* **yres** (:class:`numpy.ndarray`) – Residuals of y values.
* **mask** (:class:`numpy.ndarray`) – Mask of y values.
* **std** (float) – Standard deviation.
"""
x = np.arange(y.size)
    mask = np.ones_like(y, dtype=bool)
for ite in range(maxiter):
# fill masked values in y using interpolation
f = intp.InterpolatedUnivariateSpline(x[mask], y[mask], k=3)
ysmooth = savgol_filter(f(x), window_length=winlen, polyorder=order)
yres = y - ysmooth
std = yres[mask].std()
# generate new mask
        # make a copy of the existing mask (np.bool is removed; use the builtin)
        new_mask = mask.copy()
# give new mask with lower and upper clipping value
if lower_clip is not None:
new_mask *= (yres > -lower_clip * std)
if upper_clip is not None:
new_mask *= (yres < upper_clip * std)
if new_mask.sum() == mask.sum():
break
mask = new_mask
return ysmooth, yres, mask, std
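# --- Usage sketch (illustrative, not part of the module) ---
# Smooth a noisy sine curve while sigma-clipping outliers on both sides.
if __name__ == '__main__':
    _rng = np.random.RandomState(0)
    _y = np.sin(np.linspace(0, 4*np.pi, 200)) + _rng.normal(0, 0.1, 200)
    _ysmooth, _yres, _mask, _std = iterative_savgol_filter(
        _y, winlen=11, order=3, maxiter=5, upper_clip=3.0, lower_clip=3.0)
    print('residual std = {:.4f}, clipped points = {}'.format(_std, (~_mask).sum()))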
|
79503
|
from kaffe.tensorflow import Network
class GoogleNet(Network):
def setup(self):
(self.feed('data')
.conv(7, 7, 64, 2, 2, name='conv1_7x7_s2')
.max_pool(3, 3, 2, 2, name='pool1_3x3_s2')
.lrn(2, 2e-05, 0.75, name='pool1_norm1')
.conv(1, 1, 64, 1, 1, name='conv2_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='conv2_3x3')
.lrn(2, 2e-05, 0.75, name='conv2_norm2')
.max_pool(3, 3, 2, 2, name='pool2_3x3_s2')
.conv(1, 1, 64, 1, 1, name='inception_3a_1x1'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_3a_3x3_reduce')
.conv(3, 3, 128, 1, 1, name='inception_3a_3x3'))
(self.feed('pool2_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_3a_5x5_reduce')
.conv(5, 5, 32, 1, 1, name='inception_3a_5x5'))
(self.feed('pool2_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_3a_pool')
.conv(1, 1, 32, 1, 1, name='inception_3a_pool_proj'))
(self.feed('inception_3a_1x1',
'inception_3a_3x3',
'inception_3a_5x5',
'inception_3a_pool_proj')
.concat(3, name='inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_1x1'))
(self.feed('inception_3a_output')
.conv(1, 1, 128, 1, 1, name='inception_3b_3x3_reduce')
.conv(3, 3, 192, 1, 1, name='inception_3b_3x3'))
(self.feed('inception_3a_output')
.conv(1, 1, 32, 1, 1, name='inception_3b_5x5_reduce')
.conv(5, 5, 96, 1, 1, name='inception_3b_5x5'))
(self.feed('inception_3a_output')
.max_pool(3, 3, 1, 1, name='inception_3b_pool')
.conv(1, 1, 64, 1, 1, name='inception_3b_pool_proj'))
(self.feed('inception_3b_1x1',
'inception_3b_3x3',
'inception_3b_5x5',
'inception_3b_pool_proj')
.concat(3, name='inception_3b_output')
.max_pool(3, 3, 2, 2, name='pool3_3x3_s2')
.conv(1, 1, 192, 1, 1, name='inception_4a_1x1'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 96, 1, 1, name='inception_4a_3x3_reduce')
.conv(3, 3, 208, 1, 1, name='inception_4a_3x3'))
(self.feed('pool3_3x3_s2')
.conv(1, 1, 16, 1, 1, name='inception_4a_5x5_reduce')
.conv(5, 5, 48, 1, 1, name='inception_4a_5x5'))
(self.feed('pool3_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_4a_pool')
.conv(1, 1, 64, 1, 1, name='inception_4a_pool_proj'))
(self.feed('inception_4a_1x1',
'inception_4a_3x3',
'inception_4a_5x5',
'inception_4a_pool_proj')
.concat(3, name='inception_4a_output')
.conv(1, 1, 160, 1, 1, name='inception_4b_1x1'))
(self.feed('inception_4a_output')
.conv(1, 1, 112, 1, 1, name='inception_4b_3x3_reduce')
.conv(3, 3, 224, 1, 1, name='inception_4b_3x3'))
(self.feed('inception_4a_output')
.conv(1, 1, 24, 1, 1, name='inception_4b_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4b_5x5'))
(self.feed('inception_4a_output')
.max_pool(3, 3, 1, 1, name='inception_4b_pool')
.conv(1, 1, 64, 1, 1, name='inception_4b_pool_proj'))
(self.feed('inception_4b_1x1',
'inception_4b_3x3',
'inception_4b_5x5',
'inception_4b_pool_proj')
.concat(3, name='inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_1x1'))
(self.feed('inception_4b_output')
.conv(1, 1, 128, 1, 1, name='inception_4c_3x3_reduce')
.conv(3, 3, 256, 1, 1, name='inception_4c_3x3'))
(self.feed('inception_4b_output')
.conv(1, 1, 24, 1, 1, name='inception_4c_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4c_5x5'))
(self.feed('inception_4b_output')
.max_pool(3, 3, 1, 1, name='inception_4c_pool')
.conv(1, 1, 64, 1, 1, name='inception_4c_pool_proj'))
(self.feed('inception_4c_1x1',
'inception_4c_3x3',
'inception_4c_5x5',
'inception_4c_pool_proj')
.concat(3, name='inception_4c_output')
.conv(1, 1, 112, 1, 1, name='inception_4d_1x1'))
(self.feed('inception_4c_output')
.conv(1, 1, 144, 1, 1, name='inception_4d_3x3_reduce')
.conv(3, 3, 288, 1, 1, name='inception_4d_3x3'))
(self.feed('inception_4c_output')
.conv(1, 1, 32, 1, 1, name='inception_4d_5x5_reduce')
.conv(5, 5, 64, 1, 1, name='inception_4d_5x5'))
(self.feed('inception_4c_output')
.max_pool(3, 3, 1, 1, name='inception_4d_pool')
.conv(1, 1, 64, 1, 1, name='inception_4d_pool_proj'))
(self.feed('inception_4d_1x1',
'inception_4d_3x3',
'inception_4d_5x5',
'inception_4d_pool_proj')
.concat(3, name='inception_4d_output')
.conv(1, 1, 256, 1, 1, name='inception_4e_1x1'))
(self.feed('inception_4d_output')
.conv(1, 1, 160, 1, 1, name='inception_4e_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_4e_3x3'))
(self.feed('inception_4d_output')
.conv(1, 1, 32, 1, 1, name='inception_4e_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_4e_5x5'))
(self.feed('inception_4d_output')
.max_pool(3, 3, 1, 1, name='inception_4e_pool')
.conv(1, 1, 128, 1, 1, name='inception_4e_pool_proj'))
(self.feed('inception_4e_1x1',
'inception_4e_3x3',
'inception_4e_5x5',
'inception_4e_pool_proj')
.concat(3, name='inception_4e_output')
.max_pool(3, 3, 2, 2, name='pool4_3x3_s2')
.conv(1, 1, 256, 1, 1, name='inception_5a_1x1'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 160, 1, 1, name='inception_5a_3x3_reduce')
.conv(3, 3, 320, 1, 1, name='inception_5a_3x3'))
(self.feed('pool4_3x3_s2')
.conv(1, 1, 32, 1, 1, name='inception_5a_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5a_5x5'))
(self.feed('pool4_3x3_s2')
.max_pool(3, 3, 1, 1, name='inception_5a_pool')
.conv(1, 1, 128, 1, 1, name='inception_5a_pool_proj'))
(self.feed('inception_5a_1x1',
'inception_5a_3x3',
'inception_5a_5x5',
'inception_5a_pool_proj')
.concat(3, name='inception_5a_output')
.conv(1, 1, 384, 1, 1, name='inception_5b_1x1'))
(self.feed('inception_5a_output')
.conv(1, 1, 192, 1, 1, name='inception_5b_3x3_reduce')
.conv(3, 3, 384, 1, 1, name='inception_5b_3x3'))
(self.feed('inception_5a_output')
.conv(1, 1, 48, 1, 1, name='inception_5b_5x5_reduce')
.conv(5, 5, 128, 1, 1, name='inception_5b_5x5'))
(self.feed('inception_5a_output')
.max_pool(3, 3, 1, 1, name='inception_5b_pool')
.conv(1, 1, 128, 1, 1, name='inception_5b_pool_proj'))
(self.feed('inception_5b_1x1',
'inception_5b_3x3',
'inception_5b_5x5',
'inception_5b_pool_proj')
.concat(3, name='inception_5b_output')
.avg_pool(7, 7, 1, 1, padding='VALID', name='pool5_7x7_s1')
.fc(1000, relu=False, name='loss3_classifier')
.softmax(name='prob'))
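# --- Usage sketch (illustrative; assumes the standard kaffe/caffe-tensorflow
# Network API, where the constructor takes a {input_name: tensor} dict and
# converted weights are restored with net.load()). 'googlenet.npy' and
# `batch` below are placeholders. ---
#
#     import tensorflow as tf
#     images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
#     net = GoogleNet({'data': images})
#     with tf.Session() as sess:
#         net.load('googlenet.npy', sess)
#         probs = sess.run(net.layers['prob'], feed_dict={images: batch})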
|
79519
|
import os
import logging
from .paths import get_path
_FORMAT = '%(asctime)s:%(levelname)s:%(lineno)s:%(module)s.%(funcName)s:%(message)s'
_formatter = logging.Formatter(_FORMAT, '%H:%M:%S')
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
logging.basicConfig(filename=os.path.join(get_path(), 'spfeas.log'),
filemode='w',
level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.addHandler(_handler)
logger.setLevel(logging.INFO)
class CorruptedBandsError(OverflowError):
"""Raised when bands are corrupted"""
|
79520
|
from pathlib import Path
from unittest.mock import MagicMock
import pytest
from gretel_client.config import configure_session
FIXTURES = Path(__file__).parent / "fixtures"
@pytest.fixture
def get_fixture():
def _(name: str) -> Path:
return FIXTURES / name
return _
@pytest.fixture(scope="function", autouse=True)
def configure_session_client():
configure_session(MagicMock())
@pytest.fixture
def dev_ep() -> str:
return "https://api-dev.gretel.cloud"
|
79549
|
from __future__ import print_function
import os
import unittest
from .test_base_column_profilers import AbstractTestColumnProfiler
from dataprofiler.profilers.column_profile_compilers import \
ColumnPrimitiveTypeProfileCompiler
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestColumnDataTypeProfiler(AbstractTestColumnProfiler, unittest.TestCase):
column_profiler = ColumnPrimitiveTypeProfileCompiler
profile_types = ['data_type', 'statistics', 'data_type_representation']
def setUp(self):
AbstractTestColumnProfiler.setUp(self)
@classmethod
def setUpClass(cls):
super(TestColumnDataTypeProfiler, cls).setUpClass()
if __name__ == '__main__':
unittest.main()
|
79570
|
import unittest
from unittest.mock import MagicMock
import pandas as pd
from pandas.testing import assert_frame_equal
from data_export.pipeline.dataset import Dataset
class TestDataset(unittest.TestCase):
def setUp(self):
example = MagicMock()
example.to_dict.return_value = {"data": "example"}
self.examples = MagicMock()
self.examples.__iter__.return_value = [example]
label = MagicMock()
label.find_by.return_value = {"labels": ["label"]}
self.labels = MagicMock()
self.labels.__iter__.return_value = [label]
def test_to_dataframe(self):
dataset = Dataset(self.examples, self.labels)
df = dataset.to_dataframe()
expected = pd.DataFrame([{"data": "example", "labels": ["label"]}])
assert_frame_equal(df, expected)
|
79619
|
import copy
import logging
from abc import ABC
from typing import Dict, Optional, Type, Union
import torch
from pytorch_lightning import LightningModule
from torch.nn.modules import Module
from torch.utils.data import DataLoader
from .generic_model import GenericModel
from .lightning_model import LightningModel
logger = logging.getLogger()
class GenericMCDropoutModel(GenericModel, ABC):
"""
    Generic model wrapper for the MC Dropout uncertainty estimator
"""
def __init__(
self,
model_class: Type[Module],
model_config: Union[str, Dict],
trainer_config: Union[str, Dict],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(GenericMCDropoutModel, self).__init__(model_class, model_config, trainer_config)
_check_mc_dropout_model(model_class, model_config)
self.n_estimators = n_estimators
self.eval_dropout_prob = eval_dropout_prob
def __call__(self, loader: DataLoader) -> torch.Tensor:
"""
:param loader: pytorch dataloader
:return: model predictions
"""
if self.current_model is None:
raise ValueError("No current model, call 'train(train_loader, valid_loader)' to train the model first")
predictions = []
model = self.current_model
model.eval()
with torch.no_grad():
_enable_only_dropout_layers(model, self.eval_dropout_prob)
for _ in range(self.n_estimators):
model_prediction = []
for x, _ in loader:
model_prediction.append(model(x).detach().cpu())
predictions.append(torch.cat(model_prediction, 0))
predictions = torch.stack(predictions)
return predictions
class LightningMCDropoutModel(GenericMCDropoutModel, LightningModel):
r"""
Wrapper for MC Dropout estimator with pytorch lightning trainer
Example:
.. code-block:: python
import torch
import pytorch_lightning as pl
class PyLModel(pl.LightningModule):
def __init__(self, in_dim, out_dim):
                super(PyLModel, self).__init__()
                self.linear = torch.nn.Linear(in_dim, out_dim)
            # the train/validation steps and optimizer methods required by
            # pytorch-lightning to run this example are omitted for brevity
wrapper = LightningMCDropoutModel(
PyLModel,
model_config={"in_dim":10, "out_dim":1},
trainer_config={"epochs":100},
n_estimators=10,
eval_dropout_prob=0.2,
)
wrapper.train(train_loader, valid_loader)
predictions = wrapper(loader)
assert predictions.size(0) == 10
"""
def __init__(
self,
model_class: Type[LightningModule],
model_config: Union[Dict, str],
trainer_config: Union[Dict, str],
n_estimators: int = 10,
eval_dropout_prob: float = 0.2,
):
super(LightningMCDropoutModel, self).__init__(
model_class,
model_config,
trainer_config,
n_estimators=n_estimators,
eval_dropout_prob=eval_dropout_prob,
)
def _enable_only_dropout_layers(model: Module, p: Optional[float] = None) -> None:
def enable_dropout_on_module(m):
if m.__class__.__name__.startswith("Dropout"):
if isinstance(p, float) and (0 <= p <= 1):
m.p = p
elif isinstance(p, float) and (p < 0 or p > 1):
logger.warning(f"Evaluation dropout probability should be a float between 0 and 1, got {p}")
m.train()
model.apply(enable_dropout_on_module)
def _check_mc_dropout_model(model_class: Type[Module], model_config: Dict) -> None:
model = model_class(**model_config)
def has_dropout_module(model):
is_dropout = []
for m in model.children():
if m.__class__.__name__.startswith("Dropout"):
is_dropout.append(True)
else:
is_dropout += has_dropout_module(m)
return is_dropout
if not any(has_dropout_module(model)):
raise ValueError("Model provided do not contain any torch.nn.Dropout modules, cannot apply MC Dropout")
|
79637
|
import hashlib
import sys
import getpass
import argparse
import rx7 as rx
from LIB.Functions import pause, cls
from LIB.Hash import sa
def print_hashes(word, file=None, Print=True):
word=bytes(word, encoding='utf-8')
LIST = []
for name,func in sa.items():
try:
result = func(word).hexdigest()
LIST.append(result)
if Print:
print(f' {name.upper()}:{" "*(10-len(name))}{result}')
        except TypeError:
            # some algorithms (e.g. shake_*) require a length argument; skip them
            pass
    if file:
        rx.write(str(file), '\n'.join(LIST))
BANNER = '''
88 88 db .dP"Y8 88 88
88 88 dPYb `Ybo." 88 88
888888 dP__Yb o.`Y8b 888888
88 88 dP""""Yb 8bodP' 88 88
dP""b8 888888 88b 88 888888 88""Yb db 888888 dP"Yb 88""Yb
dP `" 88__ 88Yb88 88__ 88__dP dPYb 88 dP Yb 88__dP
Yb "88 88"" 88 Y88 88"" 88"Yb dP__Yb 88 Yb dP 88"Yb
YboodP 888888 88 Y8 888888 88 Yb dP""""Yb 88 YbodP 88 Yb
'''
if __name__ == "__main__":
if len(sys.argv) > 1:
parser = argparse.ArgumentParser(
'Hash Generator',
description='Generate Hash of a word in all hash types',
allow_abbrev=False,
)
parser.add_argument('HASH',
help="Word which you want to get its hashes"
)
parser.add_argument('-f','--output-file',
metavar='FILE',
help='The file to save hashes of HASH to it'
)
parser.add_argument('-q','--quiet', action='store_false',
help='Run app in quiet mode (Do not print the hashes)'
)
args = parser.parse_args()
hashed_file_name = args.output_file
word = args.HASH
quiet = args.quiet
cls()
rx.style.print(BANNER, 'gold_3b')
        print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{word}{rx.attr(0)}":''')
print_hashes(word, hashed_file_name, quiet)
else:
while True:
cls()
rx.style.print(BANNER, 'gold_3b')
print('Use: "HASH||FILE" to save output to FILE \n')
            inp = input('Enter String to Create Hashes: ')
if inp=='exit':
break
elif inp:
if '||' in inp:
inp = inp.split('||')
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp[0]}{rx.attr(0)}":''')
print_hashes(inp[0],inp[1])
else:
                    print(f'''Here is the list of hashes for "{rx.fg('dodger_blue_1')}{inp}{rx.attr(0)}":''')
print_hashes(inp)
pause()
|
79669
|
import typing
from dataclasses import dataclass
from starlette.datastructures import URL, QueryParams
@dataclass
class PageControl:
text: str
    url: typing.Optional[URL] = None
is_active: bool = False
is_disabled: bool = False
def inclusive_range(st: int, en: int, cutoff: int) -> typing.List[int]:
"""
Return an inclusive range from 'st' to 'en',
bounded within a minimum of 1 and a maximum of 'cutoff'.
"""
st = max(st, 1)
en = min(en, cutoff)
return list(range(st, en + 1))
def get_page_number(url: URL) -> int:
"""
Return a page number specified in the URL query parameters.
"""
query_params = QueryParams(url.query)
try:
return int(query_params.get("page", default="1"))
except (TypeError, ValueError):
return 1
def get_page_controls(
url: URL, current_page: int, total_pages: int
) -> typing.List[PageControl]:
"""
Returns a list of pagination controls, using GitHub's style for rendering
which controls should be displayed. See eg. issue pages in GitHub.
Previous [1] 2 3 4 5 ... 14 15 Next
"""
assert total_pages >= 1
assert current_page >= 1
assert current_page <= total_pages
# If we've only got a single page, then don't include pagination controls.
if total_pages == 1:
return []
# We always have 5 contextual page numbers around the current page.
if current_page <= 2:
# If we're on the first or second-to-first page, then our 5 contextual
# pages should start from the first page onwards.
main_block = inclusive_range(1, 5, cutoff=total_pages)
elif current_page >= total_pages - 1:
# If we're on the last or second-to-last page, then our 5 contextual
# pages should end with the final page backwards.
main_block = inclusive_range(total_pages - 4, total_pages, cutoff=total_pages)
else:
# All other cases, our 5 contextual pages should be 2 pages on either
# side of our current page.
main_block = inclusive_range(
current_page - 2, current_page + 2, cutoff=total_pages
)
# We always have 2 contextual page numbers at the start.
start_block = inclusive_range(1, 2, cutoff=total_pages)
if main_block[0] == 4:
# If we've only got a gap of one between the start and main blocks
# then fill in the gap with a page marker.
# | 1 2 3 4 5 [6] 7 8
start_block += [3]
elif main_block[0] > 4:
        # If we've got a gap of more than one between the start and main
# blocks then fill in the gap with an ellipsis marker.
# | 1 2 … 5 6 [7] 8 9
start_block += [None]
# We always have 2 contextual page numbers at the end.
end_block = inclusive_range(total_pages - 1, total_pages, cutoff=total_pages)
if main_block[-1] == total_pages - 3:
# If we've got a gap of one between the end and main blocks then
        # fill in the gap with a page marker.
# 92 93 [94] 95 96 97 98 99 |
end_block = [total_pages - 2] + end_block
elif main_block[-1] < total_pages - 3:
        # If we've got a gap of more than one between the end and main
# blocks then fill in the gap with an ellipsis marker.
# 91 92 [93] 94 95 … 98 99 |
end_block = [None] + end_block
# We've got a list of integer/None values representing which pages to
# display in the controls. Now we use those to generate the actual
# PageControl instances.
seen_numbers = set()
controls = []
# Add a 'Previous' page control.
if current_page == 1:
previous_url = None
previous_disabled = True
elif current_page == 2:
previous_url = url.remove_query_params("page")
previous_disabled = False
else:
previous_url = url.include_query_params(page=current_page - 1)
previous_disabled = False
previous = PageControl(
text="Previous", url=previous_url, is_disabled=previous_disabled
)
controls.append(previous)
for page_number in start_block + main_block + end_block:
if page_number is None:
gap = PageControl(text="…", is_disabled=True)
controls.append(gap)
elif page_number not in seen_numbers:
seen_numbers.add(page_number)
if page_number == 1:
page_url = url.remove_query_params("page")
else:
page_url = url.include_query_params(page=page_number)
page = PageControl(
text=str(page_number),
url=page_url,
is_active=page_number == current_page,
)
controls.append(page)
# Add a 'Next' page control.
if current_page == total_pages:
next_url = None
next_disabled = True
else:
next_url = url.include_query_params(page=current_page + 1)
next_disabled = False
    next_control = PageControl(text="Next", url=next_url, is_disabled=next_disabled)
    controls.append(next_control)
return controls
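# --- Usage sketch (illustrative, not part of the module) ---
if __name__ == "__main__":
    url = URL("https://example.com/issues?page=7")
    current = get_page_number(url)  # -> 7
    controls = get_page_controls(url, current_page=current, total_pages=15)
    # Renders as: Previous 1 2 … 5 6 7 8 9 … 14 15 Next
    print(" ".join(c.text for c in controls))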
|
79700
|
from reconbf.modules import test_kernel
from reconbf.lib.result import Result
from reconbf.lib import utils
import unittest
from mock import patch
class PtraceScope(unittest.TestCase):
def test_no_yama(self):
with patch.object(utils, 'kconfig_option', return_value=None):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.FAIL)
def test_level_0(self):
with patch.object(utils, 'kconfig_option', return_value='y'):
with patch.object(utils, 'get_sysctl_value', return_value='0'):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.FAIL)
def test_level_1(self):
with patch.object(utils, 'kconfig_option', return_value='y'):
with patch.object(utils, 'get_sysctl_value', return_value='1'):
res = test_kernel.test_ptrace_scope()
self.assertEqual(res.result, Result.PASS)
|
79778
|
from collections import defaultdict
import numpy as np
class MetricsAccumulator:
def __init__(self) -> None:
        self.accumulator = defaultdict(list)
def update_metric(self, metric_name, metric_value):
self.accumulator[metric_name].append(metric_value)
    def print_average_metric(self):
        for k, v in self.accumulator.items():
            average_v = np.array(v).mean()
            print(f"{k} - {average_v:.2f}")
        # reset the accumulator after reporting
        self.accumulator = defaultdict(list)
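# --- Usage sketch (illustrative) ---
if __name__ == "__main__":
    metrics = MetricsAccumulator()
    for step in range(3):
        metrics.update_metric("loss", 1.0 / (step + 1))
    metrics.print_average_metric()  # prints "loss - 0.61" and resets the accumulator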
|
79803
|
data = (
'Mang ', # 0x00
'Zhu ', # 0x01
'Utsubo ', # 0x02
'Du ', # 0x03
'Ji ', # 0x04
'Xiao ', # 0x05
'Ba ', # 0x06
'Suan ', # 0x07
'Ji ', # 0x08
'Zhen ', # 0x09
'Zhao ', # 0x0a
'Sun ', # 0x0b
'Ya ', # 0x0c
'Zhui ', # 0x0d
'Yuan ', # 0x0e
'Hu ', # 0x0f
'Gang ', # 0x10
'Xiao ', # 0x11
'Cen ', # 0x12
'Pi ', # 0x13
'Bi ', # 0x14
'Jian ', # 0x15
'Yi ', # 0x16
'Dong ', # 0x17
'Shan ', # 0x18
'Sheng ', # 0x19
'Xia ', # 0x1a
'Di ', # 0x1b
'Zhu ', # 0x1c
'Na ', # 0x1d
'Chi ', # 0x1e
'Gu ', # 0x1f
'Li ', # 0x20
'Qie ', # 0x21
'Min ', # 0x22
'Bao ', # 0x23
'Tiao ', # 0x24
'Si ', # 0x25
'Fu ', # 0x26
'Ce ', # 0x27
'Ben ', # 0x28
'Pei ', # 0x29
'Da ', # 0x2a
'Zi ', # 0x2b
'Di ', # 0x2c
'Ling ', # 0x2d
'Ze ', # 0x2e
'Nu ', # 0x2f
'Fu ', # 0x30
'Gou ', # 0x31
'Fan ', # 0x32
'Jia ', # 0x33
'Ge ', # 0x34
'Fan ', # 0x35
'Shi ', # 0x36
'Mao ', # 0x37
'Po ', # 0x38
'Sey ', # 0x39
'Jian ', # 0x3a
'Qiong ', # 0x3b
'Long ', # 0x3c
'Souke ', # 0x3d
'Bian ', # 0x3e
'Luo ', # 0x3f
'Gui ', # 0x40
'Qu ', # 0x41
'Chi ', # 0x42
'Yin ', # 0x43
'Yao ', # 0x44
'Xian ', # 0x45
'Bi ', # 0x46
'Qiong ', # 0x47
'Gua ', # 0x48
'Deng ', # 0x49
'Jiao ', # 0x4a
'Jin ', # 0x4b
'Quan ', # 0x4c
'Sun ', # 0x4d
'Ru ', # 0x4e
'Fa ', # 0x4f
'Kuang ', # 0x50
'Zhu ', # 0x51
'Tong ', # 0x52
'Ji ', # 0x53
'Da ', # 0x54
'Xing ', # 0x55
'Ce ', # 0x56
'Zhong ', # 0x57
'Kou ', # 0x58
'Lai ', # 0x59
'Bi ', # 0x5a
'Shai ', # 0x5b
'Dang ', # 0x5c
'Zheng ', # 0x5d
'Ce ', # 0x5e
'Fu ', # 0x5f
'Yun ', # 0x60
'Tu ', # 0x61
'Pa ', # 0x62
'Li ', # 0x63
'Lang ', # 0x64
'Ju ', # 0x65
'Guan ', # 0x66
'Jian ', # 0x67
'Han ', # 0x68
'Tong ', # 0x69
'Xia ', # 0x6a
'Zhi ', # 0x6b
'Cheng ', # 0x6c
'Suan ', # 0x6d
'Shi ', # 0x6e
'Zhu ', # 0x6f
'Zuo ', # 0x70
'Xiao ', # 0x71
'Shao ', # 0x72
'Ting ', # 0x73
'Ce ', # 0x74
'Yan ', # 0x75
'Gao ', # 0x76
'Kuai ', # 0x77
'Gan ', # 0x78
'Chou ', # 0x79
'Kago ', # 0x7a
'Gang ', # 0x7b
'Yun ', # 0x7c
'O ', # 0x7d
'Qian ', # 0x7e
'Xiao ', # 0x7f
'Jian ', # 0x80
'Pu ', # 0x81
'Lai ', # 0x82
'Zou ', # 0x83
'Bi ', # 0x84
'Bi ', # 0x85
'Bi ', # 0x86
'Ge ', # 0x87
'Chi ', # 0x88
'Guai ', # 0x89
'Yu ', # 0x8a
'Jian ', # 0x8b
'Zhao ', # 0x8c
'Gu ', # 0x8d
'Chi ', # 0x8e
'Zheng ', # 0x8f
'Jing ', # 0x90
'Sha ', # 0x91
'Zhou ', # 0x92
'Lu ', # 0x93
'Bo ', # 0x94
'Ji ', # 0x95
'Lin ', # 0x96
'Suan ', # 0x97
'Jun ', # 0x98
'Fu ', # 0x99
'Zha ', # 0x9a
'Gu ', # 0x9b
'Kong ', # 0x9c
'Qian ', # 0x9d
'Quan ', # 0x9e
'Jun ', # 0x9f
'Chui ', # 0xa0
'Guan ', # 0xa1
'Yuan ', # 0xa2
'Ce ', # 0xa3
'Ju ', # 0xa4
'Bo ', # 0xa5
'Ze ', # 0xa6
'Qie ', # 0xa7
'Tuo ', # 0xa8
'Luo ', # 0xa9
'Dan ', # 0xaa
'Xiao ', # 0xab
'Ruo ', # 0xac
'Jian ', # 0xad
'Xuan ', # 0xae
'Bian ', # 0xaf
'Sun ', # 0xb0
'Xiang ', # 0xb1
'Xian ', # 0xb2
'Ping ', # 0xb3
'Zhen ', # 0xb4
'Sheng ', # 0xb5
'Hu ', # 0xb6
'Shi ', # 0xb7
'Zhu ', # 0xb8
'Yue ', # 0xb9
'Chun ', # 0xba
'Lu ', # 0xbb
'Wu ', # 0xbc
'Dong ', # 0xbd
'Xiao ', # 0xbe
'Ji ', # 0xbf
'Jie ', # 0xc0
'Huang ', # 0xc1
'Xing ', # 0xc2
'Mei ', # 0xc3
'Fan ', # 0xc4
'Chui ', # 0xc5
'Zhuan ', # 0xc6
'Pian ', # 0xc7
'Feng ', # 0xc8
'Zhu ', # 0xc9
'Hong ', # 0xca
'Qie ', # 0xcb
'Hou ', # 0xcc
'Qiu ', # 0xcd
'Miao ', # 0xce
'Qian ', # 0xcf
None, # 0xd0
'Kui ', # 0xd1
'Sik ', # 0xd2
'Lou ', # 0xd3
'Yun ', # 0xd4
'He ', # 0xd5
'Tang ', # 0xd6
'Yue ', # 0xd7
'Chou ', # 0xd8
'Gao ', # 0xd9
'Fei ', # 0xda
'Ruo ', # 0xdb
'Zheng ', # 0xdc
'Gou ', # 0xdd
'Nie ', # 0xde
'Qian ', # 0xdf
'Xiao ', # 0xe0
'Cuan ', # 0xe1
'Gong ', # 0xe2
'Pang ', # 0xe3
'Du ', # 0xe4
'Li ', # 0xe5
'Bi ', # 0xe6
'Zhuo ', # 0xe7
'Chu ', # 0xe8
'Shai ', # 0xe9
'Chi ', # 0xea
'Zhu ', # 0xeb
'Qiang ', # 0xec
'Long ', # 0xed
'Lan ', # 0xee
'Jian ', # 0xef
'Bu ', # 0xf0
'Li ', # 0xf1
'Hui ', # 0xf2
'Bi ', # 0xf3
'Di ', # 0xf4
'Cong ', # 0xf5
'Yan ', # 0xf6
'Peng ', # 0xf7
'Sen ', # 0xf8
'Zhuan ', # 0xf9
'Pai ', # 0xfa
'Piao ', # 0xfb
'Dou ', # 0xfc
'Yu ', # 0xfd
'Mie ', # 0xfe
'Zhuan ', # 0xff
)
|
79804
|
from twilio.twiml.voice_response import Pay, VoiceResponse
response = VoiceResponse()
response.pay()
print(response)
|
79815
|
import functools
from typing import Optional, Sequence
from fvcore.common.registry import Registry as _Registry
from tabulate import tabulate
class Registry(_Registry):
"""Extension of fvcore's registry that supports aliases."""
_ALIAS_KEYWORDS = ("_aliases", "_ALIASES")
def __init__(self, name: str):
super().__init__(name=name)
self._metadata_map = {}
def _get_aliases(self, obj_func_or_class):
for kw in self._ALIAS_KEYWORDS:
if hasattr(obj_func_or_class, kw):
return getattr(obj_func_or_class, kw)
return []
    def register(self, obj: object = None, aliases: Optional[Sequence[str]] = None) -> Optional[object]:
if obj is None:
# used as a decorator
def deco(func_or_class: object, aliases=None) -> object:
name = func_or_class.__name__
self._do_register(name, func_or_class)
if aliases is None:
aliases = self._get_aliases(func_or_class)
if not isinstance(aliases, (list, tuple, set)):
aliases = [aliases]
for alias in aliases:
self._do_register(alias, func_or_class, is_alias=True)
return func_or_class
kwargs = {"aliases": aliases}
if any(v is not None for v in kwargs.values()):
return functools.partial(deco, **kwargs)
else:
return deco
name = obj.__name__
self._do_register(name, obj)
if aliases is None:
aliases = self._get_aliases(obj) if isinstance(obj, type) else []
        for alias in aliases:
            self._do_register(alias, obj, is_alias=True)
        return obj
def _do_register(self, name: str, obj: object, **kwargs) -> None:
docstring = obj.__doc__
if docstring is None:
docstring = ""
aliases = self._get_aliases(obj) if isinstance(obj, type) else None
if not aliases:
aliases = None
self._metadata_map[name] = {
"name": name,
"description": kwargs.pop("description", docstring.split("\n")[0]),
"aliases": aliases,
**kwargs,
}
return super()._do_register(name, obj)
def clear(self):
self._obj_map = {}
self._metadata_map = {}
def __repr__(self) -> str:
metadata = [v for v in self._metadata_map.values() if not v.get("is_alias", False)]
table = tabulate(metadata, headers="keys", tablefmt="fancy_grid")
return "Registry of {}:\n{}".format(self._name, table)
|
79831
|
import scrapy
class QuotesSpider(scrapy.Spider):
name = "quotes2"
start_urls = [
'http://quotes.toscrape.com/page/1/',
'http://quotes.toscrape.com/page/2/',
]
def parse(self, response):
self.log('I just visited {}'.format(response.url))
|
79872
|
import json
# import logging
from .utils import is_invalid_params
from .exceptions import (
JSONRPCInvalidParams,
JSONRPCInvalidRequest,
JSONRPCInvalidRequestException,
JSONRPCMethodNotFound,
JSONRPCParseError,
JSONRPCServerError,
JSONRPCDispatchException,
)
from .jsonrpc1 import JSONRPC10Response
from .jsonrpc2 import (
JSONRPC20BatchRequest,
JSONRPC20BatchResponse,
JSONRPC20Response,
)
from .jsonrpc import JSONRPCRequest
# logger = logging.getLogger(__name__)
class JSONRPCResponseManager(object):
""" JSON-RPC response manager.
Method brings syntactic sugar into library. Given dispatcher it handles
request (both single and batch) and handles errors.
Request could be handled in parallel, it is server responsibility.
:param str request_str: json string. Will be converted into
JSONRPC20Request, JSONRPC20BatchRequest or JSONRPC10Request
:param dict dispather: dict<function_name:function>.
"""
RESPONSE_CLASS_MAP = {
"1.0": JSONRPC10Response,
"2.0": JSONRPC20Response,
}
@classmethod
def handle(cls, request_str, dispatcher):
if isinstance(request_str, bytes):
request_str = request_str.decode("utf-8")
try:
json.loads(request_str)
except (TypeError, ValueError):
return JSONRPC20Response(error=JSONRPCParseError()._data)
try:
request = JSONRPCRequest.from_json(request_str)
except JSONRPCInvalidRequestException:
return JSONRPC20Response(error=JSONRPCInvalidRequest()._data)
return cls.handle_request(request, dispatcher)
@classmethod
def handle_request(cls, request, dispatcher):
""" Handle request data.
At this moment request has correct jsonrpc format.
:param dict request: data parsed from request_str.
:param jsonrpc.dispatcher.Dispatcher dispatcher:
.. versionadded: 1.8.0
"""
rs = request if isinstance(request, JSONRPC20BatchRequest) \
else [request]
responses = [r for r in cls._get_responses(rs, dispatcher)
if r is not None]
# notifications
if not responses:
return
if isinstance(request, JSONRPC20BatchRequest):
return JSONRPC20BatchResponse(*responses)
else:
return responses[0]
@classmethod
def _get_responses(cls, requests, dispatcher):
""" Response to each single JSON-RPC Request.
:return iterator(JSONRPC20Response):
.. versionadded: 1.9.0
TypeError inside the function is distinguished from Invalid Params.
"""
for request in requests:
def response(**kwargs):
return cls.RESPONSE_CLASS_MAP[request.JSONRPC_VERSION](
_id=request._id, **kwargs)
try:
method = dispatcher[request.method]
except KeyError:
output = response(error=JSONRPCMethodNotFound()._data)
else:
try:
result = method(*request.args, **request.kwargs)
except JSONRPCDispatchException as e:
output = response(error=e.error._data)
except Exception as e:
data = {
"type": e.__class__.__name__,
"args": e.args,
"message": str(e),
}
if isinstance(e, TypeError) and is_invalid_params(
method, *request.args, **request.kwargs):
output = response(
error=JSONRPCInvalidParams(data=data)._data)
else:
# logger.exception("API Exception: {0}".format(data))
print("API Exception: {0}".format(data))
output = response(
error=JSONRPCServerError(data=data)._data)
else:
output = response(result=result)
finally:
if not request.is_notification:
yield output
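# --- Usage sketch (illustrative; the dispatcher may be any mapping of
# method name -> callable) ---
if __name__ == "__main__":
    req = '{"jsonrpc": "2.0", "method": "echo", "params": ["hello"], "id": 1}'
    resp = JSONRPCResponseManager.handle(req, {"echo": lambda s: s})
    print(resp.json)  # e.g. {"jsonrpc": "2.0", "result": "hello", "id": 1}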
|
79938
|
import yaml
import numpy as np
from os import path
from absl import flags
from pysc2.env import sc2_env
from pysc2.lib import features
from pysc2.lib import actions
sc2_f_path = path.abspath(path.join(path.dirname(__file__), "..", "configs", "sc2_config.yml"))
with open(sc2_f_path, 'r') as ymlfile:
    sc2_cfg = yaml.safe_load(ymlfile)
# TODO: update README.md for adding random seed for game env
def create_sc2_minigame_env(map_name, mode, visualize=False):
"""Create sc2 game env with available actions printer
Set screen, minimap same resolution and x, y same pixels for simplicity.
"""
assert mode in ['full', 'lite', 'test']
# workaround for pysc2 flags
FLAGS = flags.FLAGS
FLAGS([__file__])
env_seed = 3 if mode == 'test' else None
env = sc2_env.SC2Env(
map_name=map_name,
step_mul=sc2_cfg[mode]['step_mul'],
screen_size_px=(sc2_cfg[mode]['resl'],) * 2,
minimap_size_px=(sc2_cfg[mode]['resl'],) * 2,
visualize=visualize,
random_seed=env_seed)
return env
# TODO: move preprocess to neuro net embed layer
# TODO: move post process into sc2_env extension
class GameInterfaceHandler(object):
"""Provide game interface info.
Transform observed game image and available actions into CNN input tensors.
    - Special categorical 2d image:
        single layer normalized by scalar max
        (no same-category overlapping)
    - Categorical 2d image:
        expanded to multiple layers
    - Scalar 2d image:
        single layer normalized by scalar max
NOTE: This class can potentially be a decorator to wrap sc2_env
"""
def __init__(self, mode):
assert mode in ['full', 'lite', 'test']
self.dtype = np.float32
self.minimap_player_id = features.MINIMAP_FEATURES.player_id.index
self.screen_player_id = features.SCREEN_FEATURES.player_id.index
self.screen_unit_type = features.SCREEN_FEATURES.unit_type.index
self.screen_resolution = sc2_cfg[mode]['resl']
self.minimap_resolution = sc2_cfg[mode]['resl']
(self.sub_to_full_acts, self.full_to_sub_acts) = self._get_action_mappings(
sc2_cfg[mode]['action_list'])
self.num_action = len(self.sub_to_full_acts)
self.non_spatial_actions = self._get_non_spatial_actions()
self.screen_imgs = sc2_cfg[mode]['screen_imgs']
self.minimap_imgs = sc2_cfg[mode]['minimap_imgs']
@property
def screen_channels(self):
"""Return number of channels for preprocessed screen image"""
channels = 0
for i, screen_feature in enumerate(features.SCREEN_FEATURES):
if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
continue
if i == self.screen_player_id or i == self.screen_unit_type:
channels += 1
elif screen_feature.type == features.FeatureType.SCALAR:
channels += 1
else:
channels += screen_feature.scale
return channels
def _preprocess_screen(self, screen):
"""Transform screen image into expanded tensor
Args:
screen: obs.observation['screen']
Returns:
ndarray, shape (len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
"""
screen = np.array(screen, dtype=self.dtype)
layers = []
assert screen.shape[0] == len(features.SCREEN_FEATURES)
for i, screen_feature in enumerate(features.SCREEN_FEATURES):
if len(self.screen_imgs) > 0 and i not in self.screen_imgs:
continue
if i == self.screen_player_id or i == self.screen_unit_type:
layers.append(np.log(screen[i:i + 1] + 1.))
elif screen_feature.type == features.FeatureType.SCALAR:
layers.append(np.log(screen[i:i + 1] + 1.))
else:
layer = np.zeros(
(screen_feature.scale, screen.shape[1], screen.shape[2]),
dtype=self.dtype)
for j in range(screen_feature.scale):
indy, indx = (screen[i] == j).nonzero()
layer[j, indy, indx] = 1
layers.append(layer)
return np.concatenate(layers, axis=0)
def get_screen(self, observation):
"""Extract screen variable from observation['minimap']
Args:
observation: Timestep.obervation
Returns:
screen: ndarray, shape (1, len(SCREEN_FEATURES), screen_size_px.y, screen_size_px.x)
"""
screen = self._preprocess_screen(observation['screen'])
return np.expand_dims(screen, 0)
@property
def minimap_channels(self):
"""Return number of channels for preprocessed minimap image"""
channels = 0
for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
continue
if i == self.minimap_player_id:
channels += 1
elif minimap_feature.type == features.FeatureType.SCALAR:
channels += 1
else:
channels += minimap_feature.scale
return channels
def _preprocess_minimap(self, minimap):
"""Transform minimap image into expanded tensor
Args:
minimap: obs.observation['minimap']
Returns:
ndarray, shape (len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
"""
minimap = np.array(minimap, dtype=self.dtype)
layers = []
assert minimap.shape[0] == len(features.MINIMAP_FEATURES)
for i, minimap_feature in enumerate(features.MINIMAP_FEATURES):
if len(self.minimap_imgs) > 0 and i not in self.minimap_imgs:
continue
if i == self.minimap_player_id:
layers.append(np.log(minimap[i:i + 1] + 1.))
elif minimap_feature.type == features.FeatureType.SCALAR:
layers.append(np.log(minimap[i:i + 1] + 1.))
else:
layer = np.zeros(
(minimap_feature.scale, minimap.shape[1], minimap.shape[2]),
dtype=self.dtype)
for j in range(minimap_feature.scale):
indy, indx = (minimap[i] == j).nonzero()
layer[j, indy, indx] = 1
layers.append(layer)
return np.concatenate(layers, axis=0)
def get_minimap(self, observation):
"""Extract minimap variable from observation['minimap']
Args:
observation: Timestep.observation
Returns:
minimap: ndarray, shape (1, len(MINIMAP_FEATURES), minimap_size_px.y, minimap_size_px.x)
"""
minimap = self._preprocess_minimap(observation['minimap'])
return np.expand_dims(minimap, 0)
def _preprocess_available_actions(self, available_actions):
"""Returns ndarray of available_actions from observed['available_actions']
shape (num_actions)
"""
available_actions = np.intersect1d(available_actions, self.sub_to_full_acts)
a_actions = np.zeros((self.num_action), dtype=self.dtype)
a_actions[self.full_to_sub_acts[available_actions]] = 1.
return a_actions
def get_available_actions(self, observation):
"""
Args:
observation: Timestep.observation
Returns:
available_action: ndarray, shape(num_actions)
"""
return self._preprocess_available_actions(
observation['available_actions'])
def get_info(self, observation):
"""Extract available actioins as info from state.observation['available_actioins']
Args:
observation: Timestep.observation
Returns:
info: ndarray, shape (num_actions)
"""
return self.get_available_actions(observation)
def postprocess_action(self, non_spatial_action, spatial_action):
"""Transform selected non_spatial and spatial actions into pysc2 FunctionCall
Args:
non_spatial_action: ndarray, shape (1, 1)
spatial_action: ndarray, shape (1, 1)
Returns:
FunctionCall as action for pysc2_env
"""
act_id = self.sub_to_full_acts[non_spatial_action[0][0]]
target = spatial_action[0][0]
target_point = [
int(target % self.screen_resolution),
int(target // self.screen_resolution)
] # (x, y)
act_args = []
for arg in actions.FUNCTIONS[act_id].args:
if arg.name in ('screen', 'minimap', 'screen2'):
act_args.append(target_point)
else:
act_args.append([0])
return actions.FunctionCall(act_id, act_args)
def _get_non_spatial_actions(self):
non_spatial_actions = [True] * self.num_action
for func_id, func in enumerate(actions.FUNCTIONS):
for arg in func.args:
if arg.name in ('screen', 'minimap', 'screen2'):
non_spatial_actions[self.full_to_sub_acts[func_id]] = False
break
return non_spatial_actions
def is_non_spatial_action(self, action_id):
return self.non_spatial_actions[self.full_to_sub_acts[action_id]]
def _get_action_mappings(self, action_list):
"""Fill actioin list if it's empty
Args:
action_list: list
Returns:
sub_to_full_acts: ndarray
full_to_sub_acts: ndarray
"""
if len(action_list) == 0:
action_list = [i for i in range(len(actions.FUNCTIONS))]
sub_to_full_acts = action_list
full_to_sub_acts = [-1] * len(actions.FUNCTIONS)
for idx, val in enumerate(sub_to_full_acts):
full_to_sub_acts[val] = idx
return (np.asarray(sub_to_full_acts, dtype=np.int32), np.asarray(full_to_sub_acts, dtype=np.int32))
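# --- Illustration (not part of the module) of the sub<->full action mapping
# built by _get_action_mappings: a restricted action list maps network output
# indices to pysc2 function ids and back. The numbers are hypothetical. ---
if __name__ == '__main__':
    _action_list = [0, 2, 7, 13]                      # restricted action set
    _sub_to_full = np.asarray(_action_list, dtype=np.int32)
    _full_to_sub = np.full(20, -1, dtype=np.int32)    # 20 stands in for len(actions.FUNCTIONS)
    _full_to_sub[_sub_to_full] = np.arange(len(_sub_to_full), dtype=np.int32)
    assert _full_to_sub[_sub_to_full[2]] == 2         # round trip: sub -> full -> sub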
|
79939
|
from utils.path import *
from utils.audio.tools import get_mel
from tqdm import tqdm
import numpy as np
import glob, os, sys
from multiprocessing import Pool
from scipy.io.wavfile import write
import librosa, ffmpeg
from sklearn.preprocessing import StandardScaler
def job(args):
    original_wav_filename, prepro_wav_dir, sampling_rate = args
filename = original_wav_filename.split("/")[-1]
new_wav_filename = get_path(prepro_wav_dir, filename)
if not os.path.exists(new_wav_filename):
try:
out, err = (ffmpeg
.input(original_wav_filename)
.output(new_wav_filename, acodec='pcm_s16le', ac=1, ar=sampling_rate)
.overwrite_output()
.run(capture_stdout=True, capture_stderr=True))
except ffmpeg.Error as err:
print(err.stderr, file=sys.stderr)
raise
def preprocess(data_path, prepro_wav_dir, prepro_path, mel_path, sampling_rate, n_workers=10, filter_length=1024, hop_length=256, trim_silence=True, top_db=60):
p = Pool(n_workers)
mel_scaler = StandardScaler(copy=False)
prepro_wav_dir = create_dir(prepro_wav_dir)
wav_paths=[[filename, prepro_wav_dir, sampling_rate] for filename in list(glob.glob(get_path(data_path, "wav48", "**", "*.wav")))]
print("\t[LOG] converting wav format...")
    with tqdm(total=len(wav_paths)) as pbar:
        # a single progress bar; the inner tqdm wrapper was redundant
        for _ in p.imap_unordered(job, wav_paths):
            pbar.update()
    p.close()
    p.join()
print("\t[LOG] saving mel-spectrogram...")
    for wav_filename in tqdm(glob.glob(get_path(prepro_wav_dir, "*.wav"))):
        # splitext avoids corrupting stems that contain the substring "wav"
        mel_filename = os.path.splitext(os.path.basename(wav_filename))[0] + ".npy"
        mel_savepath = get_path(mel_path, mel_filename)
        mel_spectrogram, _ = get_mel(wav_filename, trim_silence=trim_silence, frame_length=filter_length, hop_length=hop_length, top_db=top_db)
        mel_scaler.partial_fit(mel_spectrogram)
        np.save(mel_savepath, mel_spectrogram)
np.save(get_path(prepro_path, "mel_stats.npy"), np.array([mel_scaler.mean_, mel_scaler.scale_]))
print("Done!")
def split_unseen_speakers(prepro_mel_dir):
print("[LOG] 6 UNSEEN speakers: \n\t p226(Male, English, Surrey) \n\t p256(Male, English, Birmingham) \
\n\t p266(Female, Irish, Athlone) \n\t p297(Female, American, Newyork) \
\n\t p323 (Female, SouthAfrican, Pretoria)\n\t p376(Male, Indian)")
unseen_speaker_list = ["p226", "p256", "p266", "p297", "p323", "p376"]
seen_speaker_files, unseen_speaker_files = [], []
preprocessed_file_list = glob.glob(get_path(prepro_mel_dir, "*.npy"))
for preprocessed_mel_file in preprocessed_file_list:
speaker = preprocessed_mel_file.split("/")[-1].split("_")[0]
if speaker in unseen_speaker_list:
unseen_speaker_files.append(preprocessed_mel_file)
else:
seen_speaker_files.append(preprocessed_mel_file)
return seen_speaker_files, unseen_speaker_files
|
79953
|
from paleomix.nodes.bowtie2 import Bowtie2IndexNode, Bowtie2Node
########################################################################################
# Indexing
def test_index_description():
node = Bowtie2IndexNode(input_file="/path/genome.fasta")
assert str(node) == "creating Bowtie2 index for /path/genome.fasta"
########################################################################################
# Bowtie2
def test_bowtie2_description__se():
node = Bowtie2Node(
input_file_1="/path/reads_1.fq.gz",
input_file_2=None,
output_file="/path/output.bam",
reference="/path/my_genome.fasta",
)
assert str(node) == "aligning '/path/reads_1.fq.gz' onto my_genome using Bowtie2"
def test_bowtie2_description__pe():
node = Bowtie2Node(
input_file_1="/path/reads_1.fq.gz",
input_file_2="/path/reads_2.fq.gz",
output_file="/path/output.bam",
reference="/path/my_genome.fasta",
)
assert str(node) == "aligning '/path/reads_[12].fq.gz' onto my_genome using Bowtie2"
|
80014
|
import sys
import os
import errno
import argparse
import shutil
from os.path import join as pjoin
from xml.etree import ElementTree
from collections import namedtuple
from pprint import pprint
from distutils.dir_util import mkpath
class AdobeCCFontExtractor:
pass
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
# global configuration object
class Config:
path_prefix = ''
font_dir = ''
manifest = ''
install_path = ''
# font data object
FontData = namedtuple('FontData', 'id name weight')
def get_font_metadata(manifest_path):
tree = ElementTree.parse(manifest_path)
# find the <fonts> element containing the list of fonts
fonts_subtree = tree.getroot().find('fonts')
fonts = []
for font_elem in fonts_subtree.findall('font'):
props = font_elem.find('properties')
f_id = font_elem.find('id').text
f_name = props.find('familyName').text
f_weight = props.find('variationName').text
font = FontData(id=f_id, name=f_name, weight=f_weight)
fonts.append(font)
return fonts
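# --- Expected manifest layout (illustrative, derived from the parsing above) ---
# get_font_metadata() expects entitlements.xml to look roughly like:
#   <root>
#     <fonts>
#       <font>
#         <id>12345</id>
#         <properties>
#           <familyName>Source Sans</familyName>
#           <variationName>Bold</variationName>
#         </properties>
#       </font>
#     </fonts>
#   </root>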
# install the fonts on the system per the --install flag
def install_fonts(fonts):
pass
# extract the fonts to location
# folder structure:
# location/
# Font1/
# Font1 - Variation1.otf
# Font1 - Variation2.otf
def extract_fonts(fonts, font_dir, location):
# make dirs to location if they don't exist
mkpath(location)
for font in fonts:
src = pjoin(font_dir, str(font.id))
        filename = font.name + ' - ' + font.weight + '.otf'
dest = pjoin(location, filename)
shutil.copy(src, dest)
def sync_all_fonts():
''' Go to the Adobe CC website and sync EVERY font '''
pass
def platform_setup():
'''Set up paths for MacOS or Windows'''
c = Config()
if sys.platform == 'win32': # Windows
c.path_prefix = \
os.path.expandvars(r'%HOME%\AppData\Roaming\Adobe\CoreSync\plugins\livetype')
c.font_dir = pjoin(c.path_prefix, 'r')
c.manifest = pjoin(c.path_prefix, r'c\entitlements.xml')
else: # MacOS
c.path_prefix = \
os.path.expandvars(r'$HOME/Library/Application Support/Adobe/CoreSync/plugins/livetype')
c.font_dir = os.path.join(c.path_prefix, '.r')
c.manifest = os.path.join(c.path_prefix, '.c/entitlements.xml')
return c
def main():
config = platform_setup()
# parse the command line arguments
    parser = argparse.ArgumentParser(description=(
        'Extract Adobe CC Typekit fonts. '
        'Adobe CC font sync downloads your Typekit fonts; however, '
        'these fonts are not available as regular font files.'))
#parser.add_argument('--install', type=
parser.add_argument('-l', '--list', help='show which fonts are synced')
parser.parse_args()
try:
font_data = get_font_metadata(config.manifest)
pprint(font_data)
    except IOError:
        print("Error: The font manifest could not be found. Make sure Adobe Creative Cloud is running.")
def test():
config = platform_setup()
fonts = get_font_metadata(config.manifest)
extract_fonts(fonts, config.font_dir, 'TEST')
if __name__ == '__main__':
main()
|
80017
|
import logging
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from polymorphic_sqlalchemy import (create_polymorphic_base, Relation,
PolyField, NetRelationship, NetModel, BaseInitializer)
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.associationproxy import association_proxy
test_app = Flask(__name__)
test_app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
test_app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(test_app)
logger = logging.getLogger(__name__)
class Dealer:
""" Dealer model for the dealers table (pseudo) """
def __init__(self, id):
self.id = id
@classmethod
def find(cls, id):
return cls(id)
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
return '< Dealer id: {} >'.format(self.id)
class Vehicle(BaseInitializer, db.Model):
__tablename__ = "vehicle"
id = Column(Integer, primary_key=True, autoincrement=True)
source_id = Column(String(50), nullable=False)
source_type = Column(String(50), nullable=False)
source = PolyField(prefix='source')
source__dealer = NetRelationship(prefix='source', _class=Dealer)
HasVehicle = create_polymorphic_base(data_class=Vehicle, data_class_attr='source')
class LocalDealer(BaseInitializer, db.Model, HasVehicle):
"""
Local Dealer means dealer table that is locally in our database
"""
__tablename__ = "local_dealer"
id = Column(Integer, primary_key=True, autoincrement=True)
class Records(BaseInitializer, db.Model):
__tablename__ = "records"
id = Column(Integer, primary_key=True, autoincrement=True)
buyer_id = Column(String(50), nullable=False)
buyer_type = Column(String(50), nullable=False)
seller_id = Column(String(50), nullable=False)
seller_type = Column(String(50), nullable=False)
buyer__dealer = NetRelationship(prefix='buyer', _class=Dealer)
seller__dealer = NetRelationship(prefix='seller', _class=Dealer)
buyer = PolyField(prefix='buyer')
seller = PolyField(prefix='seller')
relations = (
Relation(data_class=Records, data_class_attr='buyer', ref_class_attr='buyer_records'),
Relation(data_class=Records, data_class_attr='seller', ref_class_attr='seller_records')
)
HasRecord = create_polymorphic_base(relations=relations)
class Org(db.Model, HasRecord, HasVehicle):
__tablename__ = "org"
id = Column(Integer, primary_key=True, autoincrement=True)
def __repr__(self):
return '< Org id: {} >'.format(self.id)
class Company(BaseInitializer, db.Model, HasRecord):
__tablename__ = "company"
id = Column(Integer, primary_key=True, autoincrement=True)
dealer_id = Column(Integer, nullable=False)
dealer = NetModel(field='dealer_id', _class=Dealer)
class VehicleReferencePrice(BaseInitializer, db.Model):
__tablename__ = "vehicle_reference_prices"
id = Column(Integer, primary_key=True, autoincrement=True)
source_id = Column(String(50), nullable=False)
source_type = Column(String(50), nullable=False)
source = PolyField(prefix='source')
HasVehicleReferencePrices = create_polymorphic_base(data_class=VehicleReferencePrice,
data_class_attr='source')
class FairEstimatedValue(BaseInitializer, db.Model, HasVehicleReferencePrices):
__tablename__ = "fair_estimated_value"
id = Column(Integer, primary_key=True, autoincrement=True)
class SomeRecord(BaseInitializer, db.Model, HasVehicleReferencePrices):
__tablename__ = "some_records"
id = Column(Integer, primary_key=True, autoincrement=True)
# ------------- Single Table Inheritance -------------
class SourceOfData(db.Model):
__tablename__ = 'juices'
id = Column(Integer, primary_key=True, autoincrement=True)
filter_type = Column(String(16), nullable=False)
ADS_FILTER_TYPE = 'ads'
NEWS_FILTER_TYPE = 'news'
__mapper_args__ = {
'polymorphic_on': filter_type
}
class AdsData(SourceOfData, HasVehicle):
__mapper_args__ = {
'polymorphic_identity': SourceOfData.ADS_FILTER_TYPE
}
class NewsData(SourceOfData, HasVehicle):
__mapper_args__ = {
'polymorphic_identity': SourceOfData.NEWS_FILTER_TYPE
}
# ---------- Joined table inheritance --------------
class VehicleReferencePriceB(BaseInitializer, db.Model):
__tablename__ = "vehicle_reference_prices_b"
id = Column(Integer, primary_key=True, autoincrement=True)
price_type = Column(String(50), nullable=False)
vehicle_reference_price_sources = relationship('VehicleReferencePriceSource',
back_populates='vehicle_reference_price')
sources = association_proxy('vehicle_reference_price_sources', 'source',
creator=lambda src: VehicleReferencePriceSource(source=src))
__mapper_args__ = {
'polymorphic_on': price_type
}
class VehicleReferencePriceSource(BaseInitializer, db.Model):
__tablename__ = "vehicle_reference_price_sources"
id = Column(Integer, primary_key=True, autoincrement=True)
vehicle_reference_price_id = Column(Integer, ForeignKey('vehicle_reference_prices_b.id'))
source_id = Column(String(50), nullable=False)
source_type = Column(String(50), nullable=False)
source = PolyField(prefix='source')
vehicle_reference_price = relationship('VehicleReferencePriceB', back_populates='vehicle_reference_price_sources')
HasVehicleReferencePrices = create_polymorphic_base(data_class=VehicleReferencePriceSource,
data_class_attr='source',
ref_class_attr='vehicle_reference_price_sources_list')
class FairEstimatedValueB(VehicleReferencePriceB, HasVehicleReferencePrices):
__mapper_args__ = {
'polymorphic_identity': 'fair_estimated_value'
}
class PredictedResidual(BaseInitializer, db.Model, HasVehicleReferencePrices):
__tablename__ = "predicted_residuals"
id = Column(Integer, primary_key=True, autoincrement=True)
# ---------- When source_type does not change --------------
# NOTE: instead of using net relationship in this case, you should use NetModel!
class BMWVehicles(BaseInitializer, db.Model):
__tablename__ = "bmw_vehicles"
id = Column(Integer, primary_key=True, autoincrement=True)
source_id = Column(String(50), nullable=False)
source_type = 'dealer'
source = PolyField(prefix='source')
source__dealer = NetRelationship(prefix='source', _class=Dealer)
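# --- Usage sketch (illustrative; assumes polymorphic_sqlalchemy's documented
# behaviour that assigning to a PolyField fills the matching *_id/*_type
# columns) ---
#
#     with test_app.app_context():
#         db.create_all()
#         vehicle = Vehicle(source=Dealer(1))
#         assert vehicle.source_type == 'dealer'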
|
80032
|
import torch
import numpy as np
import torch.nn as nn
from mmcv.cnn import normal_init
from ..registry import HEADS
from ..utils import ConvModule, bias_init_with_prob
from .anchor_head import AnchorHead
from mmdet.core import (delta2bbox, force_fp32,
multiclass_nms_with_feat)
""" RetinaHead that provides rich feature information.
embed_feats is currently set to cls_score.
"""
@HEADS.register_module
class RichRetinaHead(AnchorHead):
def __init__(self,
num_classes,
in_channels,
stacked_convs=4,
octave_base_scale=4,
scales_per_octave=3,
conv_cfg=None,
norm_cfg=None,
freeze_all=False,
**kwargs):
self.stacked_convs = stacked_convs
self.octave_base_scale = octave_base_scale
self.scales_per_octave = scales_per_octave
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.freeze_all = freeze_all
octave_scales = np.array(
[2**(i / scales_per_octave) for i in range(scales_per_octave)])
anchor_scales = octave_scales * octave_base_scale
super(RichRetinaHead, self).__init__(
num_classes, in_channels, anchor_scales=anchor_scales, **kwargs)
def _init_layers(self):
self.relu = nn.ReLU(inplace=True)
self.cls_convs = nn.ModuleList()
self.reg_convs = nn.ModuleList()
for i in range(self.stacked_convs):
chn = self.in_channels if i == 0 else self.feat_channels
self.cls_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.reg_convs.append(
ConvModule(
chn,
self.feat_channels,
3,
stride=1,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.retina_cls = nn.Conv2d(
self.feat_channels,
self.num_anchors * self.cls_out_channels,
3,
padding=1)
self.retina_reg = nn.Conv2d(
self.feat_channels, self.num_anchors * 4, 3, padding=1)
if self.freeze_all:
def _freeze_conv(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.requires_grad = False
self.apply(_freeze_conv)
def init_weights(self):
for m in self.cls_convs:
normal_init(m.conv, std=0.01)
for m in self.reg_convs:
normal_init(m.conv, std=0.01)
bias_cls = bias_init_with_prob(0.01)
normal_init(self.retina_cls, std=0.01, bias=bias_cls)
normal_init(self.retina_reg, std=0.01)
def forward_single(self, x):
cls_feat = x
reg_feat = x
for cls_conv in self.cls_convs:
cls_feat = cls_conv(cls_feat)
for reg_conv in self.reg_convs:
reg_feat = reg_conv(reg_feat)
cls_score = self.retina_cls(cls_feat)
bbox_pred = self.retina_reg(reg_feat)
return cls_score, bbox_pred, cls_score
@force_fp32(apply_to=('cls_scores', 'bbox_preds', 'embed_feats'))
def get_bboxes(self, cls_scores, bbox_preds, embed_feats, img_metas, cfg,
rescale=False):
assert len(cls_scores) == len(bbox_preds)
num_levels = len(cls_scores)
device = cls_scores[0].device
mlvl_anchors = [
self.anchor_generators[i].grid_anchors(
cls_scores[i].size()[-2:],
self.anchor_strides[i],
device=device) for i in range(num_levels)
]
result_list = []
for img_id in range(len(img_metas)):
cls_score_list = [
cls_scores[i][img_id].detach() for i in range(num_levels)
]
bbox_pred_list = [
bbox_preds[i][img_id].detach() for i in range(num_levels)
]
embed_feats_list = [
embed_feats[i][img_id].detach() for i in range(num_levels)
]
img_shape = img_metas[img_id]['img_shape']
scale_factor = img_metas[img_id]['scale_factor']
proposals = self.get_bboxes_single(cls_score_list, bbox_pred_list,
embed_feats_list,
mlvl_anchors, img_shape,
scale_factor, cfg, rescale)
result_list.append(proposals)
return result_list
def get_bboxes_single(self,
cls_score_list,
bbox_pred_list,
embed_feats_list,
mlvl_anchors,
img_shape,
scale_factor,
cfg,
rescale=False):
assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)
mlvl_bboxes = []
mlvl_scores = []
mlvl_feats = []
feat_chans = self.cls_out_channels
for cls_score, bbox_pred, feat, anchors in zip(cls_score_list,
bbox_pred_list,
embed_feats_list, mlvl_anchors):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
if self.use_sigmoid_cls:
scores = cls_score.sigmoid()
else:
scores = cls_score.softmax(-1)
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)
# Feat: [#A, C, H, W] -> [#A*H*W, C]
feat = feat.permute(1, 2, 0).reshape(-1, feat_chans)
nms_pre = cfg.get('nms_pre', -1)
if nms_pre > 0 and scores.shape[0] > nms_pre:
# Get maximum scores for foreground classes.
if self.use_sigmoid_cls:
max_scores, _ = scores.max(dim=1)
else:
max_scores, _ = scores[:, 1:].max(dim=1)
_, topk_inds = max_scores.topk(nms_pre)
anchors = anchors[topk_inds, :]
bbox_pred = bbox_pred[topk_inds, :]
scores = scores[topk_inds, :]
feat = feat[topk_inds, :]
bboxes = delta2bbox(anchors, bbox_pred, self.target_means,
self.target_stds, img_shape)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
mlvl_feats.append(feat)
mlvl_bboxes = torch.cat(mlvl_bboxes)
if rescale:
mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
mlvl_scores = torch.cat(mlvl_scores)
mlvl_feats = torch.cat(mlvl_feats)
if self.use_sigmoid_cls:
# Add a dummy background class to the front when using sigmoid
padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
mlvl_scores = torch.cat([padding, mlvl_scores], dim=1)
det_bboxes, det_labels, det_feats = multiclass_nms_with_feat(
mlvl_bboxes, mlvl_scores, mlvl_feats, cfg.score_thr, cfg.nms, cfg.max_per_img)
return det_bboxes, det_labels, det_feats
|
80078
|
from .fixture import FakeProcesses
from .wget import Wget
from .systemctl import Systemctl
from .dpkg import Dpkg
__all__ = [
"FakeProcesses",
"Wget",
"Systemctl",
"Dpkg",
]
|
80085
|
import logging
import os
import threading
import time
import curio
import pytest
try:
import catvs
import catvs.server
except ImportError:
catvs = None
import caproto as ca
from caproto.tests.verify_with_catvs import CatvsIOC
logger = logging.getLogger(__name__)
# logging.getLogger('caproto').setLevel('DEBUG')
def server_thread(context):
async def server():
return await context.run(log_pv_names=True)
with curio.Kernel() as kernel:
kernel.run(server)
@pytest.fixture(params=['curio'], # 'trio', 'asyncio', 'epics-base'],
scope='function')
def catvs_ioc(request):
from caproto.curio.server import Context
pvgroup = CatvsIOC(prefix='')
# NOTE: catvs expects server tcp_port==udp_port, so make a weak attempt
# here to avoid clashing between servers
port = list(ca.random_ports(1))[0]
try:
        # The environment variable only needs to be set for the initializer of
        # Context.
os.environ['EPICS_CA_SERVER_PORT'] = str(port)
context = Context(pvgroup.pvdb, ['127.0.0.1'])
finally:
os.environ['EPICS_CA_SERVER_PORT'] = '5064'
thread = threading.Thread(target=server_thread, daemon=True,
args=(context, ))
thread.start()
def stop_server():
context.log.setLevel('INFO')
context.stop()
request.addfinalizer(stop_server)
while getattr(context, 'port', None) is None:
logger.info('Waiting on catvs test server...')
time.sleep(0.1)
tcp_port = context.port
udp_port = context.ca_server_port
logger.info('catvs test server started up on port %d (udp port %d)',
tcp_port, udp_port)
time.sleep(0.5)
return pvgroup, context, thread
def hacked_setup(test_inst, port):
test_inst.testport = port
if isinstance(test_inst, catvs.util.TestMixinClient):
catvs.util.TestMixinClient.setUp(test_inst)
if isinstance(test_inst, catvs.util.TestMixinServer):
catvs.util.TestMixinServer.setUp(test_inst)
def hacked_teardown(test_inst):
...
if catvs is None:
all_tests = []
else:
def get_all_tests():
def get_tests(cls):
return [(cls, attr) for attr in dir(cls)
if attr.startswith('test_')]
from catvs.server.test_chan import TestChannel
from catvs.server.test_ops import TestArray, TestScalar
from catvs.server.test_search import TestSearchTCP, TestSearchUDP
return sum((get_tests(cls)
for cls in [TestChannel, TestScalar, TestArray,
TestSearchTCP, TestSearchUDP]),
[])
all_tests = get_all_tests()
SKIPPED = ('TestScalar-test_get_bad',
'TestScalar-test_put',
'TestArray-test_monitor_three_fixed',
'TestArray-test_monitor_zero_dynamic',
'TestArray-test_put',
)
@pytest.mark.skipif(catvs is None, reason='catvs unavailable')
@pytest.mark.parametrize('test_class, test_name', all_tests)
def test_catvs(catvs_ioc, test_class, test_name):
if f'{test_class.__name__}-{test_name}' in SKIPPED:
pytest.skip("known difference in behavior with epics-base")
pvgroup, context, server_thread = catvs_ioc
test_inst = test_class()
def assert_equal(a, b, msg=None):
if msg is not None:
assert a == b, msg
else:
assert a == b
def assert_ca_equal(msg, **kwargs):
received = dict((name, getattr(msg, name))
for name in kwargs)
expected = kwargs
assert received == expected
test_inst.assertEqual = assert_equal
test_inst.assertCAEqual = assert_ca_equal
port = (context.ca_server_port if 'udp' in test_name.lower()
else context.port)
hacked_setup(test_inst, port)
test_func = getattr(test_inst, test_name)
test_func()
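# Sketch of the shimming pattern used by test_catvs above, reduced to a
# standalone example (the class and names here are illustrative only):
# unittest-style assertions are grafted onto an instance so that
# TestCase-flavoured methods can run without a unittest runner.
def _shim_sketch():
    class Foreign:
        def check(self):
            self.assertEqual(1 + 1, 2)
    def assert_equal(a, b, msg=None):
        assert a == b, msg if msg is not None else f'{a!r} != {b!r}'
    inst = Foreign()
    inst.assertEqual = assert_equal
    inst.check()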
|
80097
|
import flask
app = flask.Flask(__name__)
from werkzeug.middleware.proxy_fix import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
from flask_babel import Babel
babel = Babel(app)
from flask import render_template
from flask_babel import gettext as _, ngettext
@babel.localeselector
def get_locale():
return 'fr'
@app.route("/")
def index():
brittany = _('Brittany')
france = _('France')
return render_template('index.html',
some_text=_("I am a sausage."),
best_part=_("%(part)s is the best part of %(country)s.", part=brittany, country=france),
singular=ngettext('I bought a garlic glove this morning.', 'I bought %(num)d garlic gloves this morning.', 1),
plural=ngettext('I bought a garlic glove this morning.', 'I bought %(num)d garlic gloves this morning.', 42))
if __name__ == "__main__":
app.run(host="0.0.0.0")
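# Typical Flask-Babel catalogue workflow for the strings marked with _()
# and ngettext() above (standard pybabel commands; babel.cfg and the
# translations/ directory are assumptions about the project layout):
#   pybabel extract -F babel.cfg -k _ -o messages.pot .
#   pybabel init -i messages.pot -d translations -l fr
#   pybabel compile -d translations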
|
80117
|
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
import time
def get_package():
return 'org.solovyev.android.messenger'
def get_start_activity():
return '.StartActivity'
def get_actions(device_name):
tests = {'startMainActivity': lambda device: start_activity(device, '.StartActivity'),
'startPreferencesActivity': lambda device: start_activity(device, '.preferences.PreferencesActivity'),
'startAccountsActivity': lambda device: start_activity(device, '.accounts.AccountsActivity'),
'startAboutActivity': lambda device: start_activity(device, '.about.AboutActivity'),
'openMenu': open_menu
}
for name, action in get_device_actions().get(device_name, {}).iteritems():
tests[name] = action
return tests
def get_device_actions():
return {'Nexus-4': {
'scrollContacts': scroll_contacts,
'openContact': open_contact,
'openContactAndReturn': open_contact_and_return
}}
# Util methods
def get_display_width(device):
return int(device.getProperty('display.width'))
def get_display_height(device):
return int(device.getProperty('display.height'))
# Actions
def open_menu(device):
start_activity(device, '.StartActivity')
device.press('KEYCODE_MENU', MonkeyDevice.DOWN_AND_UP)
def scroll_contacts(device, should_start_activity=True):
if should_start_activity:
start_activity(device, '.StartActivity')
time.sleep(2)
width = get_display_width(device)
height = get_display_height(device)
device.drag((width / 2, height / 2 + height / 3), (width / 2, height / 2), 0.2, 1)
def open_contact(device):
scroll_contacts(device)
time.sleep(2)
x = get_display_width(device) / 2
y = get_display_height(device) / 2
device.touch(x, y, MonkeyDevice.DOWN_AND_UP)
def open_contact_and_return(device):
open_contact(device)
time.sleep(3)
device.press('KEYCODE_BACK', MonkeyDevice.DOWN_AND_UP)
def filter_contacts(device):
easy_device = EasyMonkeyDevice(device)
start_activity(device, '.StartActivity')
easy_device.touch(By.id('id/mpp_menu_toggle_filter_box'), MonkeyDevice.DOWN_AND_UP)
def start_activity(device, activity):
run_component = get_package() + '/' + get_package() + activity
device.startActivity(component=run_component)
|
80213
|
import os
import numpy as np
import argparse
import os.path as osp
import json
from tqdm import tqdm
from mmcv import mkdir_or_exist
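# Directory layout implied by the walkers below, relative to --data-root
# (disparity extensions are whatever os.listdir finds; commonly .pfm in
# Scene Flow):
#   flyingthings3d/frames_<type>pass/{TRAIN,TEST}/{A,B,C}/<idx>/{left,right}/*.png
#   flyingthings3d/disparity/{TRAIN,TEST}/{A,B,C}/<idx>/{left,right}/*
#   Monkaa/frames_<type>pass/<scene>/{left,right}/*.png (disparity mirrors this)
#   driving/frames_<type>pass/<focal>/<ward>/<speed>/{left,right}/*.png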
def getFlying3dMetas(root, Type, data_type='clean'):
Metas = []
imgDir = 'flyingthings3d/frames_' + data_type + 'pass'
dispDir = 'flyingthings3d/disparity'
Parts = ['A', 'B', 'C']
for Part in Parts:
partDir = osp.join(root, dispDir, Type, Part)
idxDirs = os.listdir(partDir)
for idxDir in idxDirs:
dispNames = os.listdir(osp.join(partDir, idxDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, Type, Part, idxDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, Type, Part, idxDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, Type, Part, idxDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, Type, Part, idxDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def getMonkaaMetas(root, data_type='clean'):
Metas = []
imgDir = 'Monkaa/frames_' + data_type + 'pass'
dispDir = 'Monkaa/disparity'
sceneDirs = os.listdir(osp.join(root, dispDir))
for sceneDir in sceneDirs:
dispNames = os.listdir(osp.join(root, dispDir, sceneDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, sceneDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, sceneDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, sceneDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, sceneDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def getDrivingMetas(root, data_type='clean'):
Metas = []
imgDir = 'driving/frames_' + data_type + 'pass'
dispDir = 'driving/disparity'
focalLengthDirs = os.listdir(osp.join(root, dispDir))
for focalLengthDir in focalLengthDirs:
wardDirs = os.listdir(osp.join(root, dispDir, focalLengthDir))
for wardDir in wardDirs:
speedDirs = os.listdir(osp.join(root, dispDir, focalLengthDir, wardDir))
for speedDir in speedDirs:
dispNames = os.listdir(osp.join(root, dispDir, focalLengthDir, wardDir, speedDir, 'left'))
imgNames = ["{}.png".format(name.split('.')[0]) for name in dispNames]
for imgName, dispName in zip(imgNames, dispNames):
meta = dict(
left_image_path=osp.join(
imgDir, focalLengthDir, wardDir, speedDir, 'left', imgName
),
right_image_path=osp.join(
imgDir, focalLengthDir, wardDir, speedDir, 'right', imgName
),
left_disp_map_path=osp.join(
dispDir, focalLengthDir, wardDir, speedDir, 'left', dispName
),
right_disp_map_path=osp.join(
dispDir, focalLengthDir, wardDir, speedDir, 'right', dispName
),
)
Metas.append(meta)
return Metas
def build_annoFile(root, save_annotation_root, data_type='clean'):
"""
Build annotation files for Scene Flow Dataset.
Args:
root:
"""
# check existence
assert osp.exists(root), 'Path: {} not exists!'.format(root)
mkdir_or_exist(save_annotation_root)
trainMetas = getFlying3dMetas(root, 'TRAIN', data_type)
testMetas = getFlying3dMetas(root, 'TEST', data_type)
trainMetas.extend(getMonkaaMetas(root, data_type))
trainMetas.extend(getDrivingMetas(root, data_type))
for meta in tqdm(trainMetas):
for k, v in meta.items():
assert osp.exists(osp.join(root, v)), 'trainMetas:{} not exists'.format(v)
for meta in tqdm(testMetas):
for k, v in meta.items():
assert osp.exists(osp.join(root, v)), 'testMetas: {} not exists'.format(v)
info_str = 'SceneFlow Dataset contains:\n' \
' {:5d} training samples \n' \
' {:5d} validation samples'.format(len(trainMetas), len(testMetas))
print(info_str)
def make_json(name, metas):
filepath = osp.join(save_annotation_root, data_type + 'pass_' + name + '.json')
print('Save to {}'.format(filepath))
with open(file=filepath, mode='w') as fp:
json.dump(metas, fp=fp)
make_json(name='train', metas=trainMetas)
make_json(name='test', metas=testMetas)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SceneFlow Data PreProcess.")
parser.add_argument(
"--data-root",
default=None,
help="root of data",
type=str,
)
parser.add_argument(
"--save-annotation-root",
default='./',
help="save root of generated annotation file",
type=str,
)
parser.add_argument(
"--data-type",
default='clean',
help="the type of data, (clean or final)pass",
type=str,
)
args = parser.parse_args()
build_annoFile(args.data_root, args.save_annotation_root, args.data_type)
|
80215
|
from unittest import TestCase
import numpy as np
from athena import NonlinearLevelSet, ForwardNet, BackwardNet, Normalizer
import torch
import os
from contextlib import contextmanager
import matplotlib.pyplot as plt
@contextmanager
def assert_plot_figures_added():
"""
Assert that the number of figures is higher than
when you started the test
"""
num_figures_before = plt.gcf().number
yield
num_figures_after = plt.gcf().number
assert num_figures_before < num_figures_after
def read_data():
data = np.loadtxt('tests/data/naca0012.txt', skiprows=1, delimiter=',')
real_inputs = data[:, 1:19]
n_params = real_inputs.shape[1]
lb = -0.01 * np.ones(n_params)
ub = 0.01 * np.ones(n_params)
normalizer = Normalizer(lb=lb, ub=ub)
# inputs in [-1, 1]
inputs = normalizer.fit_transform(real_inputs)
lift = data[:, 19]
# gradients with respect to normalized inputs
grad_lift = data[:, 21:39]
return inputs, lift, grad_lift
inputs, lift, grad_lift = read_data()
inputs_torch = torch.as_tensor(inputs, dtype=torch.double)
grad_torch = torch.as_tensor(grad_lift, dtype=torch.double)
class TestNonlinearLevelSet(TestCase):
def test_init_n_layers(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.n_layers, 2)
def test_init_active_dim(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.active_dim, 1)
def test_init_lr(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.lr, 0.1)
def test_init_epochs(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.epochs, 100)
def test_init_dh(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.dh, 0.25)
def test_init_forward(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertIsNone(nll.forward)
def test_init_backward(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertIsNone(nll.backward)
def test_init_loss_vec(self):
nll = NonlinearLevelSet(n_layers=2,
active_dim=1,
lr=0.1,
epochs=100,
dh=0.25)
self.assertEqual(nll.loss_vec, [])
def test_train_01(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
self.assertIsInstance(nll.forward, ForwardNet)
def test_train_02(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
self.assertIsInstance(nll.backward, BackwardNet)
def test_train_03(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
self.assertIs(len(nll.loss_vec), 1)
def test_train_04(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
with self.assertRaises(ValueError):
nll.train(inputs=inputs_torch,
gradients=grad_torch,
interactive=True)
def test_train_05(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
with assert_plot_figures_added():
nll.train(inputs=inputs_torch,
gradients=grad_torch,
outputs=lift,
interactive=True)
def test_forward_n_params(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
self.assertEqual(nll.forward.n_params, 9)
def test_backward_n_params(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
self.assertEqual(nll.backward.n_params, 9)
def test_plot_sufficient_summary_01(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
with assert_plot_figures_added():
nll.plot_sufficient_summary(inputs=inputs_torch, outputs=lift)
def test_plot_sufficient_summary_02(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=2, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
with self.assertRaises(ValueError):
nll.plot_sufficient_summary(inputs=inputs_torch, outputs=lift)
def test_plot_loss(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=2)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
with assert_plot_figures_added():
nll.plot_loss()
def test_save_forward(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
outfilename = 'tests/data/saved_forward.pth'
nll.save_forward(outfilename)
self.assertTrue(os.path.exists(outfilename))
self.addCleanup(os.remove, outfilename)
def test_load_forward(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.load_forward(infile='tests/data/forward_test.pth', n_params=18)
self.assertIsInstance(nll.forward, ForwardNet)
def test_save_backward(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.train(inputs=inputs_torch, gradients=grad_torch, interactive=False)
outfilename = 'tests/data/saved_backward.pth'
nll.save_backward(outfilename)
self.assertTrue(os.path.exists(outfilename))
self.addCleanup(os.remove, outfilename)
def test_load_backward(self):
nll = NonlinearLevelSet(n_layers=2, active_dim=1, lr=0.02, epochs=1)
nll.load_backward(infile='tests/data/backward_test.pth', n_params=18)
self.assertIsInstance(nll.backward, BackwardNet)
class TestForwardNet(TestCase):
def test_init_n_params(self):
nll = ForwardNet(n_params=6, n_layers=2, dh=0.25, active_dim=1)
self.assertEqual(nll.n_params, 3)
def test_init_n_layers(self):
nll = ForwardNet(n_params=6, n_layers=2, dh=0.25, active_dim=1)
self.assertEqual(nll.n_layers, 2)
def test_init_dh(self):
nll = ForwardNet(n_params=6, n_layers=2, dh=0.20, active_dim=1)
self.assertEqual(nll.dh, 0.20)
def test_init_omega(self):
nll = ForwardNet(n_params=6, n_layers=2, dh=0.25, active_dim=1)
self.assertEqual(nll.omega, slice(1))
class TestBackwardNet(TestCase):
def test_init_n_params(self):
nll = BackwardNet(n_params=6, n_layers=2, dh=0.25)
self.assertEqual(nll.n_params, 3)
def test_init_n_layers(self):
nll = BackwardNet(n_params=6, n_layers=2, dh=0.25)
self.assertEqual(nll.n_layers, 2)
def test_init_dh(self):
nll = BackwardNet(n_params=6, n_layers=2, dh=0.20)
self.assertEqual(nll.dh, 0.20)
|
80244
|
class ShortCodeError(Exception):
"""Base exception raised when some unexpected event occurs in the shortcode
OAuth flow."""
pass
class UnknownShortCodeError(ShortCodeError):
"""Exception raised when an unknown error happens while running shortcode
OAuth.
"""
pass
class ShortCodeAccessDeniedError(ShortCodeError):
"""Exception raised when the user denies access to the client in shortcode
OAuth."""
pass
class ShortCodeTimeoutError(ShortCodeError):
"""Exception raised when the shortcode expires without being accepted."""
pass
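# Hypothetical consumer showing how these exceptions might be handled;
# check_shortcode and its argument are assumptions, not part of this module.
def _handle_shortcode_result(check_shortcode, handle):
    try:
        return check_shortcode(handle)
    except ShortCodeAccessDeniedError:
        print("user denied access to the client")
    except ShortCodeTimeoutError:
        print("shortcode expired before it was accepted")
    except ShortCodeError as exc:
        print("shortcode OAuth flow failed: %s" % exc)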
|
80306
|
from functools import partial
from typing import Sequence
import pytest
from torch import Tensor, tensor
from tests.text.helpers import TextTester
from tests.text.inputs import _inputs_multiple_references, _inputs_single_sentence_multiple_references
from torchmetrics.functional.text.chrf import chrf_score
from torchmetrics.text.chrf import CHRFScore
from torchmetrics.utilities.imports import _SACREBLEU_AVAILABLE
if _SACREBLEU_AVAILABLE:
from sacrebleu.metrics import CHRF
def sacrebleu_chrf_fn(
preds: Sequence[str],
targets: Sequence[Sequence[str]],
char_order: int,
word_order: int,
lowercase: bool,
whitespace: bool,
) -> Tensor:
sacrebleu_chrf = CHRF(
char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace, eps_smoothing=True
)
# Sacrebleu CHRF expects different format of input
targets = [[target[i] for target in targets] for i in range(len(targets[0]))]
sacrebleu_chrf = sacrebleu_chrf.corpus_score(preds, targets).score / 100
return tensor(sacrebleu_chrf)
@pytest.mark.parametrize(
["char_order", "word_order", "lowercase", "whitespace"],
[
(6, 2, False, False),
(6, 2, False, True),
(4, 2, True, False),
(6, 0, True, False),
(6, 0, True, True),
(4, 0, False, True),
],
)
@pytest.mark.parametrize(
["preds", "targets"],
[(_inputs_multiple_references.preds, _inputs_multiple_references.targets)],
)
@pytest.mark.skipif(not _SACREBLEU_AVAILABLE, reason="test requires sacrebleu")
class TestCHRFScore(TextTester):
@pytest.mark.parametrize("ddp", [False, True])
@pytest.mark.parametrize("dist_sync_on_step", [False, True])
def test_chrf_score_class(
self, ddp, dist_sync_on_step, preds, targets, char_order, word_order, lowercase, whitespace
):
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
nltk_metric = partial(
sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
)
self.run_class_metric_test(
ddp=ddp,
preds=preds,
targets=targets,
metric_class=CHRFScore,
sk_metric=nltk_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
)
def test_chrf_score_functional(self, preds, targets, char_order, word_order, lowercase, whitespace):
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
nltk_metric = partial(
sacrebleu_chrf_fn, char_order=char_order, word_order=word_order, lowercase=lowercase, whitespace=whitespace
)
self.run_functional_metric_test(
preds,
targets,
metric_functional=chrf_score,
sk_metric=nltk_metric,
metric_args=metric_args,
)
def test_chrf_score_differentiability(self, preds, targets, char_order, word_order, lowercase, whitespace):
metric_args = {
"n_char_order": char_order,
"n_word_order": word_order,
"lowercase": lowercase,
"whitespace": whitespace,
}
self.run_differentiability_test(
preds=preds,
targets=targets,
metric_module=CHRFScore,
metric_functional=chrf_score,
metric_args=metric_args,
)
def test_chrf_empty_functional():
hyp = []
ref = [[]]
assert chrf_score(hyp, ref) == tensor(0.0)
def test_chrf_empty_class():
chrf = CHRFScore()
hyp = []
ref = [[]]
assert chrf(hyp, ref) == tensor(0.0)
def test_chrf_return_sentence_level_score_functional():
hyp = _inputs_single_sentence_multiple_references.preds
ref = _inputs_single_sentence_multiple_references.targets
_, chrf_sentence_score = chrf_score(hyp, ref, return_sentence_level_score=True)
    assert isinstance(chrf_sentence_score, Tensor)
def test_chrf_return_sentence_level_class():
chrf = CHRFScore(return_sentence_level_score=True)
hyp = _inputs_single_sentence_multiple_references.preds
ref = _inputs_single_sentence_multiple_references.targets
_, chrf_sentence_score = chrf(hyp, ref)
    assert isinstance(chrf_sentence_score, Tensor)
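# Minimal usage sketch mirroring the calling convention in the tests above
# (predictions as a sequence of strings, targets as a sequence of reference
# lists); the example sentences are illustrative only.
def _chrf_usage_sketch():
    preds = ["the cat sat on the mat"]
    targets = [["the cat sat on the mat", "a cat sat on a mat"]]
    return chrf_score(preds, targets)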
|
80311
|
from share.transform.chain import * # noqa
class AgentIdentifier(Parser):
uri = IRI(ctx)
class WorkIdentifier(Parser):
uri = IRI(ctx)
class Tag(Parser):
name = ctx
class ThroughTags(Parser):
tag = Delegate(Tag, ctx)
class Person(Parser):
given_name = ParseName(ctx.creator).first
family_name = ParseName(ctx.creator).last
additional_name = ParseName(ctx.creator).middle
suffix = ParseName(ctx.creator).suffix
class Creator(Parser):
agent = Delegate(Person, ctx)
cited_as = ctx.creator
order_cited = ctx('index')
class Organization(Parser):
name = ctx.publisher
identifiers = Map(Delegate(AgentIdentifier), Try(IRI(ctx.issn), exceptions=(InvalidIRI, )))
class Extra:
issn = Try(ctx.issn)
class Publisher(Parser):
agent = Delegate(Organization, ctx)
class Extra:
publication_name = ctx.publicationName
class Article(Parser):
title = ctx.title
description = ctx.abstract
rights = ctx.copyright
date_published = ParseDate(ctx.publicationDate)
date_updated = ParseDate(ctx.publicationDate)
identifiers = Map(
Delegate(WorkIdentifier),
ctx.doi,
ctx.identifier,
Map(ctx.value, ctx.url),
)
related_agents = Concat(
Map(Delegate(Creator), ctx.creators),
Map(Delegate(Publisher), ctx)
)
tags = Map(Delegate(ThroughTags), ctx.genre)
class Extra:
openaccess = ctx.openaccess
ending_page = Try(ctx.endingPage)
issue_type = Try(ctx.issuetype)
number = ctx.number
starting_page = ctx.startingPage
topicalCollection = Try(ctx.topicalCollection)
journalid = Try(ctx.journalid)
issn = Try(ctx.issn)
class SpringerTransformer(ChainTransformer):
VERSION = 1
root_parser = Article
|
80326
|
from django.test import TestCase
from addressbase.models import UprnToCouncil, Address
from councils.tests.factories import CouncilFactory
from data_importers.tests.stubs import stub_addressimport
# High-level functional tests for import scripts
class ImporterTest(TestCase):
opts = {"nochecks": True, "verbosity": 0}
def set_up(self, addressbase, uprns, addresses_name):
for address in addressbase:
Address.objects.update_or_create(**address)
for uprn in uprns:
UprnToCouncil.objects.update_or_create(pk=uprn, lad="X01000000")
CouncilFactory(pk="ABC", identifiers=["X01000000"])
cmd = stub_addressimport.Command()
cmd.addresses_name = addresses_name
cmd.handle(**self.opts)
def test_duplicate_uprns(self):
"""
In the csv there are two matching uprns with different polling station ids.
Despite one appearing in addressbase, neither should be imported.
"""
test_params = {
"uprns": ["1", "2", "6"],
"addressbase": [
{
"address": "Another Haringey Park, London",
"uprn": "1",
"postcode": "N8 8NM",
},
{"address": "Haringey Park, London", "uprn": "2", "postcode": "N8 9JG"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "duplicate_uprns.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_uprn_not_in_addressbase(self):
"""uprn does not appear in addressbase data, or in UprnToCouncil table"""
test_params = {
"uprns": ["6"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_uprn_assigned_to_wrong_council(self):
"""Uprn exists but we've located it in a different council in UprnToCouncil table"""
test_params = {
"uprns": ["6"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
UprnToCouncil.objects.update_or_create(pk=4, lad="X01000002")
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("6", "2")}
self.assertEqual(set(imported_uprns), expected)
def test_postcode_mismatch(self):
"""Uprn exists but postcodes don't match"""
test_params = {
"uprns": ["4", "7"],
"addressbase": [
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "4 Factory Rd, Poole",
"uprn": "7",
"postcode": "BH16 5HT", # postcode is 'BH17 5HT' in csv
},
],
"addresses_name": "uprn_missing.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(1, len(imported_uprns))
expected = {("4", "1")}
self.assertEqual(set(imported_uprns), expected)
def test_address_import(self):
test_params = {
"uprns": ["1", "3", "4", "5", "6", "7"],
"addressbase": [
{"address": "Haringey Park, London", "uprn": "1", "postcode": "N8 9JG"},
# uprn '2' in addresses.csv but wasn't in addressbase so not in uprntocouncil either
{
"address": "36 Abbots Park, London",
"uprn": "3",
"postcode": "SW2 3QD",
},
{"address": "3 Factory Rd, Poole", "uprn": "4", "postcode": "BH16 5HT"},
{
"address": "5-6 Mickleton Dr, Southport",
"uprn": "5",
"postcode": "PR8 2QX",
},
{
"address": "80 Pine Vale Cres, Bournemouth",
"uprn": "6",
"postcode": "BH10 6BJ",
},
{
"address": "4 Factory Rd, Poole",
"uprn": "7",
"postcode": "BH16 5HT", # postcode is 'BH17 5HT' in csv
},
],
"addresses_name": "addresses.csv",
}
self.set_up(**test_params)
imported_uprns = (
UprnToCouncil.objects.filter(lad="X01000000")
.exclude(polling_station_id="")
.order_by("uprn")
.values_list("uprn", "polling_station_id")
)
self.assertEqual(3, len(imported_uprns))
expected = {("3", "3"), ("4", "1"), ("6", "2")}
self.assertEqual(set(imported_uprns), expected)
|
80329
|
from django.db import connection
from usaspending_api.common.etl import ETLQuery, ETLTable
from usaspending_api.common.etl.operations import delete_obsolete_rows, insert_missing_rows, update_changed_rows
# This is basically the desired final state of the federal_account table. We can diff this against the
# actual federal_account table and make corrections as appropriate to bring the federal_account table
# into line. Since the treasury_appropriation_account and federal_account tables are fairly small, we
# can perform full diffs with no noticeable performance impact. This sort order is dictated by DEV-3495.
FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL = """
select
distinct on (agency_id, main_account_code)
agency_id as agency_identifier,
main_account_code,
concat(agency_id, '-', main_account_code) as federal_account_code,
account_title
from
treasury_appropriation_account
order by
agency_id,
main_account_code,
beginning_period_of_availability desc nulls last,
ending_period_of_availability desc nulls last,
sub_account_code,
allocation_transfer_agency_id,
treasury_account_identifier desc
"""
source_federal_account_query = ETLQuery(FEDERAL_ACCOUNTS_FROM_TREASURY_ACCOUNTS_SQL)
destination_federal_account_table = ETLTable(
"federal_account", key_overrides=["agency_identifier", "main_account_code"]
)
def remove_empty_federal_accounts():
"""
Removes federal accounts that are no longer attached to a TAS.
Returns:
        Number of rows deleted
"""
return delete_obsolete_rows(source_federal_account_query, destination_federal_account_table)
def update_federal_accounts():
"""
Update existing federal account records based on the latest information
from the TreasuryAppropriationAccount (TAS) table. The account title
for each federal account should reflect the account title of the
a related TAS with the most recent beginning period of availability.
Returns:
Number of rows updated
"""
return update_changed_rows(source_federal_account_query, destination_federal_account_table)
def insert_federal_accounts():
"""
Insert new federal accounts records based on the TreasuryAppropriationAccount
(TAS) table. Each TAS maps to a higher-level federal account, defined
by a unique combination of TAS agency_id (AID) and TAS main account
code (MAC).
"""
return insert_missing_rows(source_federal_account_query, destination_federal_account_table)
def link_treasury_accounts_to_federal_accounts():
"""
Federal accounts are derived from AID (agency identifier) + MAIN (main account code) in treasury accounts.
Using this information, we can link treasury accounts to their corresponding federal account and correct
any accounts that may be mis-linked. Since these tables are relatively small, we can simply perform full
updates with little to no noticeable performance impact.
"""
with connection.cursor() as cursor:
cursor.execute(
"""
update treasury_appropriation_account as tu
set federal_account_id = fa.id
from treasury_appropriation_account as t
left outer join federal_account as fa on
t.agency_id = fa.agency_identifier and
t.main_account_code = fa.main_account_code
where tu.treasury_account_identifier = t.treasury_account_identifier and
tu.federal_account_id is distinct from fa.id;
"""
)
return cursor.rowcount
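def refresh_federal_accounts_sketch():
    """
    Hypothetical composition of the operations above (not part of the
    original module); a real pipeline may wrap these in a transaction or
    order them differently.
    """
    rows_deleted = remove_empty_federal_accounts()
    rows_updated = update_federal_accounts()
    rows_inserted = insert_federal_accounts()
    rows_relinked = link_treasury_accounts_to_federal_accounts()
    return rows_deleted, rows_updated, rows_inserted, rows_relinked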
|
80363
|
import sys, os, io
from twisted.internet import reactor, protocol, task, defer
from twisted.python.procutils import which
from twisted.python import usage
# run the command with python's deprecation warnings turned on, capturing
# stderr. When done, scan stderr for warnings, write them to a separate
# logfile (so the buildbot can see them), and return rc=1 if there were any.
class Options(usage.Options):
optParameters = [
["warnings", None, None, "file to write warnings into at end of test run"],
]
def parseArgs(self, command, *args):
self["command"] = command
self["args"] = list(args)
description = """Run as:
PYTHONWARNINGS=default::DeprecationWarning python run-deprecations.py [--warnings=STDERRFILE] COMMAND ARGS..
"""
class RunPP(protocol.ProcessProtocol):
def outReceived(self, data):
self.stdout.write(data)
        sys.stdout.write(data.decode("utf-8", "replace"))
def errReceived(self, data):
self.stderr.write(data)
        sys.stderr.write(data.decode("utf-8", "replace"))
def processEnded(self, reason):
signal = reason.value.signal
rc = reason.value.exitCode
self.d.callback((signal, rc))
@defer.inlineCallbacks
def run_command(main):
config = Options()
config.parseOptions()
command = config["command"]
if "/" in command:
# don't search
exe = command
else:
executables = which(command)
if not executables:
raise ValueError("unable to find '%s' in PATH (%s)" %
(command, os.environ.get("PATH")))
exe = executables[0]
pw = os.environ.get("PYTHONWARNINGS")
DDW = "default::DeprecationWarning"
if pw != DDW:
print "note: $PYTHONWARNINGS is '%s', not the expected %s" % (pw, DDW)
sys.stdout.flush()
pp = RunPP()
pp.d = defer.Deferred()
pp.stdout = io.BytesIO()
pp.stderr = io.BytesIO()
reactor.spawnProcess(pp, exe, [exe] + config["args"], env=None)
(signal, rc) = yield pp.d
# maintain ordering, but ignore duplicates (for some reason, either the
# 'warnings' module or twisted.python.deprecate isn't quashing them)
already = set()
warnings = []
def add(line):
if line in already:
return
already.add(line)
warnings.append(line)
pp.stdout.seek(0)
for line in pp.stdout.readlines():
if "DeprecationWarning" in line:
add(line) # includes newline
pp.stderr.seek(0)
for line in pp.stderr.readlines():
if "DeprecationWarning" in line:
add(line)
if warnings:
if config["warnings"]:
with open(config["warnings"], "wb") as f:
                f.write(b"".join(warnings))
        print("ERROR: %d deprecation warnings found" % len(warnings))
sys.exit(1)
print "no deprecation warnings"
if signal:
sys.exit(signal)
sys.exit(rc)
task.react(run_command)
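# Example invocation (mirrors the Options description above; the wrapped
# command is whatever test runner you want checked for warnings):
#   PYTHONWARNINGS=default::DeprecationWarning \
#       python run-deprecations.py --warnings=warnings.log trial my_package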
|
80371
|
from django.forms import ModelForm
from .models import PatientRegister, DoctorRegister, Emergency
class PatientRegisterForm(ModelForm):
class Meta:
model = PatientRegister
fields = "__all__"
class DoctorRegisterForm(ModelForm):
class Meta:
model = DoctorRegister
fields = "__all__"
class EmergencyForm(ModelForm):
class Meta:
model = Emergency
fields = "__all__"
|
80400
|
from nose.tools import istest, assert_equal
from mammoth.lists import unique
@istest
def unique_of_empty_list_is_empty_list():
assert_equal([], unique([]))
@istest
def unique_removes_duplicates_while_preserving_order():
assert_equal(["apple", "banana"], unique(["apple", "banana", "apple"]))
|
80404
|
import struct
from abc import ABCMeta
from bxcommon import constants
from bxcommon.constants import MSG_NULL_BYTE
from bxcommon.messages.bloxroute.abstract_bloxroute_message import AbstractBloxrouteMessage
class AbstractBlockchainSyncMessage(AbstractBloxrouteMessage):
"""
Message type for requesting/receiving direct blockchain messages for syncing chainstate.
"""
__metaclass__ = ABCMeta
MESSAGE_TYPE = ""
BASE_LENGTH = 12
def __init__(self, command=None, payload=None, buf=None):
if buf is None:
buf = bytearray(self.HEADER_LENGTH + constants.MSG_TYPE_LEN + len(payload))
off = self.HEADER_LENGTH
struct.pack_into("<12s", buf, off, command)
off += constants.MSG_TYPE_LEN
buf[off:off + len(payload)] = payload
self.buf = buf
self._command = None
self._payload = None
payload_length = len(buf) - self.HEADER_LENGTH
super(AbstractBlockchainSyncMessage, self).__init__(self.MESSAGE_TYPE, payload_length, self.buf)
def command(self):
"""
Blockchain command. Can be either a string or an int.
:return:
"""
if self._command is None:
off = self.HEADER_LENGTH
self._command, = struct.unpack_from("<12s", self.buf, off)
self._command = str(self._command).rstrip(MSG_NULL_BYTE)
try:
self._command = int(self._command)
            except ValueError:
pass
return self._command
def payload(self):
if self._payload is None:
off = self.HEADER_LENGTH + constants.MSG_TYPE_LEN
self._payload = self._memoryview[off:off + self.payload_len()]
return self._payload
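# Standalone sketch of the "<12s" command packing used above (message
# framing and header handling omitted; the command bytes are illustrative):
def _command_pack_sketch():
    buf = bytearray(12)
    struct.pack_into("<12s", buf, 0, b"getheaders")
    command, = struct.unpack_from("<12s", buf, 0)
    assert command.rstrip(b"\x00") == b"getheaders"
    return command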
|
80508
|
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import zip
from builtins import range
from builtins import object
from past.utils import old_div
from nose.tools import (assert_equal, assert_not_equal, assert_almost_equal,
assert_in, raises, assert_is, assert_is_not,
assert_true)
from nose.plugins.skip import Skip, SkipTest
from .test_helpers import (true_func, assert_equal_array_array,
make_1d_traj, assert_items_equal)
import copy
import math
import openpathsampling as paths
from openpathsampling.high_level.move_scheme import *
from openpathsampling.high_level.move_strategy import (
levels,
MoveStrategy, OneWayShootingStrategy, NearestNeighborRepExStrategy,
OrganizeByMoveGroupStrategy, AllSetRepExStrategy
)
import openpathsampling.high_level.move_strategy as strategies
import pytest
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
def _make_acceptance_mock_step(mccycle, accepted, path_sim_mover, move_type,
mover_sig, submover_num=None):
root_mover = path_sim_mover.mover
chooser_names = {m.name[:-7].lower(): m for m in root_mover.movers}
chooser = chooser_names[move_type]
sig_to_mover = {frozenset(m.ensemble_signature[0]): m
for m in chooser.movers}
# group_mover = chooser.movers[mover_num]
group_mover = sig_to_mover[frozenset(mover_sig)]
Change = {True: paths.AcceptedSampleMoveChange,
False: paths.RejectedSampleMoveChange}[accepted]
# foo here is because we need non-empty samples to show that we're
# actually accepted or not (WHY?!?!?)
if submover_num is not None:
submover = group_mover.movers[submover_num]
submover_change = Change(samples=['foo'], mover=submover)
group_mover_change = paths.RandomChoiceMoveChange(
subchange=submover_change,
mover=group_mover
)
else:
submover_change = None
group_mover_change = Change(samples=['foo'], mover=group_mover)
chooser_change = paths.RandomChoiceMoveChange(
subchange=group_mover_change,
mover=chooser
)
root_mover_change = paths.RandomChoiceMoveChange(
subchange=chooser_change,
mover=root_mover
)
path_sim_change = paths.PathSimulatorMoveChange(
subchange=root_mover_change,
mover=path_sim_mover
)
step = paths.MCStep(
mccycle=mccycle,
active=paths.SampleSet([]),
change=path_sim_change
)
return step
def _make_null_mover_step(mccycle, path_sim_mover, null_mover):
empty_sample_set = paths.SampleSet([])
change = paths.PathSimulatorMoveChange(
mover=path_sim_mover,
subchange=null_mover.move(empty_sample_set)
)
step = paths.MCStep(
mccycle=mccycle,
active=empty_sample_set,
change=change
)
return step
class TestMoveAcceptanceAnalysis(object):
def setup(self):
self.HAS_TQDM = paths.progress.HAS_TQDM
paths.progress.HAS_TQDM = False
paths.InterfaceSet._reset()
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
state_A = paths.CVDefinedVolume(cvA, float("-inf"), -0.5).named("A")
state_B = paths.CVDefinedVolume(cvB, float("-inf"), -0.5).named("B")
interfaces_A = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3])
network = paths.MISTISNetwork([(state_A, interfaces_A, state_B)])
self.scheme = MoveScheme(network)
self.scheme.append(OneWayShootingStrategy())
self.scheme.append(NearestNeighborRepExStrategy())
self.scheme.append(OrganizeByMoveGroupStrategy())
root_mover = self.scheme.move_decision_tree()
path_sim_mover = paths.PathSimulatorMover(root_mover, None)
null_mover = paths.IdentityPathMover(counts_as_trial=False)
ens_0 = network.sampling_ensembles[0]
ens_1 = network.sampling_ensembles[1]
# acc repex ens1-2
# acc fwd ens1
# acc bkwd ens2
# rej bkwd ens1
# rej repex ens1-2
step_info = [
(1, True, path_sim_mover, 'repex', [ens_0, ens_1], None),
(2, True, path_sim_mover, 'shooting', [ens_0], 0),
(3, True, path_sim_mover, 'shooting', [ens_1], 1),
(4, False, path_sim_mover, 'shooting', [ens_0], 1),
(5, False, path_sim_mover, 'repex', [ens_0, ens_1], None)
]
self.steps = [_make_acceptance_mock_step(*info)
for info in step_info]
self.null_mover_6 = _make_null_mover_step(6, path_sim_mover,
null_mover)
self.null_mover_change_key = [(None, str([path_sim_mover, [None]]))]
acceptance_empty = MoveAcceptanceAnalysis(self.scheme)
acceptance = MoveAcceptanceAnalysis(self.scheme)
acceptance.add_steps(self.steps)
acceptance_null = MoveAcceptanceAnalysis(self.scheme)
acceptance_null.add_steps(self.steps + [self.null_mover_6])
self.analysis = {'empty': acceptance_empty,
'normal': acceptance,
'with_null': acceptance_null}
def teardown(self):
paths.progress.HAS_TQDM = self.HAS_TQDM
@pytest.mark.parametrize('step_num', [0, 1, 2, 3, 4])
def test_calculate_step_acceptance(self, step_num):
accepted = [1] if step_num in [0, 1, 2] else [0]
analysis = MoveAcceptanceAnalysis(self.scheme)
analysis._calculate_step_acceptance(self.steps[step_num])
assert len(analysis._trials) == len(analysis._accepted)
len_trials = len(analysis._trials)
assert list(analysis._trials.values()) == [1] * len_trials
assert list(analysis._accepted.values()) == accepted * len_trials
def test_add_steps(self):
# also tests n_total_trials
acceptance = MoveAcceptanceAnalysis(self.scheme)
assert acceptance._n_steps == 0
assert acceptance.n_total_trials == 0
acceptance.add_steps(self.steps)
assert acceptance._n_steps == 5
assert acceptance.n_total_trials == 5
acceptance.add_steps([self.null_mover_6])
assert acceptance._n_steps == 6
assert acceptance.n_total_trials == 5
@pytest.mark.parametrize('simulation', ['empty', 'normal', 'with_null'])
def test_no_move_keys(self, simulation):
analysis = self.analysis[simulation]
expected = {'empty': [],
'normal': [],
'with_null': self.null_mover_change_key}[simulation]
assert analysis.no_move_keys == expected
def test_select_movers_none(self):
analysis = self.analysis['normal'] # doesn't matter which
select_movers = analysis._select_movers
scheme_movers = {k: self.scheme.movers[k]
for k in ['shooting', 'repex']}
assert select_movers(None) == scheme_movers
@pytest.mark.parametrize('group_name', ['shooting', 'repex'])
def test_select_movers_groupname(self, group_name):
analysis = self.analysis['normal'] # doesn't matter which
select_movers = analysis._select_movers
expected = {mover: [mover]
for mover in self.scheme.movers[group_name]}
assert select_movers(group_name) == expected
@pytest.mark.parametrize('group_name', ['shooting', 'repex'])
def test_select_movers_mover(self, group_name):
analysis = self.analysis['normal'] # doesn't matter which
select_movers = analysis._select_movers
input_movers = self.scheme.movers[group_name]
for mover in input_movers:
try:
extra_movers = mover.movers
except AttributeError:
extra_movers = []
expected = {m: [m] for m in [mover] + extra_movers}
assert select_movers(mover) == expected
@pytest.mark.parametrize('simulation', ['empty', 'normal', 'with_null'])
def test_summary_data_none(self, simulation):
results = self.analysis[simulation].summary_data(None)
expected_results_empty = {
'shooting': {'move_name': 'shooting',
'n_accepted': 0,
'n_trials': 0,
'expected_frequency': 0.8},
'repex': {'move_name': 'repex',
'n_accepted': 0,
'n_trials': 0,
'expected_frequency': 0.2}
}
expected_results_non_empty = {
'shooting': {'move_name': 'shooting',
'n_accepted': 2,
'n_trials': 3,
'expected_frequency': 0.8},
'repex': {'move_name': 'repex',
'n_accepted': 1,
'n_trials': 2,
'expected_frequency': 0.2}
}
expected = {'empty': expected_results_empty,
'normal': expected_results_non_empty,
'with_null': expected_results_non_empty}[simulation]
for result in results:
assert result._asdict() == expected[result.move_name]
@pytest.mark.parametrize('group_name', ['shooting', 'repex'])
@pytest.mark.parametrize('simulation', ['empty', 'normal', 'with_null'])
def test_summary_data_groupname(self, group_name, simulation):
results = self.analysis[simulation].summary_data(group_name)
scheme = self.scheme
analysis = self.analysis[simulation]
for i, mover in enumerate(scheme.movers['shooting']):
print(i, mover, [v for k, v in analysis._trials.items()
if k[0] == mover])
expected_results_empty = {
'shooting': [{'move_name': scheme.movers['shooting'][0],
'expected_frequency': 0.4,
'n_accepted': 0,
'n_trials': 0},
{'move_name': scheme.movers['shooting'][1],
'expected_frequency': 0.4,
'n_accepted': 0,
'n_trials': 0}],
'repex': [{'move_name': scheme.movers['repex'][0],
'expected_frequency': 0.2,
'n_accepted': 0,
'n_trials': 0}]
}
expected_results_non_empty = {
'shooting': [{'move_name': scheme.movers['shooting'][0],
'expected_frequency': 0.4,
'n_accepted': 1,
'n_trials': 2},
{'move_name': scheme.movers['shooting'][1],
'expected_frequency': 0.4,
'n_accepted': 1,
'n_trials': 1}],
'repex': [{'move_name': scheme.movers['repex'][0],
'expected_frequency': 0.2,
'n_accepted': 1,
'n_trials': 2}]
}
expected_list = {'empty': expected_results_empty,
'normal': expected_results_non_empty,
'with_null': expected_results_non_empty
}[simulation][group_name]
expected = {res['move_name']: res for res in expected_list}
for result in results:
assert result._asdict() == expected[result.move_name]
@pytest.mark.parametrize('simulation', ['empty', 'normal', 'with_null'])
@pytest.mark.parametrize('mover_ensemble', [0, 1])
def test_summary_data_mover(self, simulation, mover_ensemble):
mover = self.scheme.movers['shooting'][mover_ensemble]
results = self.analysis[simulation].summary_data(mover)
expected_list = [
{'move_name': mover,
# 'expected_frequency': 0.4,
'n_accepted': 0,
'n_trials': 0},
{'move_name': mover.movers[0],
# 'expected_frequency': float('nan'),
'n_accepted': 0,
'n_trials': 0},
{'move_name': mover.movers[1],
# 'expected_frequency': float('nan'),
'n_accepted': 0,
'n_trials': 0}
]
updates = {
0: {mover: {'n_accepted': 1, 'n_trials': 2},
mover.movers[0]: {'n_accepted': 1, 'n_trials': 1},
mover.movers[1]: {'n_accepted': 0, 'n_trials': 1}},
1: {mover: {'n_accepted': 1, 'n_trials': 1},
mover.movers[0]: {'n_accepted': 0, 'n_trials': 0},
mover.movers[1]: {'n_accepted': 1, 'n_trials': 1}}
}
expected = {elem['move_name']: elem for elem in expected_list}
if simulation in ['normal', 'with_null']:
update = updates[mover_ensemble]
for m, dct in expected.items():
dct.update(update[m])
for result in results:
# trickiness here because 'nan' != 'nan'
result_dict = result._asdict()
result_freq = result_dict.pop('expected_frequency')
if result.move_name == mover:
assert result_freq == 0.4
else:
assert math.isnan(result_freq)
assert result_dict == expected[result.move_name]
@pytest.mark.parametrize('simulation', ['normal', 'with_null'])
def test_line_as_text(self, simulation):
line = MoveAcceptanceAnalysisLine(
move_name='shooting',
n_accepted=2,
n_trials=3,
expected_frequency=0.8
)
expected = ("shooting ran 60.000% (expected 80.00%) of the "
+ "cycles with acceptance 2/3 (66.67%)\n")
result = self.analysis[simulation]._line_as_text(line)
assert result == expected
def test_line_as_text_nan_acceptance(self):
line = MoveAcceptanceAnalysisLine(
move_name='path_reversal',
n_accepted=0,
n_trials=0,
expected_frequency=float('nan')
)
expected = ("path_reversal ran 0.000% (expected nan%) of the "
+ "cycles with acceptance 0/0 (nan%)\n")
result = self.analysis['normal']._line_as_text(line)
assert result == expected
def test_line_as_text_mover_as_name(self):
mover = self.scheme.movers['shooting'][0]
line = MoveAcceptanceAnalysisLine(
move_name=mover,
n_accepted=1,
n_trials=2,
expected_frequency=0.4
)
expected = (str(mover) + " ran 40.000% (expected 40.00%) "
+ "of the cycles with acceptance 1/2 (50.00%)\n")
result = self.analysis['normal']._line_as_text(line)
assert result == expected
@pytest.mark.parametrize('simulation', ['normal', 'with_null'])
def test_format_as_text(self, simulation):
analysis = self.analysis[simulation]
summary_data = analysis.summary_data(None)
text_lines = {
'shooting': ("shooting ran 60.000% (expected 80.00%) of the "
+ "cycles with acceptance 2/3 (66.67%)\n"),
'repex': ("repex ran 40.000% (expected 20.00%) of the "
+ "cycles with acceptance 1/2 (50.00%)\n")
}
expected = "".join([text_lines[line.move_name]
for line in summary_data])
if simulation == 'with_null':
expected = ("Null moves for 1 cycles. Excluding null moves:\n"
+ expected)
assert analysis.format_as_text(summary_data) == expected
class TestMoveScheme(object):
def setup(self):
paths.InterfaceSet._reset()
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
self.stateA = paths.CVDefinedVolume(cvA, float("-inf"), -0.5)
self.stateB = paths.CVDefinedVolume(cvB, float("-inf"), -0.5)
interfacesA = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3, -0.1])
interfacesB = paths.VolumeInterfaceSet(cvB, float("-inf"),
[-0.5, -0.3, -0.1])
network = paths.MSTISNetwork(
[(self.stateA, interfacesA),
(self.stateB, interfacesB)],
ms_outers=paths.MSOuterTISInterface.from_lambdas(
{interfacesA: 0.0, interfacesB: 0.0}
)
)
self.scheme = MoveScheme(network)
def test_append_individuals_default_levels(self):
shootstrat = OneWayShootingStrategy()
repexstrat = NearestNeighborRepExStrategy()
defaultstrat = OrganizeByMoveGroupStrategy()
assert_equal(len(list(self.scheme.strategies.keys())), 0)
self.scheme.append(shootstrat)
self.scheme.append(repexstrat)
self.scheme.append(defaultstrat)
strats = self.scheme.strategies
assert_equal(len(list(strats.keys())), 3)
pairs = [(levels.MOVER, shootstrat), (levels.SIGNATURE, repexstrat),
(levels.GLOBAL, defaultstrat)]
for (k, v) in pairs:
assert_in(v, strats[k])
def test_append_groups_default_levels(self):
shootstrat = OneWayShootingStrategy()
repexstrat = NearestNeighborRepExStrategy()
defaultstrat = OrganizeByMoveGroupStrategy()
assert_equal(len(list(self.scheme.strategies.keys())), 0)
self.scheme.append([shootstrat, repexstrat, defaultstrat])
strats = self.scheme.strategies
assert_equal(len(list(strats.keys())), 3)
pairs = [(levels.MOVER, shootstrat), (levels.SIGNATURE, repexstrat),
(levels.GLOBAL, defaultstrat)]
for (k, v) in pairs:
assert_in(v, strats[k])
def test_append_individuals_custom_levels(self):
shootstrat = OneWayShootingStrategy()
repexstrat = NearestNeighborRepExStrategy()
defaultstrat = OrganizeByMoveGroupStrategy()
assert_equal(len(list(self.scheme.strategies.keys())), 0)
self.scheme.append(shootstrat, 60)
self.scheme.append(repexstrat, 60)
self.scheme.append(defaultstrat, 60)
strats = self.scheme.strategies
assert_equal(len(list(strats.keys())), 1)
assert_items_equal(strats[60], [shootstrat, repexstrat, defaultstrat])
def test_append_groups_same_custom_level(self):
shootstrat = OneWayShootingStrategy()
repexstrat = NearestNeighborRepExStrategy()
defaultstrat = OrganizeByMoveGroupStrategy()
assert_equal(len(list(self.scheme.strategies.keys())), 0)
self.scheme.append([shootstrat, repexstrat, defaultstrat], 60)
strats = self.scheme.strategies
assert_equal(len(list(strats.keys())), 1)
assert_items_equal(strats[60], [shootstrat, repexstrat, defaultstrat])
def test_append_group_different_custom_levels(self):
shootstrat = OneWayShootingStrategy()
repexstrat = NearestNeighborRepExStrategy()
defaultstrat = OrganizeByMoveGroupStrategy()
assert_equal(len(list(self.scheme.strategies.keys())), 0)
self.scheme.append([shootstrat, repexstrat, defaultstrat],
[45, 55, 65])
strats = self.scheme.strategies
assert_equal(len(list(strats.keys())), 3)
for (k, v) in [(45, shootstrat), (55, repexstrat), (65, defaultstrat)]:
assert_in(v, strats[k])
def test_apply_strategy(self):
if self.scheme.movers == {}:
print("Full support of MoveStrategy implemented?")
print("Time to remove legacy from tests.")
else:
self.scheme.movers = {}
shoot_strat_1 = OneWayShootingStrategy(
ensembles=self.scheme.network.sampling_transitions[0].ensembles,
replace=False
)
shoot_strat_2 = OneWayShootingStrategy(
ensembles=(
[self.scheme.network.sampling_transitions[0].ensembles[-1]] +
self.scheme.network.sampling_transitions[1].ensembles
),
replace=False
)
shoot_strat_3 = OneWayShootingStrategy(replace=True)
self.scheme.apply_strategy(shoot_strat_1)
assert_items_equal(list(self.scheme.movers.keys()), ['shooting'])
assert_equal(len(self.scheme.movers['shooting']), 3)
self.scheme.apply_strategy(shoot_strat_2)
assert_items_equal(list(self.scheme.movers.keys()), ['shooting'])
assert_equal(len(self.scheme.movers['shooting']), 7)
old_movers = copy.copy(self.scheme.movers['shooting'])
self.scheme.apply_strategy(shoot_strat_3)
assert_items_equal(list(self.scheme.movers.keys()), ['shooting'])
assert_equal(len(self.scheme.movers['shooting']), 7)
new_movers = self.scheme.movers['shooting']
for (o, n) in zip(old_movers, new_movers):
assert_equal(o is n, False)
shoot_strat_3.replace_signatures = True
self.scheme.apply_strategy(shoot_strat_3)
assert_equal(len(self.scheme.movers['shooting']), 6)
self.scheme.movers = {}
shoot_strat_1.set_replace(True)
self.scheme.apply_strategy(shoot_strat_1)
assert_equal(len(self.scheme.movers['shooting']), 3)
old_movers = copy.copy(self.scheme.movers['shooting'])
shoot_strat_3.replace_signatures = False
self.scheme.apply_strategy(shoot_strat_3)
assert_equal(len(self.scheme.movers['shooting']), 6)
new_movers = self.scheme.movers['shooting']
for (o, n) in zip(old_movers, new_movers):
assert_equal(o is n, False)
def test_move_decision_tree(self):
shoot = OneWayShootingStrategy()
repex = NearestNeighborRepExStrategy()
default = OrganizeByMoveGroupStrategy()
self.scheme.append([default, shoot, repex])
assert_equal(self.scheme.root_mover, None)
root = self.scheme.move_decision_tree()
assert_not_equal(self.scheme.root_mover, None)
assert_equal(len(root.movers), 2)
names = ['ShootingChooser', 'RepexChooser']
name_dict = {root.movers[i].name : i for i in range(len(root.movers))}
for name in names:
assert_in(name, list(name_dict.keys()))
assert_equal(len(root.movers[name_dict['ShootingChooser']].movers), 6)
assert_equal(len(root.movers[name_dict['RepexChooser']].movers), 4)
new_root = self.scheme.move_decision_tree()
assert_is(new_root, root)
new_root = self.scheme.move_decision_tree(rebuild=True)
assert_is_not(new_root, root)
def test_repex_style_switching(self):
nn_repex = NearestNeighborRepExStrategy()
all_repex = AllSetRepExStrategy()
default = OrganizeByMoveGroupStrategy()
self.scheme.append([default, nn_repex])
root = self.scheme.move_decision_tree(rebuild=True)
assert_equal(len(self.scheme.movers['repex']), 4)
self.scheme.append(all_repex, force=True)
root = self.scheme.move_decision_tree(rebuild=True)
assert_equal(len(self.scheme.movers['repex']), 6)
self.scheme.append(nn_repex, force=True)
root = self.scheme.move_decision_tree(rebuild=True)
assert_equal(len(self.scheme.movers['repex']), 4)
def test_build_balance_partners(self):
ensA = self.scheme.network.sampling_transitions[0].ensembles[0]
ensB = self.scheme.network.sampling_transitions[0].ensembles[1]
hopAB = paths.EnsembleHopMover(ensemble=ensA, target_ensemble=ensB)
hopBA = paths.EnsembleHopMover(ensemble=ensB, target_ensemble=ensA)
self.scheme.movers['hop'] = [hopAB, hopBA]
self.scheme.append(strategies.OrganizeByMoveGroupStrategy())
root = self.scheme.move_decision_tree()
self.scheme.build_balance_partners()
assert_equal(self.scheme.balance_partners[hopAB], [hopBA])
assert_equal(self.scheme.balance_partners[hopBA], [hopAB])
@raises(RuntimeWarning)
def test_build_balance_partners_premature(self):
self.scheme.movers = {}
self.scheme.build_balance_partners()
@raises(RuntimeWarning)
def test_build_balance_partners_no_partner(self):
ensA = self.scheme.network.sampling_transitions[0].ensembles[0]
ensB = self.scheme.network.sampling_transitions[0].ensembles[1]
hopAB = paths.EnsembleHopMover(ensemble=ensA, target_ensemble=ensB)
hopBA = paths.EnsembleHopMover(ensemble=ensB, target_ensemble=ensA)
self.scheme.movers['hop'] = [hopAB]
self.scheme.append(strategies.OrganizeByMoveGroupStrategy())
root = self.scheme.move_decision_tree()
self.scheme.build_balance_partners()
@raises(RuntimeWarning)
def test_build_balance_partners_two_partners(self):
ensA = self.scheme.network.sampling_transitions[0].ensembles[0]
ensB = self.scheme.network.sampling_transitions[0].ensembles[1]
hopAB = paths.EnsembleHopMover(ensemble=ensA, target_ensemble=ensB)
hopAB2 = paths.EnsembleHopMover(ensemble=ensA, target_ensemble=ensB)
hopBA = paths.EnsembleHopMover(ensemble=ensB, target_ensemble=ensA)
self.scheme.movers['hop'] = [hopAB, hopBA, hopAB2]
self.scheme.append(strategies.OrganizeByMoveGroupStrategy())
root = self.scheme.move_decision_tree()
self.scheme.build_balance_partners()
def test_sanity_check_sane(self):
self.scheme.append([NearestNeighborRepExStrategy(),
OneWayShootingStrategy(),
OrganizeByMoveGroupStrategy()])
root = self.scheme.move_decision_tree()
self.scheme.sanity_check()
@raises(AssertionError)
def test_sanity_check_unused_sampling(self):
ensemble_subset = self.scheme.network.sampling_transitions[0].ensembles
self.scheme.append([
OneWayShootingStrategy(ensembles=ensemble_subset),
OrganizeByMoveGroupStrategy()
])
root = self.scheme.move_decision_tree()
self.scheme.sanity_check()
@raises(AssertionError)
def test_sanity_check_choice_prob_fails(self):
self.scheme.append([NearestNeighborRepExStrategy(),
OneWayShootingStrategy(),
OrganizeByMoveGroupStrategy()])
root = self.scheme.move_decision_tree()
key0 = list(self.scheme.choice_probability.keys())[0]
self.scheme.choice_probability[key0] = 0.0
self.scheme.sanity_check()
@raises(AssertionError)
def test_sanity_check_duplicated_movers(self):
ensemble_subset = self.scheme.network.sampling_transitions[0].ensembles
self.scheme.append([
OneWayShootingStrategy(),
OrganizeByMoveGroupStrategy()
])
root = self.scheme.move_decision_tree()
self.scheme.movers['foo'] = [self.scheme.movers['shooting'][0]]
self.scheme.sanity_check()
@raises(TypeError)
def test_select_movers_no_choice_probability(self):
self.scheme.append([OneWayShootingStrategy(),
OrganizeByMoveGroupStrategy()])
movers = self.scheme._select_movers('shooting')
def test_select_movers(self):
self.scheme.append([
OneWayShootingStrategy(),
NearestNeighborRepExStrategy(),
OrganizeByMoveGroupStrategy()
])
root = self.scheme.move_decision_tree()
some_shooters = self.scheme.movers['shooting'][0:2]
movers = self.scheme._select_movers('shooting')
assert_equal(movers, self.scheme.movers['shooting'])
movers = self.scheme._select_movers(some_shooters)
assert_equal(movers, some_shooters)
movers = self.scheme._select_movers(some_shooters[0])
assert_equal(movers, [some_shooters[0]])
def test_n_trials_for_steps(self):
self.scheme.append([
OneWayShootingStrategy(),
NearestNeighborRepExStrategy(),
OrganizeByMoveGroupStrategy()
])
        # we should have 6 shooters and 4 repex movers, but the default
        # strategy halves the weight of selecting repex (0.5 vs 1.0);
        # total weight is 6*1.0 + 4*0.5 = 8, so we get a shooting move
        # 75% of the time
        root = self.scheme.move_decision_tree()
some_shooters = self.scheme.movers['shooting'][0:2]
assert_almost_equal(
self.scheme.n_trials_for_steps('shooting', 100), 75.0
)
assert_almost_equal(
self.scheme.n_trials_for_steps(some_shooters, 100), 25.0
)
assert_almost_equal(
self.scheme.n_trials_for_steps(some_shooters[0], 100), 12.5
)
def test_n_steps_for_trials(self):
self.scheme.append([
OneWayShootingStrategy(),
NearestNeighborRepExStrategy(),
OrganizeByMoveGroupStrategy()
])
        # we should have 6 shooters and 4 repex movers, but the default
        # strategy halves the weight of selecting repex; so we get a
        # shooting move 75% of the time, and 100 shooting trials take
        # 100/0.75 = 400/3 steps on average
root = self.scheme.move_decision_tree()
some_shooters = self.scheme.movers['shooting'][0:2]
assert_almost_equal(
self.scheme.n_steps_for_trials('shooting', 100), old_div(400.0,3.0)
)
assert_almost_equal(
self.scheme.n_steps_for_trials(some_shooters, 100), 400.0
)
assert_almost_equal(
self.scheme.n_steps_for_trials(some_shooters[0], 100), 800.0
)
class TestDefaultScheme(object):
def setup(self):
paths.InterfaceSet._reset()
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
self.stateA = paths.CVDefinedVolume(cvA, float("-inf"), -0.5)
self.stateB = paths.CVDefinedVolume(cvB, float("-inf"), -0.5)
interfacesA = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3, -0.1])
interfacesB = paths.VolumeInterfaceSet(cvB, float("-inf"),
[-0.5, -0.3, -0.1])
self.network = paths.MSTISNetwork(
[(self.stateA, interfacesA),
(self.stateB, interfacesB)],
ms_outers=paths.MSOuterTISInterface.from_lambdas(
{interfacesA: 0.0, interfacesB: 0.0}
)
)
self.no_ms_outer = paths.MSTISNetwork(
[(self.stateA, interfacesA), (self.stateB, interfacesB)]
)
def test_default_scheme(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
chooser_type_dict = {
'ShootingChooser' : paths.OneWayShootingMover,
'PathreversalChooser' : paths.PathReversalMover,
'RepexChooser' : paths.ReplicaExchangeMover,
'MinusChooser' : paths.MinusMover,
'Ms_outer_shootingChooser' : paths.OneWayShootingMover
}
names = list(chooser_type_dict.keys())
assert_equal(len(root.movers), len(names))
name_dict = {root.movers[i].name : i for i in range(len(root.movers))}
for name in names:
assert_in(name, list(name_dict.keys()))
n_normal_repex = 4
n_msouter_repex = 2
n_repex = n_normal_repex + n_msouter_repex
assert_equal(
len(root.movers[name_dict['ShootingChooser']].movers), 6
)
assert_equal(
len(root.movers[name_dict['PathreversalChooser']].movers), 7
)
assert_equal(
len(root.movers[name_dict['RepexChooser']].movers), n_repex
)
assert_equal(
len(root.movers[name_dict['MinusChooser']].movers), 2
)
assert_equal(
len(root.movers[name_dict['Ms_outer_shootingChooser']].movers), 1
)
for choosername in names:
for mover in root.movers[name_dict[choosername]].movers:
assert_equal(type(mover), chooser_type_dict[choosername])
def test_default_scheme_no_ms_outer(self):
scheme = DefaultScheme(self.no_ms_outer)
root = scheme.move_decision_tree()
chooser_type_dict = {
'ShootingChooser' : paths.OneWayShootingMover,
'PathreversalChooser' : paths.PathReversalMover,
'RepexChooser' : paths.ReplicaExchangeMover,
'MinusChooser' : paths.MinusMover
}
names = list(chooser_type_dict.keys())
assert_equal(len(root.movers), len(names))
name_dict = {root.movers[i].name : i for i in range(len(root.movers))}
for name in names:
assert_in(name, list(name_dict.keys()))
n_normal_repex = 4
n_msouter_repex = 0
n_repex = n_normal_repex + n_msouter_repex
assert_equal(
len(root.movers[name_dict['ShootingChooser']].movers), 6
)
assert_equal(
len(root.movers[name_dict['PathreversalChooser']].movers), 6
)
assert_equal(
len(root.movers[name_dict['RepexChooser']].movers), n_repex
)
assert_equal(
len(root.movers[name_dict['MinusChooser']].movers), 2
)
def test_default_sanity(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
scheme.sanity_check()
def test_default_hidden_ensembles(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
hidden = scheme.find_hidden_ensembles()
assert_equal(len(hidden), 2)
def test_default_unused_ensembles(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
unused = scheme.find_unused_ensembles()
assert_equal(len(unused), 0) # will change when minus/msouter
def test_default_balance_partners(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
scheme.build_balance_partners()
# by default, every mover is its own balance partner
for group in list(scheme.movers.values()):
for mover in group:
assert_equal(scheme.balance_partners[mover], [mover])
def test_default_choice_probability(self):
scheme = DefaultScheme(self.network)
root = scheme.move_decision_tree()
default_group_weights = {
'shooting' : 1.0,
'repex' : 0.5,
'pathreversal' : 0.5,
'minus' : 0.2,
'ms_outer_shooting' : 1.0
}
assert_almost_equal(sum(scheme.choice_probability.values()), 1.0)
tot_norm = sum([default_group_weights[group]
for group in scheme.movers])
prob_shoot0 = scheme.choice_probability[scheme.movers['shooting'][0]]
n_shooting = len(scheme.movers['shooting'])
for group in default_group_weights:
scale = default_group_weights[group]
n_group = len(scheme.movers[group])
expected_prob = default_group_weights[group]*prob_shoot0
for mover in scheme.movers[group]:
test_prob = scheme.choice_probability[mover]
assert_almost_equal(expected_prob, test_prob)
def test_initial_conditions_from_trajectory(self):
scheme = DefaultScheme(self.network)
# root = scheme.move_decision_tree()
assert_equal(len(scheme.list_initial_ensembles()), 9)
traj1 = make_1d_traj([-0.6, -0.2, -0.6])
traj2 = make_1d_traj([-0.6, -0.2, -0.05, -0.4, -0.6])
traj3 = make_1d_traj([-0.6, -0.2, 0.2, 0.6])
all_trajs = [traj1, traj2, traj3]
traj1r = traj1.reversed
traj2r = traj2.reversed
traj3r = traj3.reversed
def assert_init_cond(sample_set, ensembles, expected):
            # helper to check the results: `expected` is a list of the
            # resulting trajectories, and `ensembles` is the list of
            # ensembles to be tested, in the same order
sample_set.sanity_check()
assert_equal(len(sample_set), len(expected))
for ensemble, traj in zip(ensembles, expected):
# print ensemble.name, sample_set[ensemble].trajectory.xyz[:,0,0], traj.xyz[:, 0,0],
# print hex(id(traj)), hex(id(sample_set[ensemble].trajectory.xyz[:,0,0]))
assert_equal(sample_set[ensemble].trajectory, traj)
transAB = transBA = None
for trans in self.network.sampling_transitions:
if trans.stateA == self.stateA and trans.stateB == self.stateB:
transAB = trans
elif trans.stateA == self.stateB and trans.stateB == self.stateA:
transBA = trans
else:
raise RuntimeWarning("That's a weird transition!")
ms_outer_ens = list(self.network.special_ensembles['ms_outer'].keys())[0]
ensembles = transAB.ensembles + [ms_outer_ens] + transBA.ensembles
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=[],
reuse_strategy='all',
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles[:4],
[traj1, traj1, traj2, traj3]
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=['mirror'],
reuse_strategy='all',
strategies=['get']
)
assert_init_cond(
init_cond, ensembles,
[traj1, traj1, traj2] + [traj3] + [traj3r] * 3
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=['mirror', 'sort-shortest'],
strategies=['get'],
reuse_strategy='all'
)
assert_init_cond(
init_cond, ensembles,
[traj1, traj1, traj3] + [traj3] + [traj3r] * 3
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=[],
reuse_strategy='avoid',
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles[:4],
[traj1, traj2, traj3, traj3]
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=['mirror'],
reuse_strategy='avoid',
strategies=['get']
)
init_cond.sanity_check()
assert_equal(len(init_cond), 7)
for ensemble, traj in zip(ensembles[:3], [traj1, traj2, traj3]):
assert_equal(init_cond[ensemble].trajectory, traj)
for ensemble, traj in zip(ensembles[4:], [traj3r] * 3):
assert_equal(init_cond[ensemble].trajectory, traj)
        # because the scheme's ensembles are created via a set, the order
        # in which the ensembles are created can change:
        # in some cases traj3 is used (and hence avoided) in the ms-outer,
        # in some cases traj3r, but both are fine.
try:
assert_equal(init_cond[ensembles[3]].trajectory, traj3)
except AssertionError:
assert_equal(init_cond[ensembles[3]].trajectory, traj3r)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=all_trajs,
preconditions=['mirror', 'sort-shortest'],
reuse_strategy='avoid',
strategies=['get']
)
init_cond.sanity_check()
assert_equal(len(init_cond), 7)
for ensemble, traj in zip(ensembles[:3], [traj1, traj1r, traj3]):
assert_equal(init_cond[ensemble].trajectory, traj)
for ensemble, traj in zip(ensembles[4:], [traj3r] * 3):
assert_equal(init_cond[ensemble].trajectory, traj)
try:
assert_equal(init_cond[ensembles[3]].trajectory, traj3)
except AssertionError:
assert_equal(init_cond[ensembles[3]].trajectory, traj3r)
# this one avoids reversed copies
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=[traj1],
preconditions=[],
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles[:2],
[traj1, traj1]
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=traj2,
sample_set=init_cond,
preconditions=[],
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles[:3],
[traj1, traj1, traj2]
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=[traj3],
preconditions=[],
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles[:4],
[traj3] * 4
)
init_cond = scheme.initial_conditions_from_trajectories(
trajectories=[traj3],
preconditions=['mirror'],
strategies=['get']
)
assert_init_cond(
init_cond,
ensembles,
[traj3] * 4 + [traj3r] * 3
)
def test_check_initial_conditions(self):
scheme = DefaultScheme(self.network)
traj3 = make_1d_traj([-0.6, -0.2, 0.2, 0.6])
# cheating a bit, since we know what this gives
init_cond = scheme.initial_conditions_from_trajectories(traj3)
assert_equal(len(init_cond), 7)
assert_equal(len(scheme.list_initial_ensembles()), 9)
(missing, extra) = scheme.check_initial_conditions(init_cond)
assert_equal(len(missing), 2)
assert_equal(len(extra), 0)
for ens in list(self.network.special_ensembles['minus'].keys()):
assert_in([ens], missing)
init_cond.append_as_new_replica(
paths.Sample(trajectory=traj3,
ensemble=paths.LengthEnsemble(4),
replica=None)
)
(missing, extra) = scheme.check_initial_conditions(init_cond)
assert_equal(len(missing), 2)
assert_equal(len(extra), 1)
@raises(AssertionError)
def test_assert_initial_conditions(self):
scheme = DefaultScheme(self.network)
traj3 = make_1d_traj([-0.6, -0.2, 0.2, 0.6])
init_cond = scheme.initial_conditions_from_trajectories(traj3)
init_cond.append_as_new_replica(
paths.Sample(trajectory=traj3,
ensemble=paths.LengthEnsemble(4),
replica=None)
)
scheme.assert_initial_conditions(init_cond)
def test_initial_conditions_report(self):
scheme = DefaultScheme(self.network)
traj3 = make_1d_traj([-0.6, -0.2, 0.2, 0.6])
init_cond = scheme.initial_conditions_from_trajectories(traj3)
init_cond.append_as_new_replica(
paths.Sample(trajectory=traj3,
ensemble=paths.LengthEnsemble(4),
replica=None)
)
start = "Missing ensembles:\n"
missing_A = "* [Out A minus]\n"
missing_B = "* [Out B minus]\n"
finish = "Extra ensembles:\n* [LengthEnsemble]\n"
expected_AB = start + missing_A + missing_B + finish
expected_BA = start + missing_B + missing_A + finish
result = scheme.initial_conditions_report(init_cond)
try:
assert_equal(result, expected_AB)
except AssertionError:
assert_equal(result, expected_BA)
class TestLockedMoveScheme(object):
def setup(self):
paths.InterfaceSet._reset()
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
self.stateA = paths.CVDefinedVolume(cvA, float("-inf"), -0.5)
self.stateB = paths.CVDefinedVolume(cvB, float("-inf"), -0.5)
interfacesA = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3, -0.1])
interfacesB = paths.VolumeInterfaceSet(cvB, float("-inf"),
[-0.5, -0.3, -0.1])
self.network = paths.MSTISNetwork(
[(self.stateA, interfacesA),
(self.stateB, interfacesB)],
ms_outers=paths.MSOuterTISInterface.from_lambdas(
{interfacesA: 0.0, interfacesB: 0.0}
)
)
self.basic_scheme = DefaultScheme(self.network)
self.root_mover = self.basic_scheme.move_decision_tree()
def test_initialization(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
assert_equal(scheme.network, self.network)
assert_equal(scheme.move_decision_tree(), self.root_mover)
def test_build_move_decision_tree(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
scheme.move_decision_tree(rebuild=True)
assert_equal(scheme.move_decision_tree(), self.root_mover)
@raises(TypeError)
def test_append(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
scheme.append(AllSetRepExStrategy())
@raises(TypeError)
def test_apply_strategy(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
strategy = AllSetRepExStrategy()
scheme.apply_strategy(strategy)
@raises(AttributeError)
def test_choice_probability_fail(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
vals = scheme.choice_probability
def test_choice_probability_works(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
scheme.choice_probability = self.basic_scheme.choice_probability
vals = scheme.choice_probability
@raises(AttributeError)
def test_movers_fail(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
vals = scheme.movers
def test_movers_works(self):
scheme = LockedMoveScheme(self.root_mover, self.network)
scheme.movers = self.basic_scheme.movers
vals = scheme.movers
class TestOneWayShootingMoveScheme(object):
def setup(self):
paths.InterfaceSet._reset()
cvA = paths.FunctionCV(name="xA", f=lambda s : s.xyz[0][0])
cvB = paths.FunctionCV(name="xB", f=lambda s : -s.xyz[0][0])
self.stateA = paths.CVDefinedVolume(cvA, float("-inf"), -0.5)
self.stateB = paths.CVDefinedVolume(cvB, float("-inf"), -0.5)
interfacesA = paths.VolumeInterfaceSet(cvA, float("-inf"),
[-0.5, -0.3, -0.1])
interfacesB = paths.VolumeInterfaceSet(cvB, float("-inf"),
[-0.5, -0.3, -0.1])
self.network = paths.MSTISNetwork(
[(self.stateA, interfacesA),
(self.stateB, interfacesB)],
ms_outers=paths.MSOuterTISInterface.from_lambdas(
{interfacesA: 0.0, interfacesB: 0.0}
)
)
def test_scheme(self):
scheme = OneWayShootingMoveScheme(self.network)
root = scheme.move_decision_tree()
assert_equal(len(scheme.movers), 1)
assert_equal(len(root.movers), 1)
def test_sanity(self):
scheme = OneWayShootingMoveScheme(self.network)
root = scheme.move_decision_tree()
scheme.sanity_check()
def test_unused_ensembles(self):
scheme = OneWayShootingMoveScheme(self.network)
root = scheme.move_decision_tree()
unused = scheme.find_unused_ensembles()
specials = self.network.special_ensembles
expected_unused = sum([list(specials[special_type].keys())
for special_type in specials], [])
assert_equal(set(expected_unused), set(unused))
def test_check_initial_conditions(self):
scheme = OneWayShootingMoveScheme(self.network)
traj3 = make_1d_traj([-0.6, -0.2, 0.2, 0.6])
init_cond = scheme.initial_conditions_from_trajectories(traj3)
assert_equal(len(scheme.list_initial_ensembles()), 6)
assert_equal(len(init_cond), 6)
scheme.assert_initial_conditions(init_cond)
assert_equal(scheme.initial_conditions_report(init_cond),
"No missing ensembles.\nNo extra ensembles.\n")
|
80574
|
import sys
import os
own_dir = os.path.abspath(os.path.dirname(__file__))  # __name__ would resolve to the cwd, not this file's directory
repo_root = os.path.abspath(os.path.join(own_dir, os.path.pardir))
sys.path.insert(1, repo_root)
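# With the repository root on sys.path, top-level packages in the repo can
# be imported directly, e.g. (hypothetical package name):
#   import mypackage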
|
80593
|
import smtplib
from email import encoders
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
sender_email = input("Enter sender email: ")
receiver_email = input("Enter receiver email: ")
server=smtplib.SMTP_SSL('smtp.gmail.com',465)
# server.connect("smtp.gmail.com",465)
server.ehlo()
with open('pwd.txt','r') as f:
    pwd = f.read().strip()  # strip the trailing newline so the login doesn't fail
server.login(sender_email, pwd)
server.set_debuglevel(1)  # set_debuglevel returns None, so there is nothing useful to print
msg = MIMEMultipart()
msg['From'] = 'Dila'
msg['To'] = receiver_email
msg['Subject']='just a test'
with open('msg.txt','r') as f:
message=f.read()
msg.attach(MIMEText(message,'plain'))
filename = 'hack - Copy.jpg'
with open(filename, 'rb') as attachment:  # close the file handle when done
    p = MIMEBase('application', 'octet-stream')
    p.set_payload(attachment.read())
encoders.encode_base64(p)
p.add_header('Content-Disposition', f'attachment; filename={filename}')
msg.attach(p)
text=msg.as_string()
server.sendmail(sender_email, receiver_email, text)
server.quit()
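# Note: Gmail typically rejects regular account passwords for SMTP logins;
# pwd.txt should usually contain an app password generated in the Google
# account's security settings.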
|
80629
|
import matplotlib.pyplot as plt
from ._compat import *
class GridCurves(object):
def __init__(self, curves):
self._curves = curves
@property
def curves(self):
return self._curves
    def __str__(self):
        fh = StringIO()
        fh.write('GridCurves object:\n\n')
        fh.write('  with %d gridlines.\n\n' % len(self.curves))
        return fh.getvalue()
def __repr__(self):
return str(self)
    def plot(self, *args, **kwargs):
        kwargs.setdefault('color', '0.6')  # default gridline color, overridable by callers
        for curve in self.curves:
            plt.plot(curve.real, curve.imag, *args, **kwargs)
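# Minimal usage sketch (assumes each curve is a complex-valued numpy array,
# one per gridline; the sample data is illustrative only):
#   import numpy as np
#   curves = [r * np.exp(1j * np.linspace(0, 2 * np.pi, 100))
#             for r in (0.5, 1.0, 1.5)]
#   GridCurves(curves).plot()
#   plt.show()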
|
80637
|
from .slm import StandardLinearModel
from .glm import GeneralizedLinearModel, GeneralisedLinearModel
from .btypes import Bound, Positive, Parameter
from . import likelihoods, basis_functions, metrics, btypes
|
80649
|
from .report_server import ReportServer
from .report_client import ReportClient
from .record import ReportRecord
from .nsga_iii import NonDominatedSorting, SortAndSelectPopulation
|
80662
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: apt_keys
author: Manala (@manala)
short_description: returns a curated keys list
description:
- Takes a keys list and returns it curated.
'''
from ansible.plugins.lookup import LookupBase
from ansible.errors import AnsibleError
from ansible.module_utils.six import string_types
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
results = []
keys = self._flatten(terms[0])
keysPatterns = terms[1]
repositories = terms[2]
itemDefault = {}
# Handle repositories defined as reversed preferences
for repository in repositories[::-1]:
if 'key' in repository:
keys.insert(0, repository.get('key'))
for key in keys:
items = []
item = itemDefault.copy()
# Short syntax
            if isinstance(key, string_types):
                if key not in keysPatterns:
                    raise AnsibleError('Unknown key "%s"' % key)
                item.update(keysPatterns.get(key))
else:
# Must be a dict
if not isinstance(key, dict):
raise AnsibleError('Expected a dict but was a %s' % type(key))
# Check id key
if 'id' not in key:
raise AnsibleError('Missing "id" key')
item.update(key)
items.append(item)
# Merge by index key
for item in items:
itemFound = False
for i, result in enumerate(results):
if result['id'] == item['id']:
results[i] = item
itemFound = True
break
if not itemFound:
results.append(item)
return results
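# Hypothetical task usage (lookup name and variable names are illustrative;
# the plugin expects three terms: keys, key patterns, repositories):
#   keys: "{{ lookup('apt_keys', my_keys, my_keys_patterns, my_repositories) }}"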
|
80676
|
USER_INFORMATION ={
"object": "user",
"url": "https://api.wanikani.com/v2/user",
"data_updated_at": "2020-05-01T05:20:41.769053Z",
"data": {
"id": "7a18daeb-4067-4e77-b0ea-230c7c347ea8",
"username": "Tadgh11",
"level": 12,
"profile_url": "https://www.wanikani.com/users/Tadgh11",
"started_at": "2013-07-09T12:02:54.952786Z",
"subscription": {
"active": True,
"type": "lifetime",
"max_level_granted": 60,
"period_ends_at": None
},
"current_vacation_started_at": None,
"preferences": {
"lessons_batch_size": 5,
"lessons_autoplay_audio": True,
"reviews_autoplay_audio": False,
"lessons_presentation_order": "ascending_level_then_subject",
"reviews_display_srs_indicator": True
}
}
}
SUBJECT = {
"id": 2467,
"object": "vocabulary",
"url": "https://api.wanikani.com/v2/subjects/2467",
"data_updated_at": "2018-05-21T21:52:43.041390Z",
"data": {
"created_at": "2012-02-28T08:04:47.000000Z",
"level": 1,
"slug": "一",
"hidden_at": None,
"document_url": "https://www.wanikani.com/vocabulary/%E4%B8%80",
"characters": "一",
"meanings": [{"meaning": "One", "primary": True, "accepted_answer": True}],
"readings": [{"primary": True, "reading": "いち", "accepted_answer": True}],
"parts_of_speech": ["numeral"],
"component_subject_ids": [440],
"auxiliary_meanings": [],
},
}
SINGLE_SUBJECT = {
"id": 1,
"object": "radical",
"url": "https://api.wanikani.com/v2/subjects/1",
"data_updated_at": "2018-12-05T20:47:15.603277Z",
"data": {
"created_at": "2012-02-27T18:08:16.000000Z",
"level": 1,
"slug": "ground",
"hidden_at": None,
"document_url": "https://www.wanikani.com/radicals/ground",
"characters": "一",
"character_images": [
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-original.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "1024x1024",
"style_name": "original",
},
"content_type": "image/png",
}
],
"meanings": [{"meaning": "Ground", "primary": True, "accepted_answer": True}],
"auxiliary_meanings": [],
"amalgamation_subject_ids": [2],
},
}
EMPTY_SUBJECTS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/subjects?ids=1%2C2%2C3&slugs=abc%2C123&types=vocabulary",
"pages": {"per_page": 1000, "next_url": None, "previous_url": None},
"total_count": 0,
"data_updated_at": None,
"data": [],
}
SUBJECTS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/subjects",
"pages": {"per_page": 1000, "next_url": None, "previous_url": None},
"total_count": 3,
"data_updated_at": "2018-07-05T22:22:07.129381Z",
"data": [
{
"id": 3,
"object": "radical",
"url": "https://api.wanikani.com/v2/subjects/1",
"data_updated_at": "2018-05-21T21:51:35.051365Z",
"data": {
"created_at": "2012-02-27T18:08:16.000000Z",
"level": 1,
"slug": "ground",
"hidden_at": None,
"document_url": "https://www.wanikani.com/radicals/ground",
"characters": "一",
"auxiliary_meanings": [],
"character_images": [
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-original.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "1024x1024",
"style_name": "original",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-1024px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "1024x1024",
"style_name": "1024px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-512px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "512x512",
"style_name": "512px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-256px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "256x256",
"style_name": "256px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-128px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "128x128",
"style_name": "128px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-64px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "64x64",
"style_name": "64px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/1054-subject-1-normal-weight-black-32px.png?1520987606",
"metadata": {
"color": "#000000",
"dimensions": "32x32",
"style_name": "32px",
},
"content_type": "image/png",
},
{
"url": "https://cdn.wanikani.com/images/legacy/576-subject-1-without-css-original.svg?1520987227",
"metadata": {"inline_styles": False},
"content_type": "image/svg+xml",
},
{
"url": "https://cdn.wanikani.com/images/legacy/98-subject-1-with-css-original.svg?1520987072",
"metadata": {"inline_styles": True},
"content_type": "image/svg+xml",
},
],
"meanings": [
{"meaning": "Ground", "primary": True, "accepted_answer": True}
],
"amalgamation_subject_ids": [
440,
449,
450,
451,
488,
531,
533,
568,
590,
609,
633,
635,
709,
710,
724,
783,
808,
913,
932,
965,
971,
1000,
1020,
1085,
1113,
1126,
1137,
1178,
1198,
1240,
1241,
1249,
1340,
1367,
1372,
1376,
1379,
1428,
1431,
1463,
1491,
1506,
1521,
1547,
1559,
1591,
1655,
1674,
1706,
1769,
1851,
1852,
1855,
1868,
1869,
1888,
1970,
2091,
2104,
2128,
2138,
2148,
2171,
2182,
2263,
2277,
2334,
2375,
2419,
2437,
],
},
},
{
"id": 1,
"object": "kanji",
"url": "https://api.wanikani.com/v2/subjects/534",
"data_updated_at": "2018-05-21T21:51:48.658813Z",
"data": {
"created_at": "2012-03-02T02:11:55.000000Z",
"level": 4,
"slug": "央",
"hidden_at": None,
"document_url": "https://www.wanikani.com/kanji/%E5%A4%AE",
"characters": "央",
"meanings": [
{"meaning": "Center", "primary": True, "accepted_answer": True},
{"meaning": "Central", "primary": False, "accepted_answer": True},
{"meaning": "Centre", "primary": False, "accepted_answer": True},
],
"readings": [
{
"type": "onyomi",
"primary": True,
"reading": "おう",
"accepted_answer": True,
}
],
"auxiliary_meanings": [],
"component_subject_ids": [29, 18],
"amalgamation_subject_ids": [2726],
},
},
{
"id": 2,
"object": "vocabulary",
"url": "https://api.wanikani.com/v2/subjects/2467",
"data_updated_at": "2018-05-21T21:52:43.041390Z",
"data": {
"created_at": "2012-02-28T08:04:47.000000Z",
"level": 1,
"slug": "一",
"hidden_at": None,
"document_url": "https://www.wanikani.com/vocabulary/%E4%B8%80",
"characters": "一",
"meanings": [
{"meaning": "One", "primary": True, "accepted_answer": True}
],
"readings": [
{"primary": True, "reading": "いち", "accepted_answer": True}
],
"parts_of_speech": ["numeral"],
"auxiliary_meanings": [],
"component_subject_ids": [440],
},
},
],
}
ASSIGNMENTS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/assignments",
"pages": {
"per_page": 500,
"next_url": "https://api.wanikani.com/v2/assignments?page_after_id=62308815",
"previous_url": None,
},
"total_count": 3,
"data_updated_at": "2018-06-30T16:40:52.513654Z",
"data": [
{
"id": 85899,
"object": "assignment",
"url": "https://api.wanikani.com/v2/assignments/85899",
"data_updated_at": "2018-05-09T21:17:31.000000Z",
"data": {
"created_at": "2017-04-15T14:53:56.818837Z",
"subject_id": 2,
"subject_type": "vocabulary",
"level": 5,
"srs_stage": 9,
"unlocked_at": "2017-04-15T14:53:56.818837Z",
"started_at": "2017-04-15T14:53:56.818837Z",
"passed_at": None,
"burned_at": "2018-01-03T00:08:22.451866Z",
"available_at": None,
"resurrected_at": None,
"passed": True,
"hidden": False,
},
},
{
"id": 86555,
"object": "assignment",
"url": "https://api.wanikani.com/v2/assignments/86555",
"data_updated_at": "2018-05-09T21:17:31.000000Z",
"data": {
"created_at": "2017-04-15T14:50:51.503084Z",
"subject_id": 3,
"subject_type": "vocabulary",
"level": 5,
"srs_stage": 9,
"unlocked_at": "2017-04-15T14:50:51.503084Z",
"started_at": "2017-04-15T14:50:51.503084Z",
"passed_at": None,
"burned_at": "2018-02-19T23:02:25.053105Z",
"available_at": None,
"resurrected_at": None,
"passed": True,
"hidden": False,
},
},
{
"id": 86606,
"object": "assignment",
"url": "https://api.wanikani.com/v2/assignments/86606",
"data_updated_at": "2018-05-09T21:17:31.000000Z",
"data": {
"created_at": "2017-04-24T15:17:28.712677Z",
"subject_id": 1,
"subject_type": "vocabulary",
"level": 6,
"srs_stage": 9,
"unlocked_at": "2017-04-24T15:17:28.712677Z",
"started_at": "2017-04-24T15:17:28.712677Z",
"passed_at": "2017-05-10T13:52:56.699204Z",
"burned_at": "2018-02-19T22:46:09.144931Z",
"available_at": None,
"resurrected_at": None,
"passed": True,
"hidden": False,
},
},
],
}
REVIEW_STATISTICS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/review_statistics",
"pages": {
"per_page": 500,
"next_url": "https://api.wanikani.com/v2/review_statistics?page_after_id=62308745",
"previous_url": None,
},
"total_count": 5,
"data_updated_at": "2018-05-24T22:02:41.393482Z",
"data": [
{
"id": 85899,
"object": "review_statistic",
"url": "https://api.wanikani.com/v2/review_statistics/85899",
"data_updated_at": "2018-01-03T00:08:22.469272Z",
"data": {
"created_at": "2017-04-15T14:53:56.818837Z",
"subject_id": 1,
"subject_type": "vocabulary",
"meaning_correct": 13,
"meaning_incorrect": 2,
"meaning_max_streak": 7,
"meaning_current_streak": 7,
"reading_correct": 13,
"reading_incorrect": 0,
"reading_max_streak": 13,
"reading_current_streak": 13,
"percentage_correct": 93,
"hidden": False,
},
},
{
"id": 86555,
"object": "review_statistic",
"url": "https://api.wanikani.com/v2/review_statistics/86555",
"data_updated_at": "2018-02-19T23:02:25.114612Z",
"data": {
"created_at": "2017-04-15T14:50:51.503084Z",
"subject_id": 2,
"subject_type": "vocabulary",
"meaning_correct": 11,
"meaning_incorrect": 0,
"meaning_max_streak": 11,
"meaning_current_streak": 11,
"reading_correct": 11,
"reading_incorrect": 1,
"reading_max_streak": 7,
"reading_current_streak": 7,
"percentage_correct": 96,
"hidden": False,
},
},
{
"id": 86606,
"object": "review_statistic",
"url": "https://api.wanikani.com/v2/review_statistics/86606",
"data_updated_at": "2018-02-19T22:46:09.166397Z",
"data": {
"created_at": "2017-04-24T15:17:28.712677Z",
"subject_id": 3,
"subject_type": "vocabulary",
"meaning_correct": 8,
"meaning_incorrect": 0,
"meaning_max_streak": 8,
"meaning_current_streak": 8,
"reading_correct": 8,
"reading_incorrect": 0,
"reading_max_streak": 8,
"reading_current_streak": 8,
"percentage_correct": 100,
"hidden": False,
},
},
{
"id": 86625,
"object": "review_statistic",
"url": "https://api.wanikani.com/v2/review_statistics/86625",
"data_updated_at": "2018-02-19T23:54:40.912486Z",
"data": {
"created_at": "2017-04-24T15:17:29.061457Z",
"subject_id": 1,
"subject_type": "radical",
"meaning_correct": 8,
"meaning_incorrect": 0,
"meaning_max_streak": 8,
"meaning_current_streak": 8,
"reading_correct": 1,
"reading_incorrect": 0,
"reading_max_streak": 1,
"reading_current_streak": 1,
"percentage_correct": 100,
"hidden": False,
},
},
{
"id": 86891,
"object": "review_statistic",
"url": "https://api.wanikani.com/v2/review_statistics/86891",
"data_updated_at": "2018-05-24T21:35:18.556752Z",
"data": {
"created_at": "2017-04-24T15:17:38.685804Z",
"subject_id": 3,
"subject_type": "vocabulary",
"meaning_correct": 12,
"meaning_incorrect": 1,
"meaning_max_streak": 11,
"meaning_current_streak": 11,
"reading_correct": 12,
"reading_incorrect": 1,
"reading_max_streak": 9,
"reading_current_streak": 3,
"percentage_correct": 92,
"hidden": False,
},
},
],
}
STUDY_MATERIALS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/study_materials",
"pages": {"per_page": 500, "next_url": None, "previous_url": None},
"total_count": 3,
"data_updated_at": "2018-02-20T21:23:31.246408Z",
"data": [
{
"id": 1539170,
"object": "study_material",
"url": "https://api.wanikani.com/v2/study_materials/1539170",
"data_updated_at": "2017-06-01T19:01:36.573350Z",
"data": {
"created_at": "2017-02-01T15:55:42.058583Z",
"subject_id": 7518,
"subject_type": "vocabulary",
"meaning_note": None,
"reading_note": None,
"meaning_synonyms": ["young girl"],
"hidden": False,
},
},
{
"id": 1661853,
"object": "study_material",
"url": "https://api.wanikani.com/v2/study_materials/1661853",
"data_updated_at": "2017-06-07T00:23:41.431508Z",
"data": {
"created_at": "2017-04-08T14:02:50.758641Z",
"subject_id": 2798,
"subject_type": "vocabulary",
"meaning_note": None,
"reading_note": None,
"meaning_synonyms": ["balls"],
"hidden": False,
},
},
{
"id": 1678472,
"object": "study_material",
"url": "https://api.wanikani.com/v2/study_materials/1678472",
"data_updated_at": "2017-06-12T15:22:15.753065Z",
"data": {
"created_at": "2017-02-23T14:51:21.526934Z",
"subject_id": 3416,
"subject_type": "vocabulary",
"meaning_note": None,
"reading_note": None,
"meaning_synonyms": ["wool"],
"hidden": False,
},
},
],
}
SUMMARY = {
"object": "report",
"url": "https://api.wanikani.com/v2/summary",
"data_updated_at": "2018-07-02T07:00:00.000000Z",
"data": {
"lessons": [{"available_at": "2018-07-02T07:00:00.000000Z", "subject_ids": []}],
"next_reviews_at": "2018-07-02T09:00:00.000000Z",
"reviews": [
{"available_at": "2018-07-02T07:00:00.000000Z", "subject_ids": [1, 2, 3]},
{"available_at": "2018-07-02T08:00:00.000000Z", "subject_ids": [4, 5, 6]},
{"available_at": "2018-07-02T09:00:00.000000Z", "subject_ids": [647]},
{"available_at": "2018-07-02T10:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T11:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T12:00:00.000000Z", "subject_ids": []},
{
"available_at": "2018-07-02T13:00:00.000000Z",
"subject_ids": [8800, 2944, 2943],
},
{"available_at": "2018-07-02T14:00:00.000000Z", "subject_ids": []},
{
"available_at": "2018-07-02T15:00:00.000000Z",
"subject_ids": [
658,
8738,
3447,
6237,
3449,
3451,
7676,
7528,
7621,
7679,
2822,
3420,
657,
5717,
3436,
7677,
7678,
3452,
7529,
3450,
3438,
7568,
7675,
3437,
3422,
3448,
4877,
7734,
7735,
666,
646,
648,
],
},
{"available_at": "2018-07-02T16:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T17:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T18:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T19:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T20:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-02T21:00:00.000000Z", "subject_ids": [2841]},
{"available_at": "2018-07-02T22:00:00.000000Z", "subject_ids": []},
{
"available_at": "2018-07-02T23:00:00.000000Z",
"subject_ids": [2945, 672, 2956, 2932, 2981, 2953, 674, 2936, 654],
},
{"available_at": "2018-07-03T00:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-03T01:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-03T02:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-03T03:00:00.000000Z", "subject_ids": []},
{
"available_at": "2018-07-03T04:00:00.000000Z",
"subject_ids": [
671,
853,
3709,
2959,
4849,
2970,
2960,
2966,
2967,
2952,
2946,
8663,
2962,
2961,
2973,
2938,
2935,
2940,
7461,
2969,
2958,
2937,
7736,
2957,
8801,
2974,
677,
2939,
675,
663,
668,
650,
664,
670,
660,
676,
],
},
{"available_at": "2018-07-03T05:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-03T06:00:00.000000Z", "subject_ids": []},
{"available_at": "2018-07-03T07:00:00.000000Z", "subject_ids": []},
],
},
}
REVIEWS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/reviews",
"pages": {
"per_page": 1000,
"next_url": "https://api.wanikani.com/v2/reviews?page_after_id=168707639",
"previous_url": None,
},
"total_count": 3,
"data_updated_at": "2018-07-06T19:30:19.657822Z",
"data": [
{
"id": 6418820,
"object": "review",
"url": "https://api.wanikani.com/v2/reviews/6418820",
"data_updated_at": "2017-08-13T14:32:50.580980Z",
"data": {
"created_at": "2017-08-13T14:32:50.580980Z",
"assignment_id": 69392456,
"subject_id": 2514,
"starting_srs_stage": 8,
"ending_srs_stage": 9,
"incorrect_meaning_answers": 0,
"incorrect_reading_answers": 0,
},
},
{
"id": 6418839,
"object": "review",
"url": "https://api.wanikani.com/v2/reviews/6418839",
"data_updated_at": "2017-08-13T14:32:52.693772Z",
"data": {
"created_at": "2017-08-13T14:32:52.693772Z",
"assignment_id": 30950170,
"subject_id": 69,
"starting_srs_stage": 8,
"ending_srs_stage": 9,
"incorrect_meaning_answers": 0,
"incorrect_reading_answers": 0,
},
},
{
"id": 6418872,
"object": "review",
"url": "https://api.wanikani.com/v2/reviews/6418872",
"data_updated_at": "2017-08-13T14:32:56.587244Z",
"data": {
"created_at": "2017-08-13T14:32:56.587244Z",
"assignment_id": 30950168,
"subject_id": 60,
"starting_srs_stage": 8,
"ending_srs_stage": 9,
"incorrect_meaning_answers": 0,
"incorrect_reading_answers": 0,
},
},
],
}
LEVEL_PROGRESSIONS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/level_progressions",
"pages": {"per_page": 500, "next_url": None, "previous_url": None},
"total_count": 2,
"data_updated_at": "2018-07-05T18:03:21.967992Z",
"data": [
{
"id": 15446,
"object": "level_progression",
"url": "https://api.wanikani.com/v2/level_progressions/15446",
"data_updated_at": "2018-07-05T15:04:04.222661Z",
"data": {
"created_at": "2017-09-28T01:24:11.715238Z",
"level": 7,
"unlocked_at": "2017-06-12T15:24:48.181971Z",
"started_at": "2017-09-28T01:24:11.707880Z",
"passed_at": "2018-07-05T15:04:04.210181Z",
"completed_at": None,
"abandoned_at": None,
},
},
{
"id": 365549,
"object": "level_progression",
"url": "https://api.wanikani.com/v2/level_progressions/365549",
"data_updated_at": "2018-07-05T18:03:21.967992Z",
"data": {
"created_at": "2018-07-05T15:04:04.365184Z",
"level": 8,
"unlocked_at": "2018-07-05T15:04:04.338492Z",
"started_at": "2018-07-05T18:03:21.957917Z",
"passed_at": None,
"completed_at": None,
"abandoned_at": None,
},
},
],
}
RESETS_PAGE = {
"object": "collection",
"url": "https://api.wanikani.com/v2/resets",
"pages": {"per_page": 500, "next_url": None, "previous_url": None},
"total_count": 1,
"data_updated_at": "2018-03-21T22:07:39.261116Z",
"data": [
{
"id": 6529,
"object": "reset",
"url": "https://api.wanikani.com/v2/resets/6529",
"data_updated_at": "2018-03-21T22:07:39.261116Z",
"data": {
"created_at": "2018-03-21T22:04:13.313903Z",
"original_level": 13,
"target_level": 1,
"confirmed_at": "2018-03-21T22:05:44.454026Z",
},
}
],
}
|
80696
|
from textwrap import dedent
import attack_flow.graphviz
def test_convert_attack_flow_to_dot():
flow = {
"actions": [
{
"id": "action1",
"name": "action-one",
},
{
"id": "action2",
"name": "action-two",
},
],
"assets": [
{"id": "asset1"},
{"id": "asset2"},
],
"relationships": [
{
"source": "action1",
"target": "asset1",
},
{
"source": "asset1",
"target": "action2",
},
{
"source": "action2",
"target": "asset2",
},
],
}
output = attack_flow.graphviz.convert(flow)
assert output == dedent('''\
digraph {
node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
"action1" [fillcolor=pink,label="action-one"]
"action2" [fillcolor=pink,label="action-two"]
"asset1" [fillcolor=lightblue1]
"asset2" [fillcolor=lightblue1]
}''')
def test_convert_complex_attack_flow_to_dot():
flow = {
"flow": {
"type": "attack-flow",
"id": "flow-1",
"name": "Attack Flow Export",
"author": "Unspecified",
"created": "2022-01-14T13:59:42-05:00"
},
"actions": [
{
"id": "flow-1/action-3",
"type": "action",
"name": "T1133: External Remote Services",
"description": "Kubernetes Dashboard",
"reference": "",
"succeeded": 1,
"confidence": 1,
"logic_operator_language": "",
"logic_operator": "AND"
},
{
"id": "flow-1/action-11",
"type": "action",
"name": "T1610: Deploy Container",
"description": "Deploy cryptomining container",
"reference": "",
"succeeded": 1,
"confidence": 1,
"logic_operator_language": "",
"logic_operator": "AND"
},
{
"id": "flow-1/action-12",
"type": "action",
"name": "T1552.001: Unsecured Credentials: Credentials In Files",
"description": "Harvest AWS service credentials.",
"reference": "",
"succeeded": 1,
"confidence": 0,
"logic_operator_language": "",
"logic_operator": "AND"
},
{
"id": "flow-1/action-17",
"type": "action",
"name": "T1496: Resource Highjacking",
"description": "Run cryptomining software",
"reference": "",
"succeeded": 1,
"confidence": 1,
"logic_operator_language": "",
"logic_operator": "AND"
},
{
"id": "flow-1/action-18",
"type": "action",
"name": "T1078.004: Valid Accounts: Cloud Accounts",
"description": "Use harvested AWS credentials",
"reference": "",
"succeeded": 1,
"confidence": 0,
"logic_operator_language": "",
"logic_operator": "AND"
},
{
"id": "flow-1/action-23",
"type": "action",
"name": "T1530: Data from Cloud Storage Object",
"description": "Download data from storage bucket",
"reference": "",
"succeeded": 1,
"confidence": 0,
"logic_operator_language": "",
"logic_operator": "AND"
}
],
"assets": [
{
"id": "flow-1/asset-1",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-7",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-9",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-13",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-15",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-19",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-21",
"type": "asset",
"state": "compromised"
},
{
"id": "flow-1/asset-24",
"type": "asset",
"state": "compromised"
}
],
"relationships": [
{
"source": "flow-1/asset-1",
"type": "flow-1#state",
"target": "flow-1/action-3"
},
{
"source": "flow-1/action-3",
"type": "flow-1#state-change",
"target": "flow-1/asset-7"
},
{
"source": "flow-1/action-3",
"type": "flow-1#state-change",
"target": "flow-1/asset-9"
},
{
"source": "flow-1/asset-7",
"type": "flow-1#state",
"target": "flow-1/action-11"
},
{
"source": "flow-1/asset-9",
"type": "flow-1#state",
"target": "flow-1/action-12"
},
{
"source": "flow-1/action-11",
"type": "flow-1#state-change",
"target": "flow-1/asset-13"
},
{
"source": "flow-1/action-12",
"type": "flow-1#state-change",
"target": "flow-1/asset-15"
},
{
"source": "flow-1/asset-13",
"type": "flow-1#state",
"target": "flow-1/action-17"
},
{
"source": "flow-1/asset-15",
"type": "flow-1#state",
"target": "flow-1/action-18"
},
{
"source": "flow-1/action-17",
"type": "flow-1#state-change",
"target": "flow-1/asset-19"
},
{
"source": "flow-1/action-18",
"type": "flow-1#state-change",
"target": "flow-1/asset-21"
},
{
"source": "flow-1/asset-21",
"type": "flow-1#state",
"target": "flow-1/action-23"
},
{
"source": "flow-1/action-23",
"type": "flow-1#state-change",
"target": "flow-1/asset-24"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-3"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-11"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-12"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-17"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-18"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/action-23"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-1"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-7"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-9"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-13"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-15"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-19"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-21"
},
{
"source": "flow-1",
"type": "flow-1#flow-edge",
"target": "flow-1/asset-24"
}
],
"object_properties": [],
"data_properties": [
{
"source": "flow-1/asset-1",
"type": "flow-1#description",
"target": "Kubernetes Dashboard"
},
{
"source": "flow-1/asset-1",
"type": "flow-1#state",
"target": "exposed"
},
{
"source": "flow-1/asset-1",
"type": "flow-1#state",
"target": "unsecured"
},
{
"source": "flow-1/asset-7",
"type": "flow-1#description",
"target": "Kubernetes Cluster"
},
{
"source": "flow-1/asset-9",
"type": "flow-1#description",
"target": "Kubernetes Admin Priv"
},
{
"source": "flow-1/asset-13",
"type": "flow-1#description",
"target": "Kubernetes Container"
},
{
"source": "flow-1/asset-15",
"type": "flow-1#description",
"target": "AWS Credentials"
},
{
"source": "flow-1/asset-19",
"type": "flow-1#description",
"target": "Cryptocurrency"
},
{
"source": "flow-1/asset-21",
"type": "flow-1#description",
"target": "AWS Access"
},
{
"source": "flow-1/asset-24",
"type": "flow-1#description",
"target": "Data"
}
]
}
output = attack_flow.graphviz.convert(flow)
assert output == dedent('''\
digraph {
node [shape=box,style="rounded,filled,fixedsize=true,width=2,height=1"]
"flow-1/action-3" [fillcolor=pink,label="T1133: External\\nRemote Services"]
"flow-1/action-11" [fillcolor=pink,label="T1610: Deploy\\nContainer"]
"flow-1/action-12" [fillcolor=pink,label="T1552.001: Unsecured\\nCredentials:\\nCredentials In Files"]
"flow-1/action-17" [fillcolor=pink,label="T1496: Resource\\nHighjacking"]
"flow-1/action-18" [fillcolor=pink,label="T1078.004: Valid\\nAccounts: Cloud\\nAccounts"]
"flow-1/action-23" [fillcolor=pink,label="T1530: Data from\\nCloud Storage Object"]
"flow-1/asset-1" [fillcolor=lightblue1,label="Kubernetes Dashboard"]
"flow-1/asset-7" [fillcolor=lightblue1,label="Kubernetes Cluster"]
"flow-1/asset-9" [fillcolor=lightblue1,label="Kubernetes Admin\\nPriv"]
"flow-1/asset-13" [fillcolor=lightblue1,label="Kubernetes Container"]
"flow-1/asset-15" [fillcolor=lightblue1,label="AWS Credentials"]
"flow-1/asset-19" [fillcolor=lightblue1,label="Cryptocurrency"]
"flow-1/asset-21" [fillcolor=lightblue1,label="AWS Access"]
"flow-1/asset-24" [fillcolor=lightblue1,label="Data"]
"flow-1/asset-1" -> "flow-1/action-3" [label="requires"]
"flow-1/action-3" -> "flow-1/asset-7" [label="provides"]
"flow-1/action-3" -> "flow-1/asset-9" [label="provides"]
"flow-1/asset-7" -> "flow-1/action-11" [label="requires"]
"flow-1/asset-9" -> "flow-1/action-12" [label="requires"]
"flow-1/action-11" -> "flow-1/asset-13" [label="provides"]
"flow-1/action-12" -> "flow-1/asset-15" [label="provides"]
"flow-1/asset-13" -> "flow-1/action-17" [label="requires"]
"flow-1/asset-15" -> "flow-1/action-18" [label="requires"]
"flow-1/action-17" -> "flow-1/asset-19" [label="provides"]
"flow-1/action-18" -> "flow-1/asset-21" [label="provides"]
"flow-1/asset-21" -> "flow-1/action-23" [label="requires"]
"flow-1/action-23" -> "flow-1/asset-24" [label="provides"]
"flow-1/asset-1-exposed-state" [fillcolor=lightgreen,label="exposed"]
"flow-1/asset-1-unsecured-state" [fillcolor=lightgreen,label="unsecured"]
"flow-1/asset-1-exposed-state" -> "flow-1/asset-1" [dir=none,style=dashed]
"flow-1/asset-1-unsecured-state" -> "flow-1/asset-1" [dir=none,style=dashed]
}''') # noqa: E501
def test_align_node_label_one_liner():
assert attack_flow.graphviz.align_node_label("one liner") == "one liner"
def test_align_node_label_multiline():
assert attack_flow.graphviz.align_node_label("multi liner label example", width=15) == "multi liner\\nlabel example"
def test_align_node_label_string_escaping():
assert attack_flow.graphviz.align_node_label("a \"tricky\" example") == 'a \\"tricky\\" example'
|
80709
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
def dntrack(df: pd.DataFrame,
rho: (list,str) = None,
ntr: (list,str) = None,
lims: list = None,
lime: bool = False,
dtick: bool =False,
fill: bool =True,
fontsize: int=8,
grid_numbers : list = [11,51],
steps: list = None,
correlation: pd.DataFrame = None,
rho_kw:dict={},
ntr_kw:dict={},
corr_kw:dict={},
ax=None,
rho_colormap:str='hot',
ntr_colormap:str='winter',
depth_ref:str='md'
):
"""dntrack [summary]
Parameters
----------
df : pd.DataFrame
[description]
rho : [type], optional
[description], by default None
ntr : [type], optional
[description], by default None
lims : list, optional
[description], by default None
lime : bool, optional
[description], by default False
dtick : bool, optional
[description], by default False
fill : bool, optional
[description], by default True
fontsize : int, optional
[description], by default 8
grid_numbers : list, optional
[description], by default [11,51]
steps : list, optional
[description], by default None
correlation : pd.DataFrame, optional
[description], by default None
rho_kw : dict, optional
[description], by default {}
ntr_kw : dict, optional
[description], by default {}
corr_kw : dict, optional
[description], by default {}
ax : [type], optional
[description], by default None
rho_colormap : str, optional
[description], by default 'hot'
ntr_colormap : str, optional
[description], by default 'winter'
depth_ref : str, optional
[description], by default 'md'
"""
assert isinstance(df,pd.DataFrame)
assert depth_ref in ['md','tvd','tvdss'], "depth_ref can only be one of ['md','tvd','tvdss']"
#Set Axes
dax=ax or plt.gca()
nax=dax.twiny()
# Default kwargs for rho and ntr lines
def_rho_kw = {
'color': 'darkred',
'linestyle':'-',
'linewidth': 2
}
for (k,v) in def_rho_kw.items():
if k not in rho_kw:
rho_kw[k]=v
def_ntr_kw = {
'color': 'darkblue',
'linestyle':'-',
'linewidth': 1
}
for (k,v) in def_ntr_kw.items():
if k not in ntr_kw:
ntr_kw[k]=v
def_corr_kw = {
'color': 'red',
'linestyle':'--',
'linewidth': 2
}
for (k,v) in def_corr_kw.items():
if k not in corr_kw:
corr_kw[k]=v
    #Set the density endpoint for the neutron-density overlay
    #(limestone matrix d=2.71, sandstone matrix d=2.65)
    if lime:
        d=2.71
    else:
        d=2.65
m=(d-1.9)/(0-0.45)
b=-m*0.45+1.9
rholim=-0.15*m+b
    depth = df.index if depth_ref=='md' else df[depth_ref]
    #Depth Limits (must be resolved before the grid spacing is computed,
    #otherwise lims=None crashes the linspace/arange calls below)
    if lims is None:
        lims=[depth.min(),depth.max()]
    #Set the vertical grid spacing
    if steps is None:
        mayor_grid = np.linspace(lims[0],lims[1],grid_numbers[0])
        minor_grid = np.linspace(lims[0],lims[1],grid_numbers[1])
    else:
        mayor_grid = np.arange(lims[0],lims[1],steps[0])
        minor_grid = np.arange(lims[0],lims[1],steps[1])
#Set Density Axes
if rho is not None:
if isinstance(rho,str):
dax.plot(df[rho],depth,**rho_kw) #Plotting
elif isinstance(rho,list):
cmap = mpl.cm.get_cmap(rho_colormap,len(rho))
for i,r in enumerate(rho):
rho_kw['color']=cmap(i)
dax.plot(df[r],depth,**rho_kw)
#Set the gridding and ticks
dax.set_xlabel("Density [g/cc]")
dax.set_xticks(np.linspace(1.9,rholim,4))
dax.set_xlim([1.9,rholim])
dax.tick_params("both",labelsize=fontsize)
dax.grid(True,linewidth=1.0)
dax.grid(True,which='minor', linewidth=0.5)
dax.set_yticks(minor_grid,minor=True)
dax.set_yticks(mayor_grid)
    if dtick:
dax.set_yticklabels(mayor_grid)
else:
dax.set_yticklabels([])
#Set neutron axes
if ntr is not None:
if isinstance(ntr,str):
nax.plot(df[ntr],depth,**ntr_kw) #Plotting
elif isinstance(ntr,list):
cmap = mpl.cm.get_cmap(ntr_colormap,len(ntr))
for i,r in enumerate(ntr):
ntr_kw['color']=cmap(i)
nax.plot(df[r],depth,**ntr_kw)
nax.set_xlabel("Neutron [v/v]")
nax.set_xticks(np.linspace(0.45,-0.15,4))
nax.set_xlim([0.45,-0.15])
nax.tick_params("both",labelsize=fontsize)
nax.set_yticks(minor_grid,minor=True)
nax.set_yticks(mayor_grid)
    if dtick:
nax.set_yticklabels(mayor_grid)
else:
nax.set_yticklabels([])
    #lims were already resolved above, before the grid spacing
    dax.set_ylim([lims[1],lims[0]])
#Convert the Neutron values to Density Values in order to fill the cross Density-Neutron
#When the track is callibrated for sandstone use m=-1.666667 and b=2.65
#When the track is callibrated for limestone use m=-1.8 and b=2.71
    if (ntr is not None) and (rho is not None):
        NtrTorho=df[ntr]*m+b
        ntrrho=NtrTorho.values.ravel()
        if fill:
            dax.fill_betweenx(depth,df[rho],ntrrho,where=(df[rho] < ntrrho),color="red")
#Add Correlation Line
if correlation is not None:
cor_ann = corr_kw.pop('ann',False)
cor_ann_fontsize = corr_kw.pop('fontsize',8)
for i in correlation.iterrows():
if depth_ref == 'tvdss':
if i[1]['depth'] >= lims[0] or i[1]['depth'] <= lims[1]:
continue
else:
if i[1]['depth'] < lims[0] or i[1]['depth'] > lims[1]:
continue
dax.hlines(i[1]['depth'],0,rholim, **corr_kw)
if cor_ann:
try:
dax.annotate(f"{i[1]['depth']} - {i[1]['comment']} ",xy=(rholim-0.3,i[1]['depth']-1),
xycoords='data',horizontalalignment='right',bbox={'boxstyle':'roundtooth', 'fc':'0.8'},
fontsize = cor_ann_fontsize)
                except KeyError:  # no 'comment' column in the correlation table
dax.annotate(f"{i[1]['depth']}",xy=(rholim-3,i[1]['depth']-1),
xycoords='data',horizontalalignment='right',
bbox={'boxstyle':'roundtooth', 'fc':'0.8'},
fontsize = cor_ann_fontsize)
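# Minimal usage sketch (curve names 'RHOB' and 'NPHI' are illustrative; any
# density/neutron columns present in the DataFrame work):
#   fig, ax = plt.subplots(figsize=(3, 8))
#   dntrack(logs_df, rho='RHOB', ntr='NPHI', lims=[5000, 6000], ax=ax)
#   plt.show()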
|
80735
|
from sys import argv
import re
regex_pattern_string = argv[1]
strings_to_match = argv[2:]
pattern = re.compile(regex_pattern_string)
def print_case(s):
if pattern.match(s): # This is where the work happens
prefix = "Match: \t"
else:
prefix = "No match:\t"
    print(prefix, s)
for s in strings_to_match:
    print_case(s)
### Note: older Python 2 versions of this script used
### map(print_case, strings_to_match), but map() is lazy in Python 3,
### so an explicit for loop is required to actually run print_case.
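### Example invocation (script name is hypothetical):
###   python match.py '^foo' foobar barfoo
### reports a match for "foobar" and no match for "barfoo".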
|
80738
|
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.patches import Circle
import matplotlib.patches as patches
from . import field_info as f
def draw_field(axis=None):
    # plt.gca() must not be used as a default argument value: defaults are
    # evaluated once at import time, which would bind the axis to whatever
    # figure happened to be current when the module was imported
    mpl.rcParams['lines.linewidth'] = 2.33513514  # was 2 before
    ax = axis if axis is not None else plt.gca()
# draw field background
ax.add_patch(
patches.Rectangle(
(-f.x_field_length / 2, -f.y_field_length / 2), f.x_field_length, f.y_field_length,
facecolor="green", zorder=0
)
)
ax.plot([0, 0], [-f.y_length * 0.5, f.y_length * 0.5], 'white') # Middle line
ax.plot([f.x_opponent_groundline, f.x_opponent_groundline],
[f.y_left_sideline, f.y_right_sideline], 'white') # opponent ground line
ax.plot([f.x_own_groundline, f.x_own_groundline], [f.y_right_sideline, f.y_left_sideline],
'white') # own ground line
ax.plot([f.x_own_groundline, f.x_opponent_groundline], [f.y_left_sideline, f.y_left_sideline],
'white')
ax.plot([f.x_own_groundline, f.x_opponent_groundline],
[f.y_right_sideline, f.y_right_sideline], 'white')
ax.plot([f.x_opponent_groundline - f.x_penalty_area_length,
f.x_opponent_groundline - f.x_penalty_area_length],
[-f.y_penalty_area_length * 0.5, f.y_penalty_area_length * 0.5],
'white') # opp penalty
ax.plot([f.x_opponent_groundline, f.x_opponent_groundline - f.x_penalty_area_length],
[f.y_penalty_area_length * 0.5, f.y_penalty_area_length * 0.5], 'white') # opp penalty
ax.plot([f.x_opponent_groundline, f.x_opponent_groundline - f.x_penalty_area_length],
[-f.y_penalty_area_length * 0.5, -f.y_penalty_area_length * 0.5],
'white') # opp penalty
ax.plot([f.x_own_groundline + f.x_penalty_area_length,
f.x_own_groundline + f.x_penalty_area_length],
[-f.y_penalty_area_length * 0.5, f.y_penalty_area_length * 0.5],
'white') # own penalty
ax.plot([f.x_own_groundline, f.x_own_groundline + f.x_penalty_area_length],
[f.y_penalty_area_length * 0.5, f.y_penalty_area_length * 0.5], 'white') # own penalty
ax.plot([f.x_own_groundline, f.x_own_groundline + f.x_penalty_area_length],
[-f.y_penalty_area_length * 0.5, -f.y_penalty_area_length * 0.5],
'white') # own penalty
# Middle Circle
ax.add_artist(Circle(xy=(0, 0), radius=f.center_circle_radius, fill=False, edgecolor='white'))
# Penalty Marks
ax.add_artist(Circle(xy=(f.x_opponent_groundline - f.x_penalty_mark_distance, 0),
radius=f.penalty_cross_radius, color='white'))
ax.add_artist(Circle(xy=(f.x_own_groundline + f.x_penalty_mark_distance, 0),
radius=f.penalty_cross_radius, color='white'))
# Own goal box
ax.add_artist(
Circle(xy=(f.own_goalpost_right.x, f.own_goalpost_right.y), radius=f.goalpost_radius,
color='white')) # GoalPostRight
ax.add_artist(
Circle(xy=(f.own_goalpost_left.x, f.own_goalpost_left.y), radius=f.goalpost_radius,
color='white')) # GoalPostLeft
ax.plot([f.x_own_groundline, f.x_own_groundline - f.goal_depth],
[-f.goal_width * 0.5, -f.goal_width * 0.5], 'white') # own goal box
ax.plot([f.x_own_groundline, f.x_own_groundline - f.goal_depth],
[f.goal_width * 0.5, f.goal_width * 0.5], 'white') # own goal box
ax.plot([f.x_own_groundline - f.goal_depth, f.x_own_groundline - f.goal_depth],
[-f.goal_width * 0.5, f.goal_width * 0.5], 'white') # own goal box
# Opp GoalBox
ax.add_artist(Circle(xy=(f.opponent_goalpost_right.x, f.opponent_goalpost_right.y),
radius=f.goalpost_radius, color='white')) # GoalPostRight
ax.add_artist(Circle(xy=(f.opponent_goalpost_left.x, f.opponent_goalpost_left.y),
radius=f.goalpost_radius, color='white')) # GoalPostLeft
ax.plot([f.x_opponent_groundline, f.x_opponent_groundline + f.goal_depth],
[-f.goal_width * 0.5, -f.goal_width * 0.5], 'white') # Opp goal box
ax.plot([f.x_opponent_groundline, f.x_opponent_groundline + f.goal_depth],
[f.goal_width * 0.5, f.goal_width * 0.5], 'white') # Opp goal box
ax.plot([f.x_opponent_groundline + f.goal_depth, f.x_opponent_groundline + f.goal_depth],
[-f.goal_width * 0.5, f.goal_width * 0.5], 'white') # Opp goal box
ax.set_xlim([-f.x_field_length * 0.5, f.x_field_length * 0.5])
ax.set_ylim([-f.y_field_length * 0.5, f.y_field_length * 0.5])
# ax.set_facecolor('green')
ax.set_aspect("equal")
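# Minimal usage sketch (field dimensions are read from field_info):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   draw_field(ax)
#   plt.show()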
|
80792
|
import mgear
VERSION_MAJOR = mgear.VERSION[0]
VERSION_MINOR = mgear.VERSION[1]
VERSION_PATCH = mgear.VERSION[2]
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
version = '%i.%i.%i' % version_info
__version__ = version
__all__ = ['version', 'version_info', '__version__']
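# Usage sketch: version_info is a plain tuple, so releases compare naturally:
#   if version_info >= (3, 0, 0):
#       pass  # branch for mGear 3.x or newer (illustrative)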
|
80807
|
from __future__ import annotations
import asyncio
import json
import logging
import os
from typing import Any, Mapping, Optional, Union
import aiohttp
from aiohttp import hdrs
from aiohttp.client import _RequestContextManager
from .auth import Auth
from .request import ClientRequest
from .typedefs import WsBytesHandler, WsJsonHandler, WsStrHandler
from .ws import ClientWebSocketResponse, ws_run_forever
logger = logging.getLogger(__name__)
class Client:
"""
    HTTP request client class.
    .. note::
        The ``apis`` argument can be omitted.
:Example:
.. code-block:: python
async def main():
async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
r = await client.get('https://...', params={'foo': 'bar'})
print(await r.json())
.. code-block:: python
async def main():
async with pybotters.Client(apis={'example': ['KEY', 'SECRET']}) as client:
wstask = await client.ws_connect(
'wss://...',
send_json={'foo': 'bar'},
hdlr_json=pybotters.print_handler
)
await wstask
# Ctrl+C to break
Basic API
    HTTP request functions available at the package top level. These are synchronous functions that internally wrap pybotters.Client.
:Example:
.. code-block:: python
r = pybotters.get(
'https://...',
params={'foo': 'bar'},
apis={'example': ['KEY', 'SECRET']}
)
print(r.text())
print(r.json())
.. code-block:: python
pybotters.ws_connect(
'wss://...',
send_json={'foo': 'bar'},
hdlr_json=pybotters.print_handler,
apis={'example': ['KEY', 'SECRET']}
)
# Ctrl+C to break
"""
_session: aiohttp.ClientSession
_base_url: str
def __init__(
self,
apis: Optional[Union[dict[str, list[str]], str]] = None,
base_url: str = '',
**kwargs: Any,
) -> None:
"""
        :param apis: API key/secret data (optional), e.g. {'exchange': ['key', 'secret']}
        :param base_url: URL automatically prepended to the url of each request method (optional)
        :param ``**kwargs``: keyword arguments passed to aiohttp.ClientSession (optional)
"""
self._session = aiohttp.ClientSession(
request_class=ClientRequest,
ws_response_class=ClientWebSocketResponse,
**kwargs,
)
apis = self._load_apis(apis)
self._session.__dict__['_apis'] = self._encode_apis(apis)
self._base_url = base_url
async def __aenter__(self) -> 'Client':
return self
async def __aexit__(self, *args: Any) -> None:
await self.close()
async def close(self) -> None:
await self._session.close()
def _request(
self,
method: str,
url: str,
*,
params: Optional[Mapping[str, Any]] = None,
data: Optional[dict[str, Any]] = None,
auth: Optional[Auth] = Auth,
**kwargs: Any,
) -> _RequestContextManager:
return self._session.request(
method=method,
url=self._base_url + url,
params=params,
data=data,
auth=auth,
**kwargs,
)
def request(
self,
method: str,
url: str,
*,
params: Optional[Mapping[str, str]] = None,
data: Any = None,
**kwargs: Any,
) -> _RequestContextManager:
"""
        :param method: HTTP method such as GET, POST, PUT, or DELETE
        :param url: request URL
        :param params: URL query string (optional)
        :param data: request body (optional)
        :param headers: request headers (optional)
        :param auth: enables/disables automatic API authentication. Enabled by default; pass auth=None to disable it (optional)
        :param ``**kwargs``: keyword arguments passed to aiohttp.ClientSession.request (optional)
"""
return self._request(method, url, params=params, data=data, **kwargs)
def get(
self,
url: str,
*,
params: Optional[Mapping[str, str]] = None,
**kwargs: Any,
) -> _RequestContextManager:
return self._request(hdrs.METH_GET, url, params=params, **kwargs)
def post(
self,
url: str,
*,
data: Any = None,
**kwargs: Any,
) -> _RequestContextManager:
return self._request(hdrs.METH_POST, url, data=data, **kwargs)
def put(
self,
url: str,
*,
data: Any = None,
**kwargs: Any,
) -> _RequestContextManager:
return self._request(hdrs.METH_PUT, url, data=data, **kwargs)
def delete(
self,
url: str,
*,
data: Any = None,
**kwargs: Any,
) -> _RequestContextManager:
return self._request(hdrs.METH_DELETE, url, data=data, **kwargs)
async def ws_connect(
self,
url: str,
*,
send_str: Optional[Union[str, list[str]]] = None,
send_bytes: Optional[Union[bytes, list[bytes]]] = None,
send_json: Any = None,
hdlr_str: Optional[WsStrHandler] = None,
hdlr_bytes: Optional[WsBytesHandler] = None,
hdlr_json: Optional[WsJsonHandler] = None,
**kwargs: Any,
) -> asyncio.Task:
"""
:param url: WebSocket URL
        :param send_str: string(s) to send over the WebSocket, as a string or a list of strings (optional)
        :param send_bytes: bytes to send over the WebSocket, as bytes or a list of bytes (optional)
        :param send_json: dict object(s) to send over the WebSocket, as a dict or a list of dicts (optional)
        :param hdlr_str: handler for received WebSocket data; called with msg (str) as the first
            argument and ws (ClientWebSocketResponse) as the second (optional)
        :param hdlr_bytes: handler for received WebSocket data; called with msg (bytes) as the first
            argument and ws (ClientWebSocketResponse) as the second (optional)
        :param hdlr_json: handler for received WebSocket data; called with msg (Any, JSON-like) as the first
            argument and ws (ClientWebSocketResponse) as the second (optional)
        :param headers: request headers (optional)
        :param auth: enables/disables automatic API authentication. Enabled by default; pass auth=None to disable it (optional)
        :param ``**kwargs``: keyword arguments passed to aiohttp.ClientSession.ws_connect (optional)
"""
event = asyncio.Event()
task = asyncio.create_task(
ws_run_forever(
url,
self._session,
event,
send_str=send_str,
send_bytes=send_bytes,
send_json=send_json,
hdlr_str=hdlr_str,
hdlr_bytes=hdlr_bytes,
hdlr_json=hdlr_json,
**kwargs,
)
)
await event.wait()
return task
@staticmethod
def _load_apis(
apis: Optional[Union[dict[str, list[str]], str]]
) -> dict[str, list[str]]:
if apis is None:
apis = {}
if isinstance(apis, dict):
if apis:
return apis
else:
current_apis = os.path.join(os.getcwd(), 'apis.json')
if os.path.isfile(current_apis):
with open(current_apis) as fp:
return json.load(fp)
else:
env_apis = os.getenv('PYBOTTERS_APIS')
if env_apis and os.path.isfile(env_apis):
with open(env_apis) as fp:
return json.load(fp)
else:
return apis
elif isinstance(apis, str):
with open(apis) as fp:
return json.load(fp)
else:
logger.warning(f'apis must be dict or str, not {apis.__class__.__name__}')
return {}
@staticmethod
def _encode_apis(
apis: Optional[dict[str, list[str]]]
) -> dict[str, tuple[str, bytes]]:
if apis is None:
apis = {}
encoded = {}
for name in apis:
if len(apis[name]) == 2:
encoded[name] = (apis[name][0], apis[name][1].encode())
return encoded
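# Hedged sketch (an assumption based on _load_apis above, not part of the
# original module): an ``apis.json`` file in the current working directory, or
# at the path named by the PYBOTTERS_APIS environment variable, is expected to
# look like:
#
#     {
#         "example": ["YOUR_API_KEY", "YOUR_API_SECRET"]
#     }
#
# _encode_apis then keeps each key as str and encodes each secret to bytes,
# silently skipping entries that are not exactly [key, secret] pairs.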
|
80812
|
import math, random, sys, os
import numpy as np
import pandas as pd
import networkx as nx
import func_timeout
import tqdm
from rdkit import RDLogger
from rdkit.Chem import Descriptors
from rdkit.Chem import rdmolops
import rdkit.Chem.QED
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_model
from botorch.utils import standardize
from gpytorch.mlls import ExactMarginalLogLikelihood
from botorch.acquisition import UpperConfidenceBound,ExpectedImprovement,ProbabilityOfImprovement, qExpectedImprovement, qUpperConfidenceBound, qNoisyExpectedImprovement
from botorch.optim import optimize_acqf
from utils import sascorer, quality_filters as qual
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
###################################### NUMERICAL STABILITY ##################################################
def LSE(log_ai):
"""
log_ai of dim (batch_size, num_items_in_sum)
"""
max_log,_ = log_ai.max(dim=1,keepdim=True)
return max_log.squeeze() + torch.log(torch.exp(log_ai - max_log).sum(dim=1,keepdim=False))
def LDE(log_ai,log_bi):
max_log_p = torch.max(log_ai,log_bi)
min_log_p = torch.min(log_ai,log_bi)
return (max_log_p + torch.log(1 - torch.exp(min_log_p - max_log_p)))
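# Hedged sketch (added for illustration, not part of the original module):
# LSE is the standard log-sum-exp trick, computing log(sum_i exp(log_ai))
# stably by factoring out the row-wise max, and LDE is the analogous
# log-diff-exp, log(exp(a) - exp(b)) for a >= b. A minimal self-check:
def _demo_log_sum_exp():
    log_ai = torch.log(torch.tensor([[1.0, 2.0, 3.0]]))
    # LSE should recover log(1 + 2 + 3) = log(6)
    assert torch.isclose(LSE(log_ai), torch.log(torch.tensor(6.0))).all()
    a, b = torch.log(torch.tensor(5.0)), torch.log(torch.tensor(2.0))
    # LDE should recover log(5 - 2) = log(3)
    assert torch.isclose(LDE(a, b), torch.log(torch.tensor(3.0)))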
###################################### MOLECULE PROPERTIES ##################################################
logP_file = dir_path + os.sep + 'stats_training_data/logP_values.txt'
SAS_file = dir_path + os.sep + 'stats_training_data/SA_scores.txt'
cycle_file = dir_path + os.sep + 'stats_training_data/cycle_scores.txt'
logP_values = np.loadtxt(logP_file)
SAS_values = np.loadtxt(SAS_file)
cycle_values = np.loadtxt(cycle_file)
training_stats = {
'logP_mean':np.mean(logP_values),
'logP_std':np.std(logP_values),
'SAS_mean':np.mean(SAS_values),
'SAS_std':np.std(SAS_values),
'cycles_mean':np.mean(cycle_values),
'cycles_std':np.std(cycle_values)
}
#Property stats training data
final_logP_train_stats_raw={'mean': -0.002467457978476197, 'std': 2.056736565112327, 'median': 0.42761702630532883, 'min': -62.516944569759666, 'max': 4.519902819580757, 'P1': -6.308202037634639, 'P5': -3.7061575195672125, 'P10': -2.6097184083169522, 'P25': -1.0492552134450062, 'P75': 1.4174359964331003, 'P90': 2.1113332292393188, 'P95': 2.4569317747277495, 'P99': 3.0048043651582605}
final_logP_train_stats_normalized={'mean': -0.0013269769793680093, 'std': 1.0022175676799359, 'median': 0.20822120507327543, 'min': -30.46370322413232, 'max': 2.2023601097894416, 'P1': -3.0740150902231402, 'P5': -1.8060773166698125, 'P10': -1.2717987692036161, 'P25': -0.5114081551001504, 'P75': 0.6905739551134478, 'P90': 1.0286998043562519, 'P95': 1.1971048594070872, 'P99': 1.464075062137245}
#Decoder uncertainty stats
decoder_uncertainty_stats_training ={
'JTVAE': {
'MI_Importance_sampling': {'mean': 0.7737001577503979, 'std': 0.7191886465214079, 'median': 0.6115016341209412, 'min': 0.003500204300507903, 'max': 3.2164592742919917, 'P1': 0.004812391460873187, 'P5': 0.03621037751436234, 'P25': 0.16248027607798576, 'P75': 1.1251116693019867, 'P95': 2.4251182436943055, 'P99': 2.9005215597152705},
'NLL_prior': {'mean': 110.49043981933593, 'std': 22.952045705008157, 'median': 106.87257385253906, 'min': 80.51214599609375, 'max': 199.3219451904297, 'P1': 83.63397506713868, 'P5': 86.59568367004395, 'P25': 96.7742748260498, 'P75': 117.61268424987794, 'P95': 147.31882400512683, 'P99': 195.5686897277832}
}
}
def verify_smile(smile):
return (smile != '') and pd.notnull(smile) and (rdkit.Chem.MolFromSmiles(smile) is not None)
def clean_up_smiles(smiles):
return list(map(lambda x: x.strip(), smiles))
def compute_qed(smile, default_value=np.nan):
try:
mol= rdkit.Chem.MolFromSmiles(smile)
qed = rdkit.Chem.QED.qed(mol)
return qed
except:
return default_value
def compute_sas(smile, default_value=np.nan):
try:
mol = rdkit.Chem.MolFromSmiles(smile)
sas = sascorer.calculateScore(mol)
return sas
except:
return default_value
def compute_logP(smile, default_value=np.nan):
try:
mol = rdkit.Chem.MolFromSmiles(smile)
logp = Descriptors.MolLogP(mol)
return logp
except:
return default_value
def compute_logPminusSAS_score(smile, default_value=np.nan):
try:
mol = rdkit.Chem.MolFromSmiles(smile)
score = Descriptors.MolLogP(mol) - sascorer.calculateScore(mol)
return score
except:
return default_value
def compute_target_logP(smile, default_value=np.nan, train_stats = training_stats):
try:
mol = rdkit.Chem.MolFromSmiles(smile)
logP_score = Descriptors.MolLogP(mol)
SAS_score = - sascorer.calculateScore(mol)
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
cycle_score = - cycle_length
logP_score_normalized = (logP_score - train_stats['logP_mean']) / train_stats['logP_std']
SAS_score_normalized = (SAS_score - train_stats['SAS_mean']) / train_stats['SAS_std']
cycle_score_normalized = (cycle_score - train_stats['cycles_mean']) / train_stats['cycles_std']
return logP_score_normalized + SAS_score_normalized + cycle_score_normalized
except:
return default_value
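# Note (added for clarity, not part of the original module): compute_target_logP
# is the usual "penalized logP" benchmark objective,
#     score = z(logP) + z(-SAS) + z(-max(0, longest_ring - 6)),
# where each term is z-scored with the training-set statistics loaded above, so
# high logP is rewarded while hard-to-synthesize molecules and rings longer
# than 6 atoms are penalized.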
def convert_tensors_to_smiles(tensor_molecules, indices_chars):
"""
For CharVAE only. Input tensor_molecules of size (batch, seq_len, n_chars)
"""
smiles = []
for molecule in tensor_molecules.detach().cpu().numpy():
temp_str = ""
for atom_j in molecule:
index = np.argmax(atom_j)
temp_str += indices_chars[index]
smiles.append(temp_str)
return np.array(clean_up_smiles(smiles))
def compute_stats(input_array, mode="nan"):
if mode =="normal":
return {
'mean':input_array.mean(),
'std':input_array.std(),
'median':np.median(input_array),
'min':input_array.min(),
'max':input_array.max(),
'P1':np.percentile(input_array,1),
'P5':np.percentile(input_array,5),
'P25':np.percentile(input_array,25),
'P75':np.percentile(input_array,75),
'P95':np.percentile(input_array,95),
'P99':np.percentile(input_array,99)
}
elif mode=="nan":
return {
'mean':np.nanmean(input_array),
'std': np.nanstd(input_array),
'median':np.nanmedian(input_array),
'min':np.nanmin(input_array),
'max':np.nanmax(input_array),
'P1': np.nanpercentile(input_array,1),
'P5': np.nanpercentile(input_array,5),
'P25':np.nanpercentile(input_array,25),
'P75':np.nanpercentile(input_array,75),
'P95':np.nanpercentile(input_array,95),
'P99':np.nanpercentile(input_array,99)
}
def check_validity_objects(smiles, return_valid=True):
"""smiles is a list of molecule SMILE representation.
Returns valid smiles generated by default, since needed to compute unicity and novelty."""
num_molecules=len(smiles)
if num_molecules==0:
print("No valid modelcule generated!")
return 0
valid_smiles=[]
for smile in smiles:
if verify_smile(smile):
valid_smiles.append(smile)
if return_valid:
return len(valid_smiles) / float(num_molecules), valid_smiles
else:
return len(valid_smiles) / float(num_molecules)
def check_unicity_objects(smiles):
"""Need to pass in valid smiles"""
unique_smiles = set() #empty set
num_molecules=len(smiles)
if num_molecules==0:
return 0
else:
for smile in smiles:
if smile not in unique_smiles:
unique_smiles.add(smile)
return len(unique_smiles)/float(num_molecules)
def check_novelty_objects(smiles,training_smiles, verbose=False):
"""Need to pass in valid smiles"""
count_in_training=0
num_molecules_generated = len(smiles)
if num_molecules_generated==0:
return 0
else:
training_smiles_set=set(training_smiles)
num_molecules_training = len(training_smiles_set)
if verbose:
print("Num distinct molecules in training data: "+str(num_molecules_training))
for smile in smiles:
if smile in training_smiles_set:
count_in_training+=1
if verbose:
print("Num generated molecules that were already in training data: "+str(count_in_training))
return 1 - count_in_training/float(num_molecules_generated)
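# Hedged sketch (added for illustration, not part of the original module):
# how the three metrics above compose on a toy batch.
def _demo_generation_metrics():
    generated = ['CCO', 'CCO', 'c1ccccc1', 'not_a_smiles']
    training = ['CCO']
    # 3 of the 4 SMILES parse with RDKit -> validity 0.75
    validity, valid = check_validity_objects(generated, return_valid=True)
    # 2 distinct SMILES among the 3 valid ones -> unicity 2/3
    unicity = check_unicity_objects(valid)
    # 2 of the 3 valid SMILES already occur in training -> novelty 1/3
    novelty = check_novelty_objects(valid, training)
    return validity, unicity, novelty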
def log_stats(file_name, stats, log_entry):
with open(file_name, "a+") as logs_file:
logs_file.write(log_entry+"\n")
logs_file.write(str(stats))
logs_file.write("\n\n")
class assessment_generated_objects():
def __init__(self, generated_objects_list, model_training_data, prop="final_logP"):
"""Function that returns the property of the best 3 objects generated; top 50 (or less if fewer valid) and average over all generated
Also return % valid, %unique, %novel of all generated elements"""
self.num_generated_objects = len(generated_objects_list)
#Compute Validity of generated objects
self.validity_all, self.valid_generated_objects_list = check_validity_objects(generated_objects_list, return_valid=True)
self.num_valid_generated_objects = len(self.valid_generated_objects_list)
#Compute Properties of generated objects
self.property_generated_objects = []
if prop=="QED":
for valid_generated_object in self.valid_generated_objects_list:
self.property_generated_objects.append(compute_qed(valid_generated_object))
elif prop=="logPminusSAS":
for valid_generated_object in self.valid_generated_objects_list:
self.property_generated_objects.append(compute_logPminusSAS_score(valid_generated_object))
elif prop=="final_logP":
for valid_generated_object in self.valid_generated_objects_list:
self.property_generated_objects.append(compute_target_logP(valid_generated_object))
try:
self.stats_property_generated_objects = compute_stats(np.array(self.property_generated_objects))
except:
self.stats_property_generated_objects = None
property_df = pd.DataFrame({'Valid_generated_objects': np.array(self.valid_generated_objects_list),
'Property_valid_generated_objects': np.array(self.property_generated_objects)})
#quality_filters
if len(property_df)>0:
QF = qual.QualityFiltersCheck(training_data_smi=[])
property_df['Pass_quality_filters']= QF.check_smiles_pass_quality_filters_flag(self.valid_generated_objects_list).astype(bool)
property_df.sort_values(by=['Property_valid_generated_objects'], ascending=False, inplace=True)
#De-normalize scores
if prop=="final_logP":
property_df['Property_valid_generated_objects'] = property_df['Property_valid_generated_objects']*final_logP_train_stats_raw['std'] + final_logP_train_stats_raw['mean']
property_df.reset_index(inplace=True, drop=True)
self.top10_valid_molecules = property_df['Valid_generated_objects'][:10]
self.top50_valid_molecules = property_df['Valid_generated_objects'][:50]
self.top_properties_scores={}
self.top_properties_smiles={}
self.len_property_df = len(property_df)
for i in range(1,11):
if self.len_property_df > i-1:
self.top_properties_scores['top_'+str(i)] = property_df['Property_valid_generated_objects'][i-1]
self.top_properties_smiles['top_'+str(i)] = property_df['Valid_generated_objects'][i-1]
else:
self.top_properties_scores['top_'+str(i)] = None
self.top_properties_smiles['top_'+str(i)] = None
if self.len_property_df > 0:
#Avg property
self.property_all = property_df['Property_valid_generated_objects'].mean()
self.property_top10 = property_df['Property_valid_generated_objects'][:10].mean()
self.property_top50 = property_df['Property_valid_generated_objects'][:50].mean()
#Compute Unicity of generated objects
self.unicity_all = check_unicity_objects(self.valid_generated_objects_list)
self.unicity_top10 = check_unicity_objects(self.top10_valid_molecules)
#Compute Novelty of generated objects
self.novelty_all = check_novelty_objects(self.valid_generated_objects_list, model_training_data)
self.novelty_top10 = check_novelty_objects(self.top10_valid_molecules, model_training_data)
#Quality
self.quality_all = property_df['Pass_quality_filters'].astype(int).mean()
self.quality_top10 = np.nanmean(QF.check_smiles_pass_quality_filters_flag(self.top10_valid_molecules))
#QED
self.qed_all = np.nanmean(np.array([compute_qed(x) for x in self.valid_generated_objects_list]))
self.qed_top10 = np.nanmean(np.array([compute_qed(x) for x in self.top10_valid_molecules]))
else:
self.property_all = None
self.property_top10 = None
self.property_top50 = None
self.unicity_all = None
self.unicity_top10 = None
self.novelty_all = None
self.novelty_top10 = None
self.quality_all = None
self.quality_top10 = None
self.qed_all = None
self.qed_top10 = None
#Stats passing quality filters
        if 'Pass_quality_filters' in property_df.columns:
            property_df_qual = property_df[property_df['Pass_quality_filters']].copy()
        else: #no valid object was generated, so the quality-filter column was never added
            property_df_qual = property_df.copy()
        property_df_qual.reset_index(inplace=True)
if len(property_df_qual)>0:
self.property_all_qual = property_df_qual['Property_valid_generated_objects'].mean()
self.property_top5avg_qual = property_df_qual['Property_valid_generated_objects'][:5].mean()
self.property_top10avg_qual = property_df_qual['Property_valid_generated_objects'][:10].mean()
self.property_top50avg_qual = property_df_qual['Property_valid_generated_objects'][:50].mean()
self.qed_all_qual = np.nanmean(np.array([compute_qed(x) for x in property_df_qual['Valid_generated_objects']]))
self.qed_top10_qual = np.nanmean(np.array([compute_qed(x) for x in property_df_qual['Valid_generated_objects'][:10]]))
else:
self.property_all_qual = None
self.property_top5avg_qual = None
self.property_top10avg_qual = None
self.property_top50avg_qual = None
self.qed_all_qual = None
self.qed_top10_qual = None
self.property_top_qual = {}
for i in range(1,11):
if len(property_df_qual) > i-1:
self.property_top_qual[i]=property_df_qual['Property_valid_generated_objects'][i-1]
else:
self.property_top_qual[i]=None
def log_all_stats_generated_objects(self, filename):
results={}
log_stats(file_name= filename, stats=self.num_generated_objects, log_entry="Number of generated objects")
log_stats(file_name= filename, stats=self.validity_all, log_entry="Proportion of valid generated objects")
results['validity_all']=self.validity_all
log_stats(file_name= filename, stats=self.unicity_all, log_entry="Proportion of unique valid generated objects")
results['unicity_all']=self.unicity_all
results['unicity_top10']=self.unicity_top10
log_stats(file_name= filename, stats=self.novelty_all, log_entry="Proportion of novel valid generated objects")
results['novelty_all']=self.novelty_all
results['novelty_top10']=self.novelty_top10
log_stats(file_name= filename, stats=self.quality_all, log_entry="Proportion of valid generated objects passing quality filters")
results['quality_all']=self.quality_all
results['quality_top10']=self.quality_top10
log_stats(file_name= filename, stats=self.qed_all, log_entry="Avg qed of valid generated objects")
results['qed_all']=self.qed_all
results['qed_top10']=self.qed_top10
log_stats(file_name= filename, stats=self.stats_property_generated_objects, log_entry="Stats of properties of generated objects")
results['target_property_all']=self.property_all
results['target_property_top10']=self.property_top10
results['target_property_top50']=self.property_top50
for i in range(1,11):
if self.len_property_df > i-1:
log_stats(file_name= filename, stats=self.top_properties_scores['top_'+str(i)], log_entry="Property of top "+str(i)+" generated object")
log_stats(file_name= filename, stats=self.top_properties_smiles['top_'+str(i)], log_entry="Smiles of top "+str(i)+" generated object")
results['top'+str(i)]=self.top_properties_scores['top_'+str(i)]
else:
results['top'+str(i)]=None
#Qual metrics
results['property_all_qual'] = self.property_all_qual
for i in range(1,11):
results['property_top'+str(i)+'_qual'] = self.property_top_qual[i]
results['property_top5avg_qual'] = self.property_top5avg_qual
results['property_top10avg_qual'] = self.property_top10avg_qual
results['property_top50avg_qual'] = self.property_top50avg_qual
results['qed_all_qual'] = self.qed_all_qual
results['qed_top10_qual'] = self.qed_top10_qual
results['top_10_molecules'] = self.top10_valid_molecules
return results
###################################### OPTIMIZATION INITIALIZATION ##################################################
def starting_objects_latent_embeddings(model, data, mode="random", num_objects_to_select=100, batch_size=256, property_upper_bound=None, model_type="JTVAE"):
if model_type=="JTVAE":
latent_space_dim = model.latent_size * 2
elif model_type=="CharVAE":
latent_space_dim = model.params.z_dim
if mode=="random":
num_objects_data = len(data)
selected_objects_indices = np.random.choice(a=range(num_objects_data), size=num_objects_to_select, replace=False).tolist()
starting_objects = np.array(data)[selected_objects_indices]
if model_type=="JTVAE":
starting_objects_smiles = starting_objects
elif model_type=="CharVAE":
starting_objects_smiles = convert_tensors_to_smiles(starting_objects, model.params.indices_char)
starting_objects_latent_embeddings = torch.zeros(num_objects_to_select, latent_space_dim).to(device)
starting_objects_properties = []
for batch_object_indices in range(0,num_objects_to_select,batch_size):
a, b = batch_object_indices, batch_object_indices+batch_size
if model_type=="JTVAE":
starting_objects_latent_embeddings[a:b] = model.encode_and_samples_from_smiles(starting_objects[a:b])
elif model_type=="CharVAE":
mu, log_var = model.encoder(starting_objects[a:b])
starting_objects_latent_embeddings[a:b] = model.sampling(mu, log_var)
for smile in starting_objects_smiles[a:b]:
starting_objects_properties.append(compute_target_logP(smile))
starting_objects_properties = torch.tensor(starting_objects_properties)
elif mode=="low_property_objects":
num_starting_points_selected = 0
index_object_in_dataset = 0
starting_objects = []
starting_objects_smiles = []
starting_objects_properties = []
starting_objects_latent_embeddings = torch.zeros(num_objects_to_select, latent_space_dim).to(device)
while num_starting_points_selected < num_objects_to_select:
if model_type=='JTVAE':
smile_potential_starting_object = data[index_object_in_dataset]
elif model_type=='CharVAE':
potential_starting_object = data[index_object_in_dataset].unsqueeze(0)
smile_potential_starting_object = convert_tensors_to_smiles(potential_starting_object, model.params.indices_char)[0]
final_logP = compute_target_logP(smile_potential_starting_object)
            if final_logP < property_upper_bound and final_logP > - 100: #NaN (invalid SMILES) fails both comparisons; the -100 floor excludes extreme outliers
if model_type=='JTVAE':
new_object_latent_representation = model.encode_and_samples_from_smiles([smile_potential_starting_object])
elif model_type=='CharVAE':
mu, log_var = model.encoder(potential_starting_object)
new_object_latent_representation = model.sampling(mu, log_var)
starting_objects_latent_embeddings[num_starting_points_selected] = new_object_latent_representation
starting_objects_properties.append(final_logP)
starting_objects_smiles.append(smile_potential_starting_object)
num_starting_points_selected+=1
index_object_in_dataset+=1
starting_objects_properties=torch.tensor(starting_objects_properties)
return starting_objects_latent_embeddings, starting_objects_properties, starting_objects_smiles
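# Hedged usage sketch (illustrative only; `model` is assumed to be a trained
# JTVAE and `data` its list of training SMILES):
#
#     z0, props0, smiles0 = starting_objects_latent_embeddings(
#         model, data, mode="low_property_objects",
#         num_objects_to_select=100, property_upper_bound=-2.0,
#         model_type="JTVAE",
#     )
#
# "random" encodes randomly chosen training molecules, while
# "low_property_objects" scans the dataset for molecules whose penalized logP
# falls below property_upper_bound, so optimization starts from poor candidates.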
###################################### OPTIMIZATION ROUTINES ##################################################
def gradient_ascent_optimization(model, starting_objects_latent_embeddings, number_gradient_steps=10,
uncertainty_decoder_method=None, num_sampled_models=10, num_sampled_outcomes=40,
model_decoding_mode=None, model_decoding_topk_value=None, alpha=1.0, normalize_gradients=True,
batch_size=64, uncertainty_threshold="No_constraint", keep_all_generated=False, model_type="JTVAE"
):
"""
    Perform gradient ascent in latent space. Filter out invalid points, i.e. points whose decoder uncertainty lies above the threshold. Keep the last number_starting_objects valid points.
model_decoding_mode and model_decoding_topk_value are only relevant for RNN decoding (CharVAE).
"""
number_starting_objects = len(starting_objects_latent_embeddings)
generated_objects_list=[]
if model_type=='JTVAE':
hidden_dim = model.latent_size*2
elif model_type=='CharVAE':
hidden_dim = model.params.z_dim
if model_decoding_mode is not None:
model.sampling_mode = model_decoding_mode
model.generation_top_k_sampling = model_decoding_topk_value
if uncertainty_threshold!='No_constraint':
uncertainty_threshold_value = decoder_uncertainty_stats_training[model_type][uncertainty_decoder_method][uncertainty_threshold]
all_points_latent_representation = torch.zeros((number_gradient_steps+1)*number_starting_objects, hidden_dim)
all_points_latent_representation[:number_starting_objects] = starting_objects_latent_embeddings.view(-1,hidden_dim)
new_objects_latent_representation = starting_objects_latent_embeddings
for step in tqdm.tqdm(range(1, number_gradient_steps+1)):
torch.cuda.empty_cache()
model.zero_grad()
gradient = torch.zeros(number_starting_objects, hidden_dim).to(device)
for batch_object_indices in range(0, number_starting_objects, batch_size):
model.zero_grad()
a, b = batch_object_indices , batch_object_indices+batch_size
new_objects_latent_representation_slice = torch.autograd.Variable(new_objects_latent_representation[a:b], requires_grad=True)
if model_type=='JTVAE':
predicted_property_slice = model.prop_net(new_objects_latent_representation_slice).squeeze()
predicted_property_slice = (predicted_property_slice - (final_logP_train_stats_raw['mean'])) / (final_logP_train_stats_raw['std'])
elif model_type=='CharVAE':
predicted_property_slice = model.qed_net(new_objects_latent_representation_slice).squeeze()
gradient[a:b] = torch.autograd.grad(outputs = predicted_property_slice,
inputs = new_objects_latent_representation_slice,
grad_outputs = torch.ones_like(predicted_property_slice).to(device),
retain_graph=False)[0]
if normalize_gradients:
gradient /= torch.norm(gradient,2)
new_objects_latent_representation = new_objects_latent_representation + alpha * gradient
all_points_latent_representation[step*number_starting_objects:(step+1)*number_starting_objects] = new_objects_latent_representation.view(-1,hidden_dim)
if uncertainty_threshold!='No_constraint':
if keep_all_generated: #Need to compute uncertainty for all points
with torch.no_grad():
num_points_total = (number_gradient_steps+1)*number_starting_objects
uncertainty_all_points = torch.zeros(num_points_total)
for batch_object_indices in range(0, num_points_total, batch_size):
z_slice = all_points_latent_representation[batch_object_indices:batch_object_indices+batch_size].to(device)
uncertainty_all_points[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = z_slice,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze().detach().cpu()
index_below_uncertainty_threshold = (uncertainty_all_points < uncertainty_threshold_value)
all_points_latent_representation = all_points_latent_representation[index_below_uncertainty_threshold]
selected_points = all_points_latent_representation
else: #We compute uncertainty in batches starting from latest batch of points generated, and continue until we have reached the desired number of points below uncertainty threshold
with torch.no_grad():
num_points_to_generate = number_starting_objects
point_index = (number_gradient_steps+1)*number_starting_objects + 1
selected_points=[]
while num_points_to_generate > 0:
if point_index>0:
potential_points=all_points_latent_representation[max(point_index-batch_size,0):point_index].view(-1,hidden_dim).to(device)
uncertainty_potential_points = model.decoder_uncertainty_from_latent(
z = potential_points,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze().detach().cpu()
count_below=(uncertainty_potential_points < uncertainty_threshold_value).sum()
num_points_to_generate -=count_below
selected_points.extend(potential_points[uncertainty_potential_points < uncertainty_threshold_value])
point_index-=batch_size
selected_points=selected_points[:number_starting_objects]
else:
if keep_all_generated:
selected_points=all_points_latent_representation
else:
selected_points=all_points_latent_representation[-number_starting_objects:]
with torch.no_grad():
if model_type=='JTVAE':
for idx in range(len(selected_points)):
z = selected_points[idx].view(1,hidden_dim).to(device)
z_tree, z_mol = z[:,:model.latent_size], z[:,model.latent_size:]
smiles_new_objects = model.decode(z_tree, z_mol, prob_decode=False)
generated_objects_list.append(smiles_new_objects)
elif model_type=='CharVAE':
for batch_object_indices in range(0, len(selected_points), batch_size):
decoded_new_objects = model.generate_from_latent(selected_points[batch_object_indices:batch_object_indices+batch_size].to(device))
smiles_new_objects = convert_tensors_to_smiles(decoded_new_objects, model.params.indices_char)
generated_objects_list.append(smiles_new_objects)
return generated_objects_list
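# Hedged usage sketch (illustrative only, continuing the example above):
#
#     generated = gradient_ascent_optimization(
#         model, z0, number_gradient_steps=10, alpha=1.0,
#         uncertainty_decoder_method='NLL_prior',
#         uncertainty_threshold='P95', model_type="JTVAE",
#     )
#
# Each step moves all latent points along the gradient of the property network;
# when a threshold is set, only points whose decoder uncertainty is below the
# chosen training-set percentile are decoded back to SMILES.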
def bayesian_optimization(model, starting_objects_latent_embeddings, starting_objects_properties, number_BO_steps, BO_uncertainty_mode,
BO_uncertainty_threshold='No_constraint', BO_uncertainty_coeff=0.0, uncertainty_decoder_method=None, num_sampled_models=10, num_sampled_outcomes = 40,
model_decoding_mode=None, model_decoding_topk_value=None, BO_acquisition_function="UCB", BO_default_value_invalid=0.0,
                          min_bound=-2, max_bound = 2, batch_size=64, generation_timeout_seconds=600, model_type="JTVAE"
):
"""
    Bayesian optimization in latent space. Two different modes: BO_uncertainty_mode=="Penalized_objective" (decoder uncertainty penalizes the surrogate's training targets) or BO_uncertainty_mode=="Uncertainty_censoring" (candidates whose decoder uncertainty exceeds a threshold are rejected).
model_decoding_mode and model_decoding_topk_value are only relevant for RNN decoding (CharVAE).
"""
smiles_generated_objects = []
pred_property_values = []
if model_type=='JTVAE':
hidden_dim = model.latent_size*2
elif model_type=='CharVAE':
hidden_dim = model.params.z_dim
if model_decoding_mode is not None:
model.sampling_mode = model_decoding_mode
model.generation_top_k_sampling = model_decoding_topk_value
#compute actual uncertainty threshold for uncertainty_censoring mode based on percentile
if BO_uncertainty_mode=="Uncertainty_censoring" and BO_uncertainty_threshold!='No_constraint':
BO_uncertainty_threshold_value = decoder_uncertainty_stats_training[model_type][uncertainty_decoder_method][BO_uncertainty_threshold]
objects_latent_representation = starting_objects_latent_embeddings.view(-1, hidden_dim)
objects_properties = starting_objects_properties.view(-1,1)
for step in tqdm.tqdm(range(number_BO_steps)):
num_training_points_surrogate = len(objects_latent_representation)
train_X = objects_latent_representation.detach().to(device)
train_Y = standardize(objects_properties).detach().to(device)
if BO_uncertainty_mode=="Penalized_objective" and BO_uncertainty_coeff > 0.0:
with torch.no_grad():
if step == 0: #On the first step, we compute uncertainty for all starting (latent) points
uncertainty_decoder = torch.zeros(num_training_points_surrogate).to(device)
for batch_object_indices in range(0, num_training_points_surrogate, batch_size):
a, b = batch_object_indices , batch_object_indices+batch_size
z_slice = objects_latent_representation[a:b].to(device)
uncertainty_decoder[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = z_slice,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze().detach().cpu()
else:
#For all subsequent steps, we just need to compute the uncertainty for the new point and add to previously computed uncertainties
                    new_point_uncertainty_decoder = model.decoder_uncertainty_from_latent(
z = generated_object.to(device),
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
)
uncertainty_decoder = torch.cat(tensors=(uncertainty_decoder, new_point_uncertainty_decoder.view(1)), dim=0)
train_Y = train_Y - BO_uncertainty_coeff * standardize(uncertainty_decoder.view(-1,1))
train_Y = train_Y.detach().to(device)
#Single-task exact GP model
gp = SingleTaskGP(train_X=train_X, train_Y=train_Y).to(device)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_model(mll)
#Acquisition function:
if BO_acquisition_function=="UCB":
BO_acq_func, q = UpperConfidenceBound(gp, beta=0.1), 1
elif BO_acquisition_function=="EI":
BO_acq_func, q = ExpectedImprovement(gp, best_f=0.1), 1
elif BO_acquisition_function=="PI":
BO_acq_func, q = ProbabilityOfImprovement(gp, best_f=0.1), 1
elif BO_acquisition_function=="qUCB":
BO_acq_func, q = qUpperConfidenceBound(gp, beta=0.1), 20
elif BO_acquisition_function=="qEI":
BO_acq_func, q = qExpectedImprovement(gp, best_f=0.1), 20
elif BO_acquisition_function=="qNoisyEI":
            BO_acq_func, q = qNoisyExpectedImprovement(gp, X_baseline=train_X), 20
#Optimize the acquisition function
print("Optimizing acq function")
bounds = torch.stack([torch.ones(hidden_dim) * min_bound, torch.ones(hidden_dim) * max_bound]).to(device)
generated_object, pred_property_value = optimize_acqf(
acq_function=BO_acq_func,
bounds=bounds,
q=q,
num_restarts=min(20,num_training_points_surrogate),
raw_samples=num_training_points_surrogate,
sequential=True,
return_best_only=True
)
generated_object = generated_object.view(-1,hidden_dim)
with torch.no_grad():
if BO_uncertainty_mode=="Uncertainty_censoring" and BO_uncertainty_threshold!="No_constraint":
#Compute uncertainty for each candidate. Check which are below threshold. If at least one, return the one with best predicted value. Otherwise, return lowest uncertainty point.
uncertainty_generated = torch.zeros(len(generated_object)).to(device)
for batch_object_indices in range(0, q, batch_size):
a, b = batch_object_indices , batch_object_indices+batch_size
z_slice = generated_object[a:b].to(device)
uncertainty_generated[batch_object_indices:batch_object_indices+batch_size] = model.decoder_uncertainty_from_latent(
z = z_slice,
method = uncertainty_decoder_method,
num_sampled_models=num_sampled_models,
num_sampled_outcomes=num_sampled_outcomes
).squeeze()
index_below_uncertainty_threshold = (uncertainty_generated < BO_uncertainty_threshold_value)
num_below_threshold = index_below_uncertainty_threshold.int().sum()
if num_below_threshold > 0:
generated_object = generated_object[index_below_uncertainty_threshold]
pred_property_value = pred_property_value[index_below_uncertainty_threshold]
generated_object = generated_object[-1]
pred_property_value = pred_property_value[-1]
else:
min_uncertainty_point = uncertainty_generated.argmin()
generated_object = generated_object[min_uncertainty_point]
pred_property_value = pred_property_value[min_uncertainty_point]
else:
if len(generated_object)>1:
generated_object = generated_object[-1]
pred_property_value = pred_property_value[-1]
pred_property_values.append(pred_property_value.item())
generated_object = generated_object.view(1,hidden_dim)
with torch.no_grad():
if model_type=='JTVAE':
z = generated_object.view(1,hidden_dim).to(device)
z_tree, z_mol = z[:,:model.latent_size], z[:,model.latent_size:]
try:
                    smiles_new_object = func_timeout.func_timeout(generation_timeout_seconds, model.decode, args=(z_tree, z_mol), kwargs={'prob_decode':False})
new_point_property = compute_target_logP(smiles_new_object, default_value=BO_default_value_invalid)
smiles_generated_objects.append(smiles_new_object)
objects_properties = torch.cat(tensors=(objects_properties.float(), torch.tensor(new_point_property).view(1).float()), dim=0)
objects_latent_representation = torch.cat(tensors=(objects_latent_representation, generated_object.view(1,hidden_dim)), dim=0)
except:
print("timed out")
elif model_type=='CharVAE':
decoded_new_object = model.generate_from_latent(generated_object)
smiles_new_object = convert_tensors_to_smiles(decoded_new_object, model.params.indices_char)[0]
smiles_generated_objects.append(smiles_new_object)
new_point_property = compute_target_logP(smiles_new_object, default_value=BO_default_value_invalid)
objects_properties = torch.cat(tensors=(objects_properties.float(), torch.tensor(new_point_property).view(1).float()), dim=0)
objects_latent_representation = torch.cat(tensors=(objects_latent_representation, generated_object.view(1,hidden_dim)), dim=0)
return smiles_generated_objects, pred_property_values
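# Hedged usage sketch (illustrative only, continuing the examples above):
#
#     smiles, preds = bayesian_optimization(
#         model, z0, props0, number_BO_steps=50,
#         BO_uncertainty_mode="Penalized_objective", BO_uncertainty_coeff=0.1,
#         uncertainty_decoder_method='NLL_prior',
#         BO_acquisition_function="UCB", model_type="JTVAE",
#     )
#
# Each BO step refits a GP surrogate on every latent point seen so far,
# optimizes the acquisition function inside the latent box bounds, decodes the
# proposed point, scores it with compute_target_logP, and appends it to the
# surrogate's training set.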
|
80920
|
import numpy as np
import pytest
from ansys import dpf
from ansys.dpf import core
from ansys.dpf.core import FieldDefinition
from ansys.dpf.core import operators as ops
from ansys.dpf.core.common import locations, shell_layers
@pytest.fixture()
def stress_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
return stress.outputs.fields_container()[0]
def test_create_field():
field = dpf.core.Field()
assert field._message.id != 0
def test_create_field_from_helper_scalar():
data = np.random.random(10)
field_a = dpf.core.field_from_array(data)
assert np.allclose(field_a.data, data)
def test_create_field_from_helper_vector():
data = np.random.random((10, 3))
field_a = dpf.core.field_from_array(data)
assert np.allclose(field_a.data, data)
def test_createbycopy_field():
field = dpf.core.Field()
field2 = dpf.core.Field(field=field._message)
assert field._message.id == field2._message.id
def test_set_get_scoping():
field = dpf.core.Field()
scoping = dpf.core.Scoping()
ids = [1, 2, 3, 5, 8, 9, 10]
scoping.ids = ids
field.scoping = scoping
assert field.scoping.ids == ids
def test_set_get_data_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_set_get_data_array_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
data.append(i + 0.001)
data.append(i + 0.001)
data = np.array(data)
data = data.reshape((20, 3))
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_append_data_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
data = [0.01 + i, 0.02 + i, 0.03 + i]
field.append(data, scopingid)
scopingOut = field.scoping
assert scopingOut.ids == list(range(1, 21))
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
datain = [0.01 + i, 0.02 + i, 0.03 + i]
dataout = field.get_entity_data(scopingindex)
assert np.allclose(dataout, datain)
def test_set_get_entity_data_array_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.vector)
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
data = [0.01 + i, 0.02 + i, 0.03 + i]
data = np.array(data)
data = data.reshape((1, 3))
field.append(data, scopingid)
scopingOut = field.scoping
assert scopingOut.ids == list(range(1, 21))
for i in range(0, 20):
scopingid = i + 1
scopingindex = i
datain = [0.01 + i, 0.02 + i, 0.03 + i]
dataout = field.get_entity_data(scopingindex)
assert np.allclose(dataout, datain)
dataout = field.get_entity_data_by_id(scopingid)
assert np.allclose(dataout, datain)
# def test_get_data_ptr_field():
# field= dpf.core.Field(nentities=3, nature=dpf.core.natures.scalar,
# location=dpf.core.locations.elemental_nodal)
# data = [0.01,0.02,0.03]
# field.set_entity_data(data,0,1)
# data = [0.01,0.02,0.03,0.01,0.02,0.03]
# field.set_entity_data(data,1,2)
# data = [0.01,0.02,0.03,0.01]
# field.set_entity_data(data,2,3)
# scopingOut = field.scoping
# assert scopingOut.ids == [1,2,3]
# dataptr = field.data_ptr
# assert dataptr == [0,3,9]
def test_set_get_data_property_field():
field = core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert np.allclose(field.data, data)
def test_count_field():
field = dpf.core.Field(nentities=20, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert field.component_count == 1
assert field.elementary_data_count == 20
assert field.size == 20
def test_resize_field():
field = dpf.core.Field(nentities=1, nature=dpf.core.natures.scalar)
scoping = dpf.core.Scoping()
ids = []
data = []
for i in range(0, 20):
ids.append(i + 1)
data.append(i + 0.001)
field.resize(20, 20)
scoping.ids = ids
field.scoping = scoping
field.data = data
assert field.component_count == 1
assert field.elementary_data_count == 20
assert field.size == 20
def test_fromarray_field():
data = np.empty((100, 6))
f = dpf.core.field_from_array(data)
assert f.shape == (100, 6)
def test_field_definition_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
assert f.unit == "m"
assert f.location == dpf.core.locations.nodal
def test_field_definition_modif_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
fielddef = f.field_definition
assert fielddef.unit == "m"
assert fielddef.location == dpf.core.locations.nodal
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [3]
assert fielddef.shell_layers == dpf.core.shell_layers.layerindependent
fielddef.unit = "mm"
assert fielddef.unit == "mm"
fielddef.location = dpf.core.locations.elemental
assert fielddef.location == dpf.core.locations.elemental
fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
fielddef.dimensionality = dpf.core.Dimensionality.tensor_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.symmatrix
assert fielddef.dimensionality.dim == [3, 3]
fielddef.dimensionality = dpf.core.Dimensionality.vector_3d_dim()
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [3]
fielddef.dimensionality = dpf.core.Dimensionality.vector_dim(4)
assert fielddef.dimensionality.nature == dpf.core.natures.vector
assert fielddef.dimensionality.dim == [4]
fielddef.shell_layers = dpf.core.shell_layers.bottom
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
def test_field_definition_set_in_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
fielddef = f.field_definition
fielddef.unit = "mm"
fielddef.location = dpf.core.locations.elemental
fielddef.dimensionality = dpf.core.Dimensionality.scalar_dim()
fielddef.shell_layers = dpf.core.shell_layers.bottom
f.field_definition = fielddef
fielddef = f.field_definition
assert fielddef.unit == "mm"
assert fielddef.location == dpf.core.locations.elemental
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
assert f.unit == "mm"
assert f.location == dpf.core.locations.elemental
assert f.dimensionality.nature == dpf.core.natures.scalar
assert f.dimensionality.dim == [1]
assert f.shell_layers == dpf.core.shell_layers.bottom
def test_change_field_definition_in_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("U")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
f = fcOut[0]
f.unit = "mm"
f.location = dpf.core.locations.elemental
f.dimensionality = dpf.core.Dimensionality.scalar_dim()
f.shell_layers = dpf.core.shell_layers.bottom
fielddef = f.field_definition
assert fielddef.unit == "mm"
assert fielddef.location == dpf.core.locations.elemental
assert fielddef.dimensionality.nature == dpf.core.natures.scalar
assert fielddef.dimensionality.dim == [1]
assert fielddef.shell_layers == dpf.core.shell_layers.bottom
assert f.unit == "mm"
assert f.location == dpf.core.locations.elemental
assert f.dimensionality.nature == dpf.core.natures.scalar
assert f.dimensionality.dim == [1]
assert f.shell_layers == dpf.core.shell_layers.bottom
def test_create_overall_field():
field_overall = dpf.core.Field(nentities=1, location="overall", nature="vector")
field_overall.scoping.location = "overall"
field_overall.scoping.ids = [0]
field_overall.data = [1.0, 2.0, 3.0]
field = dpf.core.Field(nentities=5, location="nodal")
field.scoping.location = "nodal"
field.scoping.ids = list(range(1, 6))
data = [float(i) for i in range(0, 15)]
field.data = data
add = dpf.core.Operator("add")
add.inputs.fieldA(field)
add.inputs.fieldB(field_overall)
field_added = add.outputs.field()
data_added = field_added.data
for i in range(0, 5):
assert np.allclose(data_added[i], [i * 3.0 + 1.0, i * 3.0 + 3.0, i * 3.0 + 5.0])
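# Note (added for clarity; an inference from the assertions above): an
# "overall" field holds a single entity that the "add" operator broadcasts to
# every entity of the other field, so node i carrying [3i, 3i+1, 3i+2] plus the
# overall [1.0, 2.0, 3.0] yields [3i + 1, 3i + 3, 3i + 5].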
def test_data_pointer_field(allkindofcomplexity):
dataSource = dpf.core.DataSources()
dataSource.set_result_file_path(allkindofcomplexity)
op = dpf.core.Operator("S")
op.connect(4, dataSource)
fcOut = op.get_output(0, dpf.core.types.fields_container)
data_pointer = fcOut[0]._data_pointer
assert len(data_pointer) == len(fcOut[0].scoping)
assert data_pointer[0] == 0
assert data_pointer[1] == 72
f = fcOut[0]
data_pointer[1] = 40
f._data_pointer = data_pointer
data_pointer = fcOut[0]._data_pointer
assert len(data_pointer) == len(fcOut[0].scoping)
assert data_pointer[0] == 0
assert data_pointer[1] == 40
def test_data_pointer_prop_field():
pfield = dpf.core.PropertyField()
pfield.append([1, 2, 3], 1)
pfield.append([1, 2, 3, 4], 2)
pfield.append([1, 2, 3], 3)
data_pointer = pfield._data_pointer
assert len(data_pointer) == 3
assert data_pointer[0] == 0
assert data_pointer[1] == 3
assert data_pointer[2] == 7
data_pointer[1] = 4
pfield._data_pointer = data_pointer
data_pointer = pfield._data_pointer
assert len(data_pointer) == 3
assert data_pointer[0] == 0
assert data_pointer[1] == 4
assert data_pointer[2] == 7
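# Note (added for clarity; an inference from the test above): for ragged fields
# such as property or elemental-nodal fields, _data_pointer stores the start
# offset of each entity in the flat data array, so entities of sizes 3, 4 and 3
# give pointers [0, 3, 7]; rewriting a pointer changes how the flat data is
# split between entities.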
def test_append_data_elemental_nodal_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.location == "ElementalNodal"
f_new = dpf.core.Field(
f.scoping.size,
nature=dpf.core.natures.symmatrix,
location=dpf.core.locations.elemental_nodal,
)
size = int(f.scoping.size / 100)
for i in range(0, size):
f_new.append(f.get_entity_data(i), f.scoping.id(i))
for i in range(0, size):
assert np.allclose(f_new.get_entity_data(i), f.get_entity_data(i))
def test_str_field(stress_field):
assert "Location" in str(stress_field)
assert "ElementalNodal" in str(stress_field)
assert "Unit" in str(stress_field)
assert "Pa" in str(stress_field)
assert "9255" in str(stress_field)
assert "40016" in str(stress_field)
assert "6" in str(stress_field)
def test_to_nodal(stress_field):
assert stress_field.location == "ElementalNodal"
field_out = stress_field.to_nodal()
assert field_out.location == "Nodal"
def test_mesh_support_field(stress_field):
mesh = stress_field.meshed_region
assert len(mesh.nodes.scoping) == 15129
assert len(mesh.elements.scoping) == 10292
def test_shell_layers_1(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.topbottommid
model = dpf.core.Model(allkindofcomplexity)
disp = model.results.displacement()
f = disp.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.layerindependent
def test_shell_layers_2(velocity_acceleration):
model = dpf.core.Model(velocity_acceleration)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
assert f.shell_layers == shell_layers.nonelayer
def test_mesh_support_field_model(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
f = stress.outputs.fields_container()[0]
mesh = f.meshed_region
assert len(mesh.nodes.scoping) == 15129
assert len(mesh.elements.scoping) == 10292
def test_delete_auto_field():
field = dpf.core.Field()
field2 = dpf.core.Field(field=field)
del field
with pytest.raises(Exception):
field2.get_ids()
def test_create_and_update_field_definition():
fieldDef = FieldDefinition()
assert fieldDef is not None
with pytest.raises(Exception):
assert fieldDef.location is None
fieldDef.location = locations.nodal
assert fieldDef.location == locations.nodal
def test_set_support_timefreq(simple_bar):
tfq = dpf.core.TimeFreqSupport()
time_frequencies = dpf.core.Field(
nature=dpf.core.natures.scalar, location=dpf.core.locations.time_freq
)
time_frequencies.scoping.location = dpf.core.locations.time_freq_step
time_frequencies.append([0.1, 0.32, 0.4], 1)
tfq.time_frequencies = time_frequencies
model = dpf.core.Model(simple_bar)
disp = model.results.displacement()
fc = disp.outputs.fields_container()
field = fc[0]
# initial_support = field.time_freq_support
# assert initial_support is None
field.time_freq_support = tfq
tfq_to_check = field.time_freq_support
assert np.allclose(tfq.time_frequencies.data, tfq_to_check.time_frequencies.data)
def test_set_support_mesh(simple_bar):
mesh = dpf.core.MeshedRegion()
mesh.nodes.add_node(1, [0.0, 0.0, 0.0])
model = dpf.core.Model(simple_bar)
disp = model.results.displacement()
fc = disp.outputs.fields_container()
field = fc[0]
field.meshed_region = mesh
mesh_to_check = field.meshed_region
assert mesh_to_check.nodes.n_nodes == 1
assert mesh_to_check.elements.n_elements == 0
mesh.nodes.add_node(2, [1.0, 0.0, 0.0])
mesh.nodes.add_node(3, [1.0, 1.0, 0.0])
mesh.nodes.add_node(4, [0.0, 1.0, 0.0])
field.meshed_region = mesh
mesh_to_check_2 = field.meshed_region
assert mesh_to_check_2.nodes.n_nodes == 4
assert mesh_to_check_2.elements.n_elements == 0
def test_local_field_append():
num_entities = 400
field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([0.1 * i, 0.2 * i, 0.3 * i], i)
        assert f._is_set is True
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append([0.1 * i, 0.2 * i, 0.3 * i], i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == 0
def test_local_elemental_nodal_field_append():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]], i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
# flat data
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i], i)
        assert f._is_set is True
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
def test_local_array_field_append():
num_entities = 400
field_to_local = dpf.core.fields_factory.create_3d_vector_field(num_entities)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
assert f._is_set is True
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append(np.array([0.1 * i, 0.2 * i, 0.3 * i]), i)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == 0
def test_local_elemental_nodal_array_field_append():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
field = dpf.core.fields_factory.create_3d_vector_field(num_entities)
for i in range(1, num_entities + 1):
field.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
# flat data
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([0.1 * i, 0.2 * i, 0.3 * i, 0.1 * i, 0.2 * i, 0.3 * i]), i
)
assert np.allclose(field.data, field_to_local.data)
assert np.allclose(field.scoping.ids, field_to_local.scoping.ids)
assert len(field_to_local._data_pointer) == num_entities
def test_local_get_entity_data():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(np.array([[0.1 * i, 0.2 * i, 0.3 * i]]), i)
assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
assert np.allclose(
f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
)
assert hasattr(f, "_is_set") is True
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
assert np.allclose(f.get_entity_data(i - 1), [[0.1 * i, 0.2 * i, 0.3 * i]])
assert np.allclose(
f.get_entity_data_by_id(i), [[0.1 * i, 0.2 * i, 0.3 * i]]
)
assert hasattr(f, "_is_set") is False
def test_local_elemental_nodal_get_entity_data():
num_entities = 100
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
f.append(
np.array([[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]]), i
)
assert np.allclose(
f.get_entity_data(i - 1),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert np.allclose(
f.get_entity_data_by_id(i),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert hasattr(f, "_is_set") is True
assert f._is_set is True
with field_to_local.as_local_field() as f:
for i in range(1, num_entities + 1):
assert np.allclose(
f.get_entity_data(i - 1),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert np.allclose(
f.get_entity_data_by_id(i),
[[0.1 * i, 0.2 * i, 0.3 * i], [0.1 * i, 0.2 * i, 0.3 * i]],
)
assert hasattr(f, "_is_set") is False
def test_auto_delete_field_local():
num_entities = 1
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
field_to_local.append([3.0, 4.0, 5.0], 1)
fc = dpf.core.fields_container_factory.over_time_freq_fields_container(
[field_to_local]
)
field_to_local = None
with fc[0].as_local_field() as f:
assert np.allclose(f.get_entity_data(0), [3.0, 4.0, 5.0])
def test_auto_delete_field_local2():
num_entities = 1
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
num_entities, location=dpf.core.locations.elemental_nodal
)
f = field_to_local.as_local_field()
f.append([3.0, 4.0, 5.0], 1)
del f
with field_to_local.as_local_field() as f:
assert np.allclose(f.get_entity_data(0), [3.0, 4.0, 5.0])
def test_get_set_data_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
with field_to_local.as_local_field() as f:
f.data = [0.1, 0.2, 0.3, 0.1, 0.2, 0.3]
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
with field_to_local.as_local_field() as f:
f.data = np.array([[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
def test_get_set_data_elemental_nodal_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
with field_to_local.as_local_field() as f:
f.data = [0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.4]
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
with field_to_local.as_local_field() as f:
f.data = np.array(
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
f._data_pointer = [0, 6]
f.scoping_ids = [1, 2]
assert np.allclose(
f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
assert np.allclose(f._data_pointer, [0, 6])
assert np.allclose(f.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]])
assert hasattr(f, "_is_set") is True
assert f._is_set is True
assert np.allclose(
field_to_local.data,
[[0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.3], [0.1, 0.2, 0.4]],
)
assert np.allclose(field_to_local._data_pointer, [0, 6])
assert np.allclose(
field_to_local.get_entity_data(0), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
)
assert np.allclose(
field_to_local.get_entity_data(1), [[0.1, 0.2, 0.3], [0.1, 0.2, 0.4]]
)
def test_get_set_scoping_local_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(
2, location=dpf.core.locations.elemental_nodal
)
with field_to_local.as_local_field() as f:
f.data = [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]]
f.scoping = dpf.core.Scoping(ids=[3, 4])
assert np.allclose(f.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(f.scoping_ids, [3, 4])
assert np.allclose(f.scoping.ids, [3, 4])
assert np.allclose(field_to_local.data, [[0.1, 0.2, 0.3], [0.1, 0.2, 0.3]])
assert np.allclose(field_to_local.scoping.ids, [3, 4])
def test_empty_data_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(100)
data = [1.0, 2.0, 3.0]
field_to_local.data = data
assert np.allclose(field_to_local.data, data)
field_to_local.data = []
assert len(field_to_local.data) == 0
def test_set_data_numpy_array_field():
field_to_local = dpf.core.fields_factory.create_3d_vector_field(100)
arr = np.arange(300).reshape(100, 3)
field_to_local.data = arr
assert np.allclose(field_to_local.data, arr)
def test_field_huge_amount_of_data(allkindofcomplexity):
# set data with a field created from a model
model = dpf.core.Model(allkindofcomplexity)
field = model.results.displacement().outputs.fields_container()[0]
data = field.data
assert len(data) == 15113
field.data = data
new_data = field.data
assert np.allclose(data, new_data)
modif_data = data
modif_data[245] = 45
modif_data[1129] = 69
modif_data[7209] = 2086
modif_data[9046] = 12
modif_data[12897] = 7894
modif_data[15112] = 2789
field.data = modif_data
new_modif_data = field.data
assert np.allclose(new_modif_data, modif_data)
# set data with a field created from scratch
field = dpf.core.Field(nature=dpf.core.natures.scalar)
data = range(1, 1000000)
field.data = data
data_check = field.data
assert np.allclose(data_check, data)
modif_data = data_check
modif_data[245] = 45
modif_data[10046] = 69
modif_data[1999] = 2086
modif_data[50067] = 12
modif_data[999345] = 7894
modif_data[506734] = 2789
modif_data = modif_data.tolist()
field.data = modif_data
new_modif_data = field.data
assert np.allclose(new_modif_data, modif_data)
def test_deep_copy_field():
field = dpf.core.fields_factory.create_3d_vector_field(100)
arr = np.arange(300).reshape(100, 3)
field.data = arr
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
assert iden.outputs.boolean()
assert field.unit == copy.unit
def test_deep_copy_elemental_nodal_field(allkindofcomplexity):
model = dpf.core.Model(allkindofcomplexity)
stress = model.results.stress()
field = stress.outputs.fields_container()[0]
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
try:
assert iden.outputs.boolean()
except AssertionError as e:
print(iden.outputs.message())
raise e
mesh = field.meshed_region
copy = copy.meshed_region
assert copy.nodes.scoping.ids == mesh.nodes.scoping.ids
assert copy.elements.scoping.ids == mesh.elements.scoping.ids
assert copy.unit == mesh.unit
assert np.allclose(
copy.nodes.coordinates_field.data, mesh.nodes.coordinates_field.data
)
assert np.allclose(
copy.elements.element_types_field.data, mesh.elements.element_types_field.data
)
assert np.allclose(
copy.elements.connectivities_field.data, mesh.elements.connectivities_field.data
)
assert np.allclose(
copy.nodes.coordinates_field.scoping.ids,
mesh.nodes.coordinates_field.scoping.ids,
)
assert np.allclose(
copy.elements.element_types_field.scoping.ids,
mesh.elements.element_types_field.scoping.ids,
)
assert np.allclose(
copy.elements.connectivities_field.scoping.ids,
mesh.elements.connectivities_field.scoping.ids,
)
def test_deep_copy_over_time_field(velocity_acceleration):
model = dpf.core.Model(velocity_acceleration)
stress = model.results.stress(time_scoping=[1, 2, 3])
min_max = dpf.core.operators.min_max.min_max_fc(stress)
field = min_max.outputs.field_max()
copy = field.deep_copy()
iden = dpf.core.operators.logic.identical_fields(field, copy)
assert iden.outputs.boolean()
tf = field.time_freq_support
copy = copy.time_freq_support
assert np.allclose(tf.time_frequencies.data, copy.time_frequencies.data)
assert tf.time_frequencies.scoping.ids == copy.time_frequencies.scoping.ids
def test_deep_copy_spec_ncomp_field():
field = dpf.core.fields_factory.create_vector_field(100, 6, dpf.core.locations.elemental)
arr = np.arange(600).reshape(100, 6)
field.data = arr
copy = field.deep_copy()
assert copy.component_count == 6
assert copy.location == dpf.core.locations.elemental
def test_add_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field+op
forward = ops.utility.forward_field(field)
add = field + forward
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array(field.data) * 2.0)
# field + list
add = field + [0.0, 1.0, 2.0]
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert len(out) == 6
assert out.scoping.ids == [1, 2]
assert np.allclose(
out.data, field.data + np.array([[0.0, 1.0, 2.0], [0.0, 1.0, 2.0]])
)
# field + float
add = field + 1.0
assert isinstance(add, ops.math.add)
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
def test_minus_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field-op
forward = ops.utility.forward_field(field)
add = field - forward
assert type(add) == ops.math.minus
out = add.outputs.field()
assert len(out) == 6
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.zeros((2, 3)))
    # field - list
add = field - [0.0, 1.0, 2.0]
assert type(add) == ops.math.minus
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[0.0, 0.0, 0.0], [3.0, 3.0, 3.0]]))
    # field - float
add = field - 1.0
assert type(add) == ops.math.minus
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([[-1.0, 0.0, 1.0], [2.0, 3.0, 4.0]]))
def test_dot_operator_field():
field = dpf.core.fields_factory.create_3d_vector_field(2)
field.data = [0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
field.scoping.ids = [1, 2]
# field * op
forward = ops.utility.forward_field(field)
add = field * forward
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 50.0]))
# field * field
add = field * field
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 50.0]))
# field * list
add = field * [0.0, 1.0, 2.0]
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, np.array([5.0, 14.0]))
# field * float
add = field * -1.0
assert type(add) == ops.math.generalized_inner_product
out = add.outputs.field()
assert out.scoping.ids == [1, 2]
assert np.allclose(out.data, -field.data)
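# Hedged sketch (added): the pattern the local-field tests above rely on, in
# miniature. `as_local_field` buffers edits client-side and commits them to
# the server-side field when the `with` block exits.
def example_local_field_roundtrip():
    field = dpf.core.fields_factory.create_3d_vector_field(3)
    with field.as_local_field() as f:
        for i in range(1, 4):
            f.append(np.array([1.0 * i, 2.0 * i, 3.0 * i]), i)
    # edits are visible on the remote field only after the context exits
    assert np.allclose(field.get_entity_data_by_id(2), [2.0, 4.0, 6.0])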
if __name__ == "__main__":
test_get_set_data_local_field()
|
80961
|
from typing import List, Optional
import databases
import sqlalchemy
from fastapi import FastAPI
import ormar
app = FastAPI()
metadata = sqlalchemy.MetaData()
database = databases.Database("sqlite:///test.db")
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
database_ = app.state.database
if not database_.is_connected:
await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
database_ = app.state.database
if database_.is_connected:
await database_.disconnect()
class Category(ormar.Model):
class Meta:
tablename = "categories"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
class Item(ormar.Model):
class Meta:
tablename = "items"
metadata = metadata
database = database
id: int = ormar.Integer(primary_key=True)
name: str = ormar.String(max_length=100)
category: Optional[Category] = ormar.ForeignKey(Category, nullable=True)
@app.get("/items/", response_model=List[Item])
async def get_items():
items = await Item.objects.select_related("category").all()
return items
@app.post("/items/", response_model=Item)
async def create_item(item: Item):
await item.save()
return item
@app.post("/categories/", response_model=Category)
async def create_category(category: Category):
await category.save()
return category
@app.put("/items/{item_id}")
async def update_item(item_id: int, item: Item):
item_db = await Item.objects.get(pk=item_id)
return await item_db.update(**item.dict())
@app.delete("/items/{item_id}")
async def delete_item(item_id: int, item: Item = None):
if item:
return {"deleted_rows": await item.delete()}
item_db = await Item.objects.get(pk=item_id)
return {"deleted_rows": await item_db.delete()}
|
80991
|
import yaml
import pytest
from unittest import mock
import kubernetes
from kubernetes.config.config_exception import ConfigException
from mlflow.projects import kubernetes as kb
from mlflow.exceptions import ExecutionException
from mlflow.entities import RunStatus
def test_run_command_creation(): # pylint: disable=unused-argument
"""
Tests command creation.
"""
command = [
"python train.py --alpha 0.5 --l1-ratio 0.1",
"--comment 'foo bar'",
'--comment-bis "bar foo"',
]
command = kb._get_run_command(command)
assert [
"python",
"train.py",
"--alpha",
"0.5",
"--l1-ratio",
"0.1",
"--comment",
"'foo bar'",
"--comment-bis",
"'bar foo'",
] == command
def test_valid_kubernetes_job_spec(): # pylint: disable=unused-argument
"""
Tests job specification for Kubernetes.
"""
custom_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" env: \n"
" - name: DUMMY\n"
' value: "test_var"\n'
" restartPolicy: Never\n"
)
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["mlflow", "run", ".", "--no-conda", "-P", "alpha=0.5"]
env_vars = {"RUN_ID": "1"}
job_definition = kb._get_kubernetes_job_definition(
project_name=project_name,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=custom_template,
)
container_spec = job_definition["spec"]["template"]["spec"]["containers"][0]
assert container_spec["name"] == project_name
assert container_spec["image"] == image_tag + "@" + image_digest
assert container_spec["command"] == command
assert 2 == len(container_spec["env"])
assert container_spec["env"][0]["name"] == "DUMMY"
assert container_spec["env"][0]["value"] == "test_var"
assert container_spec["env"][1]["name"] == "RUN_ID"
assert container_spec["env"][1]["value"] == "1"
def test_run_kubernetes_job():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = "docker-for-desktop"
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
args = kube_config_mock.call_args_list
assert args[0][1]["context"] == kube_context
def test_run_kubernetes_job_current_kubecontext():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = None
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
assert kube_config_mock.call_count == 1
assert incluster_kube_config_mock.call_count == 0
def test_run_kubernetes_job_in_cluster():
active_run = mock.Mock()
project_name = "mlflow-docker-example"
image_tag = "image_tag"
image_digest = "5e74a5a"
command = ["python train.py --alpha 0.5 --l1-ratio 0.1"]
env_vars = {"RUN_ID": "1"}
kube_context = None
job_template = yaml.safe_load(
"apiVersion: batch/v1\n"
"kind: Job\n"
"metadata:\n"
" name: pi-with-ttl\n"
" namespace: mlflow\n"
"spec:\n"
" ttlSecondsAfterFinished: 100\n"
" template:\n"
" spec:\n"
" containers:\n"
" - name: pi\n"
" image: perl\n"
" command: ['perl', '-Mbignum=bpi', '-wle']\n"
" restartPolicy: Never\n"
)
with mock.patch("kubernetes.config.load_kube_config") as kube_config_mock:
kube_config_mock.side_effect = ConfigException()
with mock.patch("kubernetes.config.load_incluster_config") as incluster_kube_config_mock:
with mock.patch("kubernetes.client.BatchV1Api.create_namespaced_job") as kube_api_mock:
submitted_run_obj = kb.run_kubernetes_job(
project_name=project_name,
active_run=active_run,
image_tag=image_tag,
image_digest=image_digest,
command=command,
env_vars=env_vars,
job_template=job_template,
kube_context=kube_context,
)
assert submitted_run_obj._mlflow_run_id == active_run.info.run_id
assert submitted_run_obj._job_name.startswith(project_name)
assert submitted_run_obj._job_namespace == "mlflow"
assert kube_api_mock.call_count == 1
assert kube_config_mock.call_count == 1
assert incluster_kube_config_mock.call_count == 1
def test_push_image_to_registry():
image_uri = "dockerhub_account/mlflow-kubernetes-example"
with mock.patch("docker.from_env") as docker_mock:
client = mock.MagicMock()
docker_mock.return_value = client
kb.push_image_to_registry(image_uri)
assert client.images.push.call_count == 1
args = client.images.push.call_args_list
assert args[0][1]["repository"] == image_uri
def test_push_image_to_registry_handling_errors():
image_uri = "dockerhub_account/mlflow-kubernetes-example"
with pytest.raises(ExecutionException):
kb.push_image_to_registry(image_uri)
def test_submitted_run_get_status_killed():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
with mock.patch("kubernetes.client.BatchV1Api.delete_namespaced_job") as kube_api_mock:
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
submitted_run.cancel()
assert RunStatus.KILLED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_failed():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
condition = kubernetes.client.models.V1JobCondition(type="Failed", status="True")
job_status = kubernetes.client.models.V1JobStatus(
active=1,
completion_time=None,
conditions=[condition],
failed=1,
start_time=1,
succeeded=None,
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
print("status", submitted_run.get_status())
assert RunStatus.FAILED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_succeeded():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
job_status = kubernetes.client.models.V1JobStatus(
active=None,
completion_time=None,
conditions=[condition],
failed=None,
start_time=None,
succeeded=1,
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
print("status", submitted_run.get_status())
assert RunStatus.FINISHED == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_submitted_run_get_status_running():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
job_status = kubernetes.client.models.V1JobStatus(
active=1, completion_time=None, conditions=None, failed=1, start_time=1, succeeded=1
)
job = kubernetes.client.models.V1Job(status=job_status)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
kube_api_mock.return_value = job
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
assert RunStatus.RUNNING == submitted_run.get_status()
assert kube_api_mock.call_count == 1
args = kube_api_mock.call_args_list
print(args)
assert args[0][1]["name"] == job_name
assert args[0][1]["namespace"] == job_namespace
def test_state_transitions():
mlflow_run_id = 1
job_name = "job-name"
job_namespace = "job-namespace"
submitted_run = kb.KubernetesSubmittedRun(mlflow_run_id, job_name, job_namespace)
with mock.patch("kubernetes.client.BatchV1Api.read_namespaced_job_status") as kube_api_mock:
def set_return_value(**kwargs):
job_status = kubernetes.client.models.V1JobStatus(**kwargs)
kube_api_mock.return_value = kubernetes.client.models.V1Job(status=job_status)
set_return_value()
assert RunStatus.SCHEDULED == submitted_run.get_status()
set_return_value(start_time=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, active=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, succeeded=1)
assert RunStatus.RUNNING == submitted_run.get_status()
set_return_value(start_time=1, failed=1, succeeded=1, completion_time=2)
assert RunStatus.RUNNING == submitted_run.get_status()
condition = kubernetes.client.models.V1JobCondition(type="Complete", status="True")
set_return_value(
conditions=[condition], failed=1, start_time=1, completion_time=2, succeeded=1
)
assert RunStatus.FINISHED == submitted_run.get_status()
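# Hedged summary (added): the V1JobStatus -> RunStatus mapping implied by the
# assertions above (inferred from these tests, not copied from mlflow's source):
#   no start_time                           -> SCHEDULED
#   start_time set, no terminal condition   -> RUNNING
#   condition type == "Complete"            -> FINISHED
#   condition type == "Failed"              -> FAILED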
|
81001
|
import argparse
import os
import windows
import windows.debug.symbols as symbols
parser = argparse.ArgumentParser(prog=__file__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('pattern')
parser.add_argument('file', help="The PE file to load")
parser.add_argument('--addr', type=lambda x: int(x, 0), default=0, help="The load address of the PE")
parser.add_argument('--tag', type=lambda x: int(x, 0), default=0)
parser.add_argument('--dbghelp', help='Path of the dbghelp DLL to use (defaults to env:PFW_DBGHELP_PATH)')
args = parser.parse_args()
if args.dbghelp:
symbols.set_dbghelp_path(args.dbghelp)
else:
if "PFW_DBGHELP_PATH" not in os.environ:
print("Not dbghelp path given and no environ var 'PFW_DBGHELP_PATH' sample may fail")
sh = symbols.VirtualSymbolHandler()
mod = sh.load_file(path=args.file, addr=args.addr)
res = sh.search(args.pattern, mod=mod, tag=args.tag)
print("{0} symbols found:".format(len(res)))
for sym in res:
print(" * {0!r}".format(sym))
|
81024
|
import json
import numpy as np
from pycocotools import mask as maskUtils
thresh = 0.5
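# Note (added): pycocotools' maskUtils.iou expects boxes as [x, y, w, h]; the
# conversions below therefore turn [x1, y1, x2, y2] corners into that layout,
# e.g. [10, 20, 50, 80] -> [10, 20, 40, 60].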
# load retrieval results
results_image_id_all = []
results_query_score_all = []
results_query_cls_all = []
results_query_box_all = []
results_gallery_id_all = []
results_gallery_box_all = []
results_name = ' '
with open(results_name, 'r') as f:
results = json.loads(f.read())
for i in results:
box = i['query_bbox']
query_box = [box[0],box[1],box[2]-box[0],box[3]-box[1]]
box = np.array(i['gallery_bbox'])
gallery_box = [box[:,0], box[:,1], box[:,2] - box[:,0], box[:,3] - box[:,1]]
gallery_box = np.transpose(gallery_box,(1,0)).tolist()
results_image_id_all.append(i['query_image_id'])
results_query_score_all.append(i['query_score'])
results_query_cls_all.append(i['query_cls'])
results_query_box_all.append(query_box)
results_gallery_id_all.append(i['gallery_image_id'])
    results_gallery_box_all.append(gallery_box)
f.close()
results_image_id_all = np.array(results_image_id_all)
results_query_score_all = np.array(results_query_score_all)
results_query_cls_all = np.array(results_query_cls_all)
results_query_box_all = np.array(results_query_box_all)
results_gallery_id_all = np.array(results_gallery_id_all)
results_gallery_box_all = np.array(results_gallery_box_all)
# load query ground truth
query_image_id_all = []
query_box_all = []
query_cls_all = []
query_style_all = []
query_pair_all = []
query_name = '.../query_gt.json'
with open(query_name, 'r') as f:
query = json.loads(f.read())
for i in query:
box = i['bbox']
box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
query_image_id_all.append(i['query_image_id'])
query_box_all.append(box)
query_cls_all.append(i['cls'])
query_style_all.append(i['style'])
query_pair_all.append(i['pair_id'])
f.close()
# load gallery ground truth
query_image_id_all = np.array(query_image_id_all)
query_box_all = np.array(query_box_all)
query_cls_all = np.array(query_cls_all)
query_style_all = np.array(query_style_all)
query_pair_all = np.array(query_pair_all)
query_num = len(np.where(query_style_all>0)[0]) # the number of all query clothing items
query_id_real = np.unique(query_image_id_all) # image ids of query clothing items
gallery_image_id_all = []
gallery_box_all = []
gallery_style_all = []
gallery_pair_all = []
gallery_name = '.../gallery_gt.json'
with open(gallery_name, 'r') as f:
gallery = json.loads(f.read())
for i in gallery:
box = i['bbox']
box = [box[0], box[1], box[2] - box[0], box[3] - box[1]]
gallery_image_id_all.append(i['gallery_image_id'])
gallery_box_all.append(box)
gallery_style_all.append(i['style'])
gallery_pair_all.append(i['pair_id'])
f.close()
gallery_image_id_all = np.array(gallery_image_id_all)
gallery_box_all = np.array(gallery_box_all)
gallery_style_all = np.array(gallery_style_all)
gallery_pair_all = np.array(gallery_pair_all)
correct_num = {k: 0 for k in (1, 5, 10, 15, 20)}  # top-k hit counters
miss_num = 0 # the number of query items that fail to be detected
for id in query_id_real:
results_id_ind = np.where(results_image_id_all==id)[0]
if len(results_id_ind) == 0: # in case no clothing item is detected
continue
query_id_ind = np.where(query_image_id_all==id)[0] # all query items in the given image
pair_id = query_pair_all[query_id_ind]
assert len(np.unique(pair_id)) == 1
pair_id = pair_id[0]
results_id_score = results_query_score_all[results_id_ind]
results_id_box = results_query_box_all[results_id_ind]
results_id_cls = results_query_cls_all[results_id_ind]
results_id_gallery_id = results_gallery_id_all[results_id_ind]
results_id_gallery_box = results_gallery_box_all[results_id_ind]
query_id_box = query_box_all[query_id_ind]
query_id_cls = query_cls_all[query_id_ind]
query_id_style = query_style_all[query_id_ind]
is_crowd = np.zeros(len(query_id_box))
iou_id = maskUtils.iou(results_id_box,query_id_box,is_crowd)
iou_ind = np.argmax(iou_id,axis=1) # assign a ground truth label to each detected clothing item
for id_ind in range(0,len(query_id_ind)):
style = query_id_style[id_ind]
cls = query_id_cls[id_ind]
# For a given ground truth query item, select a detected item on behalf of it:
# First find out all detected items which are assigned the given ground truth label
# and are classified correctly.
# Then select the detected item with the highest score among these detected items.
if style>0:
results_style_ind1 = np.where(iou_ind==id_ind)[0]
results_style_ind2 = np.where(results_id_cls==cls)[0]
results_style_ind = np.intersect1d(results_style_ind1,results_style_ind2)
if len(results_style_ind)>0:
results_score_style = results_id_score[results_style_ind]
score_max_ind = np.argmax(results_score_style)
results_style_query_ind = results_style_ind[score_max_ind]
results_style_gallery_id = results_id_gallery_id[results_style_query_ind]
results_style_gallery_box = results_id_gallery_box[results_style_query_ind]
# find out the corresponding ground truth items in the gallery, that is ground truth items which have the same pair id and style as the query item.
gt_gallery_ind1 = np.where(gallery_pair_all==pair_id)[0]
                gt_gallery_ind2 = np.where(gallery_style_all==style)[0]
                gt_gallery_ind = np.intersect1d(gt_gallery_ind1,gt_gallery_ind2)
gt_gallery_image_id = gallery_image_id_all[gt_gallery_ind]
gt_gallery_box = gallery_box_all[gt_gallery_ind]
assert len(gt_gallery_ind)>0
if len(gt_gallery_ind) == 1:
gt_gallery_image_id = [gt_gallery_image_id]
                # calculate top-k accuracy for k in {1, 5, 10, 15, 20}:
                # if the ground-truth gallery images contain the retrieved gallery
                # image, first find that exact ground-truth gallery image, then the
                # ground-truth gallery items inside it (there may be more than one);
                # if the overlap between the retrieved gallery item and one of those
                # items is over the threshold, the retrieved result is positive.
                for k in (1, 5, 10, 15, 20):
                    for t in range(0, k):
                        if results_style_gallery_id[t] in gt_gallery_image_id:
                            which_ind = np.where(gt_gallery_image_id == results_style_gallery_id[t])[0]
                            crowd = np.zeros(len(which_ind))
                            iou_style = maskUtils.iou([results_style_gallery_box[t]], gt_gallery_box[which_ind], crowd)
                            if len(np.where(iou_style >= thresh)[0]) > 0:
                                correct_num[k] += 1
                                break
else:
miss_num = miss_num + 1
for k in (1, 5, 10, 15, 20):
    print('top-%d' % k)
    print(float(correct_num[k]) / query_num)
|
81078
|
from classic_tetris_project import discord
from classic_tetris_project.env import env
@discord.client.event
async def on_ready():
import csv
from tqdm import tqdm
from classic_tetris_project import discord
from classic_tetris_project.countries import countries
from classic_tetris_project.models import User
with open('pbs.csv', 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=['twitch_id', 'twitch_username',
'discord_id', 'discord_username',
'ntsc_pb', 'ntsc_pb_updated_at',
'pal_pb', 'pal_pb_updated_at',
'country_code', 'country'])
writer.writeheader()
for user in tqdm(User.objects.all()):
if user.ntsc_pb or user.pal_pb:
d = {
'ntsc_pb': user.ntsc_pb,
'ntsc_pb_updated_at': (user.ntsc_pb_updated_at.isoformat()
if user.ntsc_pb_updated_at else None),
'pal_pb': user.pal_pb,
'pal_pb_updated_at': (user.pal_pb_updated_at.isoformat()
if user.pal_pb_updated_at else None),
'country_code': user.country,
'country': (countries[user.country] if user.country else None),
}
if hasattr(user, 'twitch_user'):
d['twitch_id'] = user.twitch_user.twitch_id
d['twitch_username'] = user.twitch_user.username
if hasattr(user, 'discord_user'):
d['discord_id'] = user.discord_user.discord_id
if user.discord_user.user_obj:
d['discord_username'] = user.discord_user.username
writer.writerow(d)
await discord.client.logout()
discord.client.run(env("DISCORD_TOKEN"))
|
81090
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Oper(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param state: {"enum": ["UP", "DOWN", "MAINTENANCE"], "type": "string", "format": "enum"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "oper"
self.DeviceProxy = ""
self.state = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
class Member(A10BaseClass):
"""Class Description::
Operational Status for the object member.
Class member supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param name: {"description": "Member name", "format": "comp-string", "minLength": 1, "oid": "1001", "optional": false, "maxLength": 127, "type": "string"}
:param port: {"description": "Port number", "format": "number", "default": 65534, "optional": false, "oid": "1002", "maximum": 65534, "minimum": 0, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/cgnv6/service-group/{name}/member/{name}+{port}/oper`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "member"
self.a10_url="/axapi/v3/cgnv6/service-group/{name}/member/{name}+{port}/oper"
self.DeviceProxy = ""
self.oper = {}
self.name = ""
self.port = ""
        for key, value in kwargs.items():
            setattr(self, key, value)
|
81121
|
from unittest.case import TestCase
import unittest
import pandas as pd
import numpy as np
from datetime import datetime
from qlib import init
from qlib.config import C
from qlib.log import TimeInspector
from qlib.utils.time import cal_sam_minute as cal_sam_minute_new, get_min_cal
def cal_sam_minute(x, sam_minutes):
"""
Sample raw calendar into calendar with sam_minutes freq, shift represents the shift minute the market time
- open time of stock market is [9:30 - shift*pd.Timedelta(minutes=1)]
- mid close time of stock market is [11:29 - shift*pd.Timedelta(minutes=1)]
- mid open time of stock market is [13:00 - shift*pd.Timedelta(minutes=1)]
- close time of stock market is [14:59 - shift*pd.Timedelta(minutes=1)]
"""
# TODO: actually, this version is much faster when no cache or optimization
day_time = pd.Timestamp(x.date())
shift = C.min_data_shift
open_time = day_time + pd.Timedelta(hours=9, minutes=30) - shift * pd.Timedelta(minutes=1)
mid_close_time = day_time + pd.Timedelta(hours=11, minutes=29) - shift * pd.Timedelta(minutes=1)
mid_open_time = day_time + pd.Timedelta(hours=13, minutes=00) - shift * pd.Timedelta(minutes=1)
close_time = day_time + pd.Timedelta(hours=14, minutes=59) - shift * pd.Timedelta(minutes=1)
if open_time <= x <= mid_close_time:
minute_index = (x - open_time).seconds // 60
elif mid_open_time <= x <= close_time:
minute_index = (x - mid_open_time).seconds // 60 + 120
else:
raise ValueError("datetime of calendar is out of range")
minute_index = minute_index // sam_minutes * sam_minutes
if 0 <= minute_index < 120:
return open_time + minute_index * pd.Timedelta(minutes=1)
elif 120 <= minute_index < 240:
return mid_open_time + (minute_index - 120) * pd.Timedelta(minutes=1)
else:
raise ValueError("calendar minute_index error, check `min_data_shift` in qlib.config.C")
class TimeUtils(TestCase):
@classmethod
def setUpClass(cls):
init()
def test_cal_sam_minute(self):
# test the correctness of the code
random_n = 1000
cal = get_min_cal()
def gen_args():
for time in np.random.choice(cal, size=random_n, replace=True):
sam_minutes = np.random.choice([1, 2, 3, 4, 5, 6])
dt = pd.Timestamp(
datetime(
2021,
month=3,
day=3,
hour=time.hour,
minute=time.minute,
second=time.second,
microsecond=time.microsecond,
)
)
args = dt, sam_minutes
yield args
for args in gen_args():
assert cal_sam_minute(*args) == cal_sam_minute_new(*args)
# test the performance of the code
args_l = list(gen_args())
with TimeInspector.logt():
for args in args_l:
cal_sam_minute(*args)
with TimeInspector.logt():
for args in args_l:
cal_sam_minute_new(*args)
if __name__ == "__main__":
unittest.main()
|
81147
|
from numbers import Real, Integral
import numpy as np
import openmc.checkvalue as cv
from .angle_energy import AngleEnergy
from .endf import get_cont_record
class NBodyPhaseSpace(AngleEnergy):
"""N-body phase space distribution
Parameters
----------
total_mass : float
Total mass of product particles
n_particles : int
Number of product particles
atomic_weight_ratio : float
Atomic weight ratio of target nuclide
q_value : float
Q value for reaction in eV
Attributes
----------
total_mass : float
Total mass of product particles
n_particles : int
Number of product particles
atomic_weight_ratio : float
Atomic weight ratio of target nuclide
q_value : float
Q value for reaction in eV
"""
def __init__(self, total_mass, n_particles, atomic_weight_ratio, q_value):
self.total_mass = total_mass
self.n_particles = n_particles
self.atomic_weight_ratio = atomic_weight_ratio
self.q_value = q_value
@property
def total_mass(self):
return self._total_mass
@property
def n_particles(self):
return self._n_particles
@property
def atomic_weight_ratio(self):
return self._atomic_weight_ratio
@property
def q_value(self):
return self._q_value
@total_mass.setter
def total_mass(self, total_mass):
name = 'N-body phase space total mass'
cv.check_type(name, total_mass, Real)
cv.check_greater_than(name, total_mass, 0.)
self._total_mass = total_mass
@n_particles.setter
def n_particles(self, n_particles):
name = 'N-body phase space number of particles'
cv.check_type(name, n_particles, Integral)
cv.check_greater_than(name, n_particles, 0)
self._n_particles = n_particles
@atomic_weight_ratio.setter
def atomic_weight_ratio(self, atomic_weight_ratio):
name = 'N-body phase space atomic weight ratio'
cv.check_type(name, atomic_weight_ratio, Real)
cv.check_greater_than(name, atomic_weight_ratio, 0.0)
self._atomic_weight_ratio = atomic_weight_ratio
@q_value.setter
def q_value(self, q_value):
name = 'N-body phase space Q value'
cv.check_type(name, q_value, Real)
self._q_value = q_value
def to_hdf5(self, group):
"""Write distribution to an HDF5 group
Parameters
----------
group : h5py.Group
HDF5 group to write to
"""
group.attrs['type'] = np.string_('nbody')
group.attrs['total_mass'] = self.total_mass
group.attrs['n_particles'] = self.n_particles
group.attrs['atomic_weight_ratio'] = self.atomic_weight_ratio
group.attrs['q_value'] = self.q_value
@classmethod
def from_hdf5(cls, group):
"""Generate N-body phase space distribution from HDF5 data
Parameters
----------
group : h5py.Group
HDF5 group to read from
Returns
-------
openmc.data.NBodyPhaseSpace
N-body phase space distribution
"""
total_mass = group.attrs['total_mass']
n_particles = group.attrs['n_particles']
awr = group.attrs['atomic_weight_ratio']
q_value = group.attrs['q_value']
return cls(total_mass, n_particles, awr, q_value)
@classmethod
def from_ace(cls, ace, idx, q_value):
"""Generate N-body phase space distribution from ACE data
Parameters
----------
ace : openmc.data.ace.Table
ACE table to read from
idx : int
Index in XSS array of the start of the energy distribution data
(LDIS + LOCC - 1)
q_value : float
Q-value for reaction in eV
Returns
-------
openmc.data.NBodyPhaseSpace
N-body phase space distribution
"""
n_particles = int(ace.xss[idx])
total_mass = ace.xss[idx + 1]
return cls(total_mass, n_particles, ace.atomic_weight_ratio, q_value)
@classmethod
def from_endf(cls, file_obj):
"""Generate N-body phase space distribution from an ENDF evaluation
Parameters
----------
file_obj : file-like object
            ENDF file positioned at the start of the N-body phase space
distribution
Returns
-------
openmc.data.NBodyPhaseSpace
N-body phase space distribution
"""
items = get_cont_record(file_obj)
total_mass = items[0]
n_particles = items[5]
# TODO: get awr and Q value
return cls(total_mass, n_particles, 1.0, 0.0)
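# Hedged usage sketch (added): round-tripping the distribution through HDF5
# with h5py; the attribute names mirror to_hdf5/from_hdf5 above.
#   import h5py
#   dist = NBodyPhaseSpace(total_mass=5.0, n_particles=3,
#                          atomic_weight_ratio=0.999, q_value=1.0e6)
#   with h5py.File("nbody.h5", "w") as fh:
#       dist.to_hdf5(fh.create_group("distribution"))
#   with h5py.File("nbody.h5", "r") as fh:
#       same = NBodyPhaseSpace.from_hdf5(fh["distribution"])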
|
81176
|
from ._interface_quantum_simulator import IQuantumSimulator
from ._projectq_quantum_simulator import ProjectqQuantumSimulator
|
81243
|
from ..utils.constants import *
from ..utils.vector3 import vec3, rgb, extract
from functools import reduce as reduce
from ..ray import Ray, get_raycolor
from .. import lights
import numpy as np
from . import Material
from ..textures import *
class Emissive(Material):
def __init__(self, color, **kwargs):
if isinstance(color, vec3):
self.texture_color = solid_color(color)
elif isinstance(color, texture):
self.texture_color = color
super().__init__(**kwargs)
def get_color(self, scene, ray, hit):
diff_color = self.texture_color.get_color(hit)
return diff_color
|
81258
|
from debug import *
from presto_instance import *
def rewrite(source, match_template, rewrite_template):
if DEBUG_ROOIBOS:
        print('[DEBUG ROOIBOS] source: %r' % source)
        print('[DEBUG ROOIBOS] match template: %r' % match_template)
        print('[DEBUG ROOIBOS] rewrite template: %r' % rewrite_template)
result = p.rewrite(source, match_template, rewrite_template)
if result.status_code == 200:
if DEBUG_ROOIBOS:
            print('[DEBUG ROOIBOS] rooibos result:', result)
return result.text
else:
if DEBUG_ROOIBOS:
            print('[DEBUG ROOIBOS] Error: could not rewrite!')
return ''
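# Example (added): Rooibos/Comby-style templates bind holes with :[name], so,
# assuming the presto instance `p` is reachable and answers with HTTP 200:
#   rewrite('foo(bar)', 'foo(:[x])', 'baz(:[x])')  ->  'baz(bar)'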
|
81275
|
import datetime
import os
from common import settings
# environment variables must be set
TEST_USE_STATIC_DATA = os.getenv('TEST_USE_STATIC_DATA', 'true').lower() in ('1', 'true', 'yes')
test_api_key = os.getenv('TEST_API_KEY')
NUMBER_OF_ADS = 5029
DAWN_OF_TIME = '1971-01-01T00:00:01'
current_time_stamp = datetime.datetime.now().strftime(settings.DATE_FORMAT)
test_headers = {'api-key': test_api_key, 'accept': 'application/json'}
test_host = os.getenv('TEST_HOST', 'http://127.0.0.1')
test_port = os.getenv('TEST_PORT', 5000)
TEST_URL = f"{test_host}:{test_port}"
EXPECTED_GYMNASIE_LARARE = 60
|
81277
|
from . import runner
from . import worker
from . import default
from . import zeromq
from . import slurm
from .runner import Runner, RunnerInterface
from .worker import Worker, Interface, Preprocessor, Postprocessor
|
81288
|
import InstrumentDriver
import numpy as np
class Driver(InstrumentDriver.InstrumentWorker):
""" This class implements a simple signal generator driver"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
pass
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# just return the value
return value
def performGetValue(self, quant, options={}):
"""Perform the Get Value instrument operation"""
# proceed depending on quantity
if quant.name == 'Signal':
# if asking for signal, start with getting values of other controls
amp = self.getValue('Amplitude')
freq = self.getValue('Frequency')
phase = self.getValue('Phase')
add_noise = self.getValue('Add noise')
# calculate time vector from 0 to 1 with 1000 elements
time = np.linspace(0,1,1000)
signal = amp * np.sin(freq*time*2*np.pi + phase*np.pi/180.0)
# add noise
if add_noise:
noise_amp = self.getValue('Noise amplitude')
signal += noise_amp * np.random.randn(len(signal))
# create trace object that contains timing info
trace = quant.getTraceDict(signal, t0=0.0, dt=time[1]-time[0])
# finally, return the trace object
return trace
else:
# for other quantities, just return current value of control
return quant.getValue()
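# Note (added): getTraceDict packages the y-data together with t0 and dt so
# Labber can rebuild the time axis; with np.linspace(0, 1, 1000) above,
# dt = time[1] - time[0] = 1/999 s.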
|
81295
|
from typing import Union
import numpy as np
def moore_n(
n: int, position: tuple, grid: np.ndarray, invariant: Union[int, np.ndarray] = 0
):
"""Gets the N Moore neighborhood at given postion."""
row, col = position
nrows, ncols = grid.shape
# Target offsets from position.
ofup, ofdo = row + np.array([-n, +n])
ofle, ofri = col + np.array([-n, +n])
try:
if ofup < 0 or ofle < 0 or ofdo + 1 > nrows or ofri + 1 > ncols:
raise IndexError
# Current Grid is enough, just return the requested values.
return grid[ofup : ofdo + 1, ofle : ofri + 1]
except IndexError:
invariant = np.array(invariant, dtype=grid.dtype)
# 1. Generate extended grid.
        # Grid side length at order N.
l = lambda n: 2 * n + 1
ln = l(n)
egrid = np.repeat(invariant, ln * ln).reshape(ln, ln)
# 2. Populate middle cell.
mid = ln // 2
egrid[mid, mid] = grid[row, col]
is_legal = {
"up": ofup >= 0,
"down": ofdo <= nrows - 1,
"left": ofle >= 0,
"right": ofri <= ncols - 1,
}
# Distance
d = lambda a, b: abs(b - a)
# 3. Populate Up-Left Corner
if is_legal["up"] and is_legal["left"]:
egrid[mid - n : mid + 1, mid - n : mid + 1] = grid[
row - n : row + 1, col - n : col + 1
]
elif not is_legal["up"] and not is_legal["left"]: # Both ilegal
br = d(row, 0)
bc = d(col, 0)
egrid[mid - br : mid + 1, mid - bc : mid + 1] = grid[
row - br : row + 1, col - bc : col + 1
]
elif not is_legal["up"]:
br = d(row, 0) # Distance to the border
egrid[mid - br : mid + 1, mid - n : mid + 1] = grid[
row - br : row + 1, col - n : col + 1
]
elif not is_legal["left"]:
bc = d(col, 0)
egrid[mid - n : mid + 1, mid - bc : mid + 1] = grid[
row - n : row + 1, col - bc : col + 1
]
# 4. Populate Up-Right Corner
if is_legal["up"] and is_legal["right"]:
egrid[mid - n : mid + 1, mid : mid + n + 1] = grid[
row - n : row + 1, col : col + n + 1
]
elif not is_legal["up"] and not is_legal["right"]:
br = d(row, 0)
bc = d(col, ncols)
egrid[mid - br : mid + 1, mid : mid + bc] = grid[
row - br : row + 1, col : col + bc
]
elif not is_legal["up"]:
br = d(row, 0)
egrid[mid - br : mid + 1, mid : mid + n + 1] = grid[
row - br : row + 1, col : col + n + 1
]
elif not is_legal["right"]:
bc = d(col, ncols)
egrid[mid - n : mid + 1, mid : mid + bc] = grid[
row - n : row + 1, col : col + bc
]
# 5. Populate Down-Left Corner
if is_legal["down"] and is_legal["left"]:
egrid[mid : mid + n + 1, mid - n : mid + 1] = grid[
row : row + n + 1, col - n : col + 1
]
elif not is_legal["down"] and not is_legal["left"]:
br = d(row, nrows)
bc = d(col, 0)
egrid[mid : mid + br, mid - bc : mid + 1] = grid[
row : row + br, col - bc : col + 1
]
elif not is_legal["down"]:
br = d(row, nrows)
egrid[mid : mid + br, mid - n : mid + 1] = grid[
row : row + br, col - n : col + 1
]
elif not is_legal["left"]:
bc = d(col, 0)
egrid[mid : mid + n + 1, mid - bc : mid + 1] = grid[
row : row + n + 1, col - bc : col + 1
]
# 6. Populate Down-Right Corner
if is_legal["down"] and is_legal["right"]:
egrid[mid : mid + n + 1, mid : mid + n + 1] = grid[
row : row + n + 1, col : col + n + 1
]
elif not is_legal["down"] and not is_legal["right"]:
br = d(row, nrows)
bc = d(col, ncols)
egrid[mid : mid + br, mid : mid + bc] = grid[row : row + br, col : col + bc]
elif not is_legal["down"]:
br = d(row, nrows)
egrid[mid : mid + br, mid : mid + n + 1] = grid[
row : row + br, col : col + n + 1
]
elif not is_legal["right"]:
bc = d(col, ncols)
egrid[mid : mid + n + 1, mid : mid + bc] = grid[
row : row + n + 1, col : col + bc
]
return egrid
# Deprecated: still used as an interface for CAs.
# Superseded by the moore_n function above.
def neighborhood_at(grid, pos, invariant=0):
"""
Calculates the Moore's neighborhood of cell at target position 'pos'.
The boundary conditions are invariant and set to 'empty'.
    Returns a named tuple with the values of the neighborhood cells in the following
order: up_left, up, up_right,
left, self, right,
down_left, down, down_right
"""
from collections import namedtuple
Neighbors = namedtuple(
"Neighbors",
[
"up_left",
"up",
"up_right",
"left",
"self",
"right",
"down_left",
"down",
"down_right",
],
)
N = 1
neighborhood = moore_n(N, pos, grid, invariant).flatten().tolist()
return Neighbors(*neighborhood)
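# Worked example (added): an order-1 neighborhood at the top-left corner of a
# 3x3 grid is padded with the invariant value.
#   g = np.arange(9).reshape(3, 3)
#   moore_n(1, (0, 0), g, invariant=-1)
#   -> [[-1, -1, -1],
#       [-1,  0,  1],
#       [-1,  3,  4]]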
|
81301
|
import moment
import os
import pandas
import pyarrow as pa
import pyarrow.parquet as pq
import requests
from func_timeout import func_set_timeout, FunctionTimedOut
from pandas import DataFrame
from pathlib import Path
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
from pyspark.sql.types import StringType
from python_utils.src.spark_udfs import get_confidence_based_h3_and_name_distance, get_h3_distance, get_string_distance
from time import sleep
MAX_H3_DISTANCE = 500
class SearchScraper:
"""Get result for search strings"""
@staticmethod
@func_set_timeout(180)
def send_query(batch, query_type):
# noinspection PyBroadException
try:
host = os.getenv('GOOGLE_POI_API_HOST') or '127.0.0.1'
# noinspection HttpUrlsUsage
result = requests.request(
method='get',
url=f'http://{host}:3003/{"search" if query_type == "search" else "poi-information"}',
json=batch)
return result.json() if result else None
except Exception as e:
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Search query failed: ', e)
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Continuing without batch.')
return None
"""Match the queries that have been sent to the received results"""
@staticmethod
def match_search_results(directory: str, file_name: str):
memory = os.getenv('SPARK_MEMORY') or '16g'
spark = SparkSession.builder.appName('google-poi').config('spark.driver.memory', memory).getOrCreate()
df_str = spark.read.parquet(directory + file_name)
path_results = directory.replace('Strings', 'Results') + file_name.replace('strings', 'results')
df_res = spark.read.parquet(path_results)
# noinspection PyTypeChecker
df_res = df_str \
.alias('df_str') \
.join(df_res, df_str.query == df_res.query, 'inner') \
.filter(col('data.h3Index').isNotNull()) \
.withColumn('osmName', col('df_str.name')) \
.withColumn('googleName', col('data.name')) \
.withColumn(
'nameDistance',
get_string_distance(col('googleName'), col('osmName'), col('df_str.query'))
) \
.withColumn(
'h3Distance',
get_h3_distance(col('h3Index').cast(StringType()), col('data.h3Index').cast(StringType()),
lit(MAX_H3_DISTANCE))
) \
.withColumn(
'confidence',
get_confidence_based_h3_and_name_distance(col('h3Distance'), col('nameDistance'), lit(MAX_H3_DISTANCE))
) \
.select('osmId', 'type', 'confidence', 'data.id')
df_res.write.parquet(path_results.replace('results', 'results_matched'))
"""Match the POI ids that have been sent to the received results"""
@staticmethod
def match_poi_results(directory: str, file_name: str):
memory = os.getenv('SPARK_MEMORY') or '16g'
spark = SparkSession.builder.appName('google-poi').config('spark.driver.memory', memory).getOrCreate()
df_res = spark.read.parquet(
directory.replace('Strings', 'Results') + file_name.replace('strings', 'results_matched')
)
path_poi_data = directory.replace('searchStrings', 'poiData') + file_name.replace('search_strings', 'poi_data')
df_pd = spark.read.parquet(path_poi_data)
# noinspection PyTypeChecker
df_pd = df_res \
.alias('df_res') \
.join(df_pd, df_res.id == df_pd.id, 'inner') \
.filter(col('data.h3Index').isNotNull()) \
.select('osmId', 'type', 'confidence', col('df_res.id').alias('id'), 'data.*')
df_pd.write.parquet(path_poi_data.replace('poi_data', 'poi_data_matched'))
"""Send queries in batches for each partition of a dataframe"""
@staticmethod
def batch_queries(
df: DataFrame,
output_dir: str,
file_name: str,
query_property: str,
query_type: str,
schema=None
):
batch = list()
batch_size = 100
max_sleep_time = 120
writer = None
for index, row in df.iterrows():
batch.append(row[query_property])
# noinspection PyTypeChecker
if (len(batch) == batch_size) or ((index + 1) == len(df.index)):
successful = False
sleep_time = 1
while not successful and (sleep_time < max_sleep_time):
try:
result = SearchScraper.send_query(batch, query_type)
if result and ('data' in result):
data = pandas.DataFrame(result['data'])
# noinspection PyArgumentList
table = pa.Table.from_pandas(df=data, schema=schema)
if not writer:
script_dir = os.path.dirname(__file__)
output_dir = os.path.join(script_dir, output_dir)
output_file = os.path.join(output_dir, file_name)
Path(output_dir).mkdir(parents=True, exist_ok=True)
writer = pq.ParquetWriter(
output_file,
schema=schema if schema else table.schema,
flavor='spark'
)
writer.write_table(table)
successful = True
else:
sleep(sleep_time)
sleep_time *= 2
except FunctionTimedOut:
sleep(sleep_time)
sleep_time *= 2
if sleep_time >= max_sleep_time:
print(f'[{moment.now().format("YYYY-MM-DDTHH-mm-ss")}]: Request timed out too many times. '
f'Skipping batch')
batch = list()
if writer:
writer.close()
"""Send Google POI ids to retrieve all POI information"""
@staticmethod
def send_poi_queries(directory: str, file_name: str):
pois = pq \
.read_table(directory.replace('Strings', 'Results') + file_name.replace('strings', 'results_matched')) \
.to_pandas()
pois = pois[['id']].drop_duplicates()
schema = pa.schema([
pa.field('id', pa.string()),
pa.field('data', pa.struct([
pa.field('name', pa.string()),
pa.field('placeID', pa.string()),
pa.field('location', pa.struct([
pa.field('lat', pa.float64()),
pa.field('lng', pa.float64())
])),
pa.field('h3Index', pa.string()),
pa.field('address', pa.list_(pa.string())),
pa.field('timezone', pa.string()),
pa.field('categories', pa.struct([
pa.field('google', pa.list_(pa.string())),
pa.field('kuwala', pa.list_(pa.string()))
])),
pa.field('temporarilyClosed', pa.bool_()),
pa.field('permanentlyClosed', pa.bool_()),
pa.field('insideOf', pa.string()),
pa.field('contact', pa.struct([
pa.field('phone', pa.string()),
pa.field('website', pa.string())
])),
pa.field('openingHours', pa.list_(pa.struct([
pa.field('closingTime', pa.string()),
pa.field('openingTime', pa.string()),
pa.field('date', pa.string())
]))),
pa.field('rating', pa.struct([
pa.field('numberOfReviews', pa.int64()),
pa.field('stars', pa.float64())
])),
pa.field('priceLevel', pa.int64()),
pa.field('popularity', pa.list_(pa.struct([
pa.field('popularity', pa.int64()),
pa.field('timestamp', pa.string())
]))),
pa.field('waitingTime', pa.list_(pa.struct([
pa.field('waitingTime', pa.int64()),
pa.field('timestamp', pa.string())
]))),
pa.field('spendingTime', pa.list_(pa.int64()))
]))
])
SearchScraper.batch_queries(
df=pois,
            output_dir='../../tmp/googleFiles/poiData/',
file_name=file_name.replace('search_strings', 'poi_data'),
query_property='id',
query_type='poi',
schema=schema
)
"""Send search strings to get Google POI ids"""
@staticmethod
def send_search_queries(directory: str, file_name: str):
search_strings = pq.read_table(directory + file_name).to_pandas()
schema = pa.schema([
pa.field('query', pa.string()),
pa.field('data', pa.struct([
pa.field('h3Index', pa.string()),
pa.field('id', pa.string()),
pa.field('location', pa.struct([
pa.field('lat', pa.float64()),
pa.field('lng', pa.float64())
])),
pa.field('name', pa.string())
]))
])
SearchScraper.batch_queries(
df=search_strings,
            output_dir='../../tmp/googleFiles/searchResults/',
file_name=file_name.replace('strings', 'results'),
query_property='query',
query_type='search',
schema=schema
)
"""Write scraped POI information to a Parquet file"""
@staticmethod
def scrape_with_search_string():
script_dir = os.path.dirname(__file__)
parquet_files = os.path.join(script_dir, '../../tmp/googleFiles/searchStrings/')
file_name = sorted(os.listdir(parquet_files), reverse=True)[0]
SearchScraper.send_search_queries(parquet_files, file_name)
SearchScraper.match_search_results(parquet_files, file_name)
SearchScraper.send_poi_queries(parquet_files, file_name)
SearchScraper.match_poi_results(parquet_files, file_name)
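# A minimal invocation sketch (assumes the directory layout used above, i.e.
# Parquet files of search strings under ../../tmp/googleFiles/searchStrings/):
#
#     if __name__ == '__main__':
#         SearchScraper.scrape_with_search_string()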
|
81302
|
import pyautogui as pag
from getpass import getpass
import sys
import json
import requests
import keyboard
import time
import random
import webbrowser
from os import system, name
from colorama import Fore, Back, Style
import os
os.system('cls' if name == 'nt' else 'clear')
def clear():
    if name == 'nt':
        _ = system('cls')
    else:
        _ = system('clear')
def add():
    try:
        with open("names.txt", "r") as namesFile:
            namesData = namesFile.read()
    except OSError:
        print(Style.BRIGHT + Fore.LIGHTRED_EX +
              "Could not open names.txt - please make sure this file exists")
        exit()
    names = namesData.strip().split("\n")
print(Style.BRIGHT + Fore.LIGHTRED_EX + "Retrieved names from names.txt\n")
print(Style.BRIGHT + Fore.LIGHTRED_EX +
":: Click enter when your mouse is over the 'Add Friend Button' ::")
if keyboard.read_key() == "enter":
addFriend = pag.position()
print(f"Cords captured: {addFriend}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'Close Button' ::")
if keyboard.read_key() == "enter":
close = pag.position()
print(f"Cords captured: {close}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'Find Friend Search Bar' ::")
if keyboard.read_key() == "enter":
searchBar = pag.position()
print(f"Cords captured: {searchBar}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'Clear Friend Bar' ::")
if keyboard.read_key() == "enter":
clearText = pag.position()
print(f"Cords captured: {clearText}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'First Add Button' ::")
while True:
if keyboard.read_key() == "enter":
firstAdd = pag.position()
print(f"Cords captured: {firstAdd}")
break
else:
continue
clear()
def adder(name):
# move to search bar
pag.moveTo(searchBar[0], searchBar[1], 0.5)
pag.click()
time.sleep(2)
# write out name
pag.typewrite(name, interval=0.10)
time.sleep(2)
# move to first add, then add
pag.moveTo(firstAdd[0], firstAdd[1], 0.5)
        # click the add button repeatedly (nine times in total)
        for _ in range(9):
            pag.click()
            time.sleep(2)
# clears search bar
pag.moveTo(clearText[0], clearText[1], 0.5)
pag.click()
print("will cycle through your name list randomly\n\n")
time.sleep(3)
for name in names:
print("Doing: {0}".format(name))
adder(name)
time.sleep(1)
def sendsnap():
print(Style.BRIGHT + Fore.LIGHTRED_EX +
'How many cycles would you like to use? (1200 score per cycle): ')
print(Fore.CYAN + '\n\n\n\n\nSnapify> ', end='')
    amount = int(input(Fore.WHITE + ''))
clear()
while True:
        print(Style.BRIGHT + Fore.LIGHTRED_EX +
              'Would you like to mute your microphone? (Note: this will make the program slower) (Yes/No) ')
print(Fore.CYAN + '\n\n\n\n\nSnapify> ', end='')
mute = input(Fore.WHITE + '').lower()
clear()
if mute == 'yes':
break
elif mute == 'no':
break
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
print(Style.BRIGHT + Fore.LIGHTRED_EX +
":: Click enter when your mouse is over the 'Camera Button' ::")
if keyboard.read_key() == "enter":
CameraButton = pag.position()
print(f"Cords captured: {CameraButton}")
time.sleep(1)
if mute == 'yes':
print(":: Take a 3 second video then click enter when your mouse is over the 'Mute Button' ::")
if keyboard.read_key() == "enter":
MuteButton = pag.position()
print(f"Cords captured: {MuteButton}")
time.sleep(1)
print(":: Click on the 'Mute Button' then click enter when your mouse is over the 'Send To Button' ::")
if keyboard.read_key() == "enter":
SendToButton = pag.position()
print(f"Cords captured: {SendToButton}")
time.sleep(1)
elif mute == 'no':
print(":: Take a picture then click enter when your mouse is over the 'Send To Button' ::")
if keyboard.read_key() == "enter":
SendToButton = pag.position()
print(f"Cords captured: {SendToButton}")
time.sleep(1)
print(":: Click on the 'Send To Button' then click enter when your mouse is over the 'Last Snap Button' ::")
if keyboard.read_key() == "enter":
LastSnapButton = pag.position()
print(f"Cords captured: {LastSnapButton}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'Send Snap Arrow' ::")
if keyboard.read_key() == "enter":
SendSnapArrow = pag.position()
print(f"Cords captured: {SendSnapArrow}")
time.sleep(1)
print(":: Click enter when your mouse is over the 'Camera Logo at the bottom center' ::")
if keyboard.read_key() == "enter":
CameraLogo = pag.position()
print(f"Cords captured: {CameraLogo}")
# countdown screen
TimeToHomePage = 15
while TimeToHomePage >= 0:
clear()
print(
f'You have {TimeToHomePage} seconds to go back to the snapchat homescreen before the boost begins.')
time.sleep(1)
TimeToHomePage -= 1
clear()
    print(
        f"Started boosting! Please don't turn off your phone or close this window while it's running. This will run for {amount} cycle(s).")
print('\n\n\n\n\n\n')
while amount > 0:
        # move to camera button and record for just over a minute (63 s)
pag.moveTo(CameraButton[0], CameraButton[1], 2)
pag.mouseDown()
time.sleep(63)
pag.mouseUp()
# if mute click yes
if mute == 'yes':
pag.moveTo(MuteButton[0], MuteButton[1], 2)
pag.click()
# move to send to button and click
pag.moveTo(SendToButton[0], SendToButton[1], 2)
pag.click()
# move to last snap and click
pag.moveTo(LastSnapButton[0], LastSnapButton[1], 2)
pag.click()
# move to send snap button and click
pag.moveTo(SendToButton[0], SendToButton[1], 2)
pag.click()
# move to send snap arrow and click
pag.moveTo(SendSnapArrow[0], SendSnapArrow[1], 2)
pag.click()
# move to camera logo and click
pag.moveTo(CameraLogo[0], CameraLogo[1], 2)
pag.click()
amount -= 1
print(f'Finished one cycle. {amount} left to go.')
clear()
print(Fore.GREEN + 'Finished Boosting. Thanks for using our tool.')
print(Fore.MAGENTA + '\n\nPlease check us out at:\n\nQuessts: https://cracked.to/Quessts\nANG: https://cracked.to/ANG', end='')
getpass(Fore.WHITE + '')
sys.exit()
def removefriends():
print(Style.BRIGHT + Fore.LIGHTRED_EX +
":: Head over to the recently added friends section then click enter when your mouse is over the first users 'icon' ::")
if keyboard.read_key() == "enter":
IconButton = pag.position()
print(f"Cords captured: {IconButton}")
time.sleep(1)
print(":: Click on the 'icon' then click enter when your mouse is over the '3 dots' on the top right corner ::")
if keyboard.read_key() == "enter":
ThreeDotsButton = pag.position()
print(f"Cords captured: {ThreeDotsButton}")
time.sleep(1)
print(":: Click on the '3 dots' then click enter when your mouse is over the 'remove friend' button ::")
if keyboard.read_key() == "enter":
RemoveFriendButton = pag.position()
print(f"Cords captured: {RemoveFriendButton}")
time.sleep(1)
print(":: Click on the 'remove friend' button then click enter when your mouse is over the 'confirm remove friend' button ::")
if keyboard.read_key() == "enter":
ConfirmRemoveFriendButton = pag.position()
print(f"Cords captured: {ConfirmRemoveFriendButton}")
time.sleep(1)
clear()
# countdown screen
TimeToRecentlyAddedPage = 15
while TimeToRecentlyAddedPage >= 0:
clear()
print(
f'You have {TimeToRecentlyAddedPage} seconds to go back to the snapchat recently added menu before the bot starts.')
time.sleep(1)
TimeToRecentlyAddedPage -= 1
clear()
    print('Started removing friends! This process will continue forever and has to be stopped manually.')
while True:
# move to icon button and click
pag.moveTo(IconButton[0], IconButton[1], 2)
pag.click()
# move to three dots button and click
pag.moveTo(ThreeDotsButton[0], ThreeDotsButton[1], 2)
pag.click()
# move to remove friend button and click
pag.moveTo(RemoveFriendButton[0], RemoveFriendButton[1], 2)
pag.click()
# move to confirm remove friend button and click
pag.moveTo(ConfirmRemoveFriendButton[0],
ConfirmRemoveFriendButton[1], 2)
pag.click()
time.sleep(6)
while True:
print(Style.BRIGHT + Fore.LIGHTRED_EX + """
%( #%
%### ###%
%%%%##### @@@/@@@/@@@, #####%##% ███████╗███╗ ██╗ █████╗ ██████╗ ██╗███████╗██╗ ██╗
%&%%%#%&@ @&%%#%%&% ██╔════╝████╗ ██║██╔══██╗██╔══██╗██║██╔════╝╚██╗ ██╔╝
%&&% %%%&% ███████╗██╔██╗ ██║███████║██████╔╝██║█████╗ ╚████╔╝
@@ @% ╚════██║██║╚██╗██║██╔══██║██╔═══╝ ██║██╔══╝ ╚██╔╝
@@ @& ███████║██║ ╚████║██║ ██║██║ ██║██║ ██║
*@/ @@ @* @@ ╚══════╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝╚═╝ ╚═╝
,@@ &@@
/ @@ #1 Free snapchat booster Developed by https://cracked.to/Quessts
@@ @% and partially by https://cracked.to/ANG
@@ &@#
%@@@* @@@@* V1.5 | need help? https://discord.link/Snapify
&@@@@( @@@@@
@@@@@@@ #@@@@@@@
@@@#@@(##@@(
""")
print(
'Select module:\n\n\n\n1) Add Friends\n\n2) Boost\n\n3) Remove Recently Added Friends\n\n\n ')
print(Fore.CYAN + 'Snapify> ', end='')
option = input(Fore.WHITE + '')
if option == '1':
while True:
clear()
            print(Style.BRIGHT + Fore.LIGHTRED_EX +
                  "If you know how to use this, type 'yes'; if you don't, type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
add()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
add()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
elif option == '2':
while True:
clear()
            print(Style.BRIGHT + Fore.LIGHTRED_EX +
                  "If you know how to use this, type 'yes'; if you don't, type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
sendsnap()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
sendsnap()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
elif option == '3':
while True:
clear()
            print(Style.BRIGHT + Fore.LIGHTRED_EX +
                  "If you know how to use this, type 'yes'; if you don't, type 'no'\n\n")
print(Fore.CYAN + 'Snapify> ', end='')
watchvid = input(Fore.WHITE + '').lower()
if watchvid == 'yes':
clear()
removefriends()
elif watchvid == 'no':
clear()
webbrowser.open_new('https://youtu.be/uA4nNGV_jr8')
removefriends()
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
clear()
continue
else:
getpass(Style.BRIGHT + Fore.LIGHTRED_EX +
'Error. You inserted an invalid option. Please try again.')
continue
|
81304
|
import torch
import os
import time
import numpy as np
from tqdm import tqdm
from collections import OrderedDict
from torch import optim
from torch import nn
from torchvision.utils import save_image
class Base:
def _get_stats(self, dict_, mode):
stats = OrderedDict({})
for key in dict_.keys():
stats[key] = np.mean(dict_[key])
return stats
def train(self,
itr_train,
itr_valid,
epochs,
model_dir,
result_dir,
save_every=1,
scheduler_fn=None,
scheduler_args={}):
for folder_name in [model_dir, result_dir]:
if folder_name is not None and not os.path.exists(folder_name):
os.makedirs(folder_name)
        f = None
        if result_dir is not None:
            f_mode = 'w' if not os.path.exists("%s/results.txt" % result_dir) else 'a'
            f = open("%s/results.txt" % result_dir, f_mode)
if 'SLURM_JOB_NAME' in os.environ and os.environ['SLURM_JOB_NAME'] not in ['bash', 'sh']:
# If this is an sbatch job, don't make it verbose
verbose = False
else:
verbose = True
for epoch in range(self.last_epoch, epochs):
epoch_start_time = time.time()
# Training.
if verbose:
pbar = tqdm(total=len(itr_train))
train_dict = OrderedDict({'epoch': epoch+1})
# item, pose, id
for b, batch in enumerate(itr_train):
batch = self.prepare_batch(batch)
losses, outputs = self.train_on_instance(*batch,
iter=b+1)
for key in losses:
this_key = 'train_%s' % key
if this_key not in train_dict:
train_dict[this_key] = []
train_dict[this_key].append(losses[key])
if verbose:
pbar.update(1)
pbar.set_postfix(self._get_stats(train_dict, 'train'))
# Process handlers.
for handler_fn in self.handlers:
handler_dict = handler_fn(losses, batch, outputs,
{'epoch':epoch+1, 'iter':b+1, 'mode':'train'})
for key in handler_dict.keys():
this_key = 'train_%s' % key
if this_key not in train_dict:
train_dict[this_key] = []
train_dict[this_key].append(handler_dict[key])
if verbose:
pbar.close()
if verbose:
pbar = tqdm(total=len(itr_valid))
# Validation.
valid_dict = OrderedDict({})
for b, valid_batch in enumerate(itr_valid):
valid_batch = self.prepare_batch(valid_batch)
valid_losses, valid_outputs = self.eval_on_instance(*valid_batch,
iter=b+1)
for key in valid_losses:
this_key = 'valid_%s' % key
if this_key not in valid_dict:
valid_dict[this_key] = []
valid_dict[this_key].append(valid_losses[key])
if verbose:
pbar.update(1)
pbar.set_postfix(self._get_stats(valid_dict, 'valid'))
# Process handlers.
for handler_fn in self.handlers:
handler_dict = handler_fn(valid_losses, valid_batch, valid_outputs,
{'epoch':epoch+1, 'iter':b+1, 'mode':'valid'})
for key in handler_dict.keys():
this_key = 'valid_%s' % key
if this_key not in valid_dict:
valid_dict[this_key] = []
valid_dict[this_key].append(handler_dict[key])
if verbose:
pbar.close()
# Step learning rates.
for sched in self.schedulers:
sched.step()
# Update dictionary of values.
all_dict = train_dict
all_dict.update(valid_dict)
for key in all_dict:
all_dict[key] = np.mean(all_dict[key])
for key in self.optim:
all_dict["lr_%s" % key] = \
self.optim[key].state_dict()['param_groups'][0]['lr']
all_dict['time'] = time.time() - epoch_start_time
str_ = ",".join([str(all_dict[key]) for key in all_dict])
print(str_)
if result_dir is not None:
if (epoch+1) == 1:
f.write(",".join(all_dict.keys()) + "\n")
f.write(str_ + "\n")
f.flush()
if (epoch+1) % save_every == 0 and model_dir is not None:
self.save(filename="%s/%i.pkl" % (model_dir, epoch+1),
epoch=epoch+1)
if f is not None:
f.close()
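# A minimal subclass sketch showing the hooks train() relies on. All names
# below are hypothetical; a real subclass must supply prepare_batch,
# train_on_instance, eval_on_instance, save, plus the optim / schedulers /
# handlers / last_epoch attributes used above:
#
#     class L1Trainer(Base):
#         def __init__(self, model, lr=1e-4):
#             self.model = model
#             self.optim = {'model': optim.Adam(model.parameters(), lr=lr)}
#             self.schedulers = []
#             self.handlers = []
#             self.last_epoch = 0
#             self.loss = nn.L1Loss()
#         def prepare_batch(self, batch):
#             return tuple(t.cuda() if torch.cuda.is_available() else t for t in batch)
#         def train_on_instance(self, x, y, **kwargs):
#             self.optim['model'].zero_grad()
#             out = self.model(x)
#             loss = self.loss(out, y)
#             loss.backward()
#             self.optim['model'].step()
#             return {'l1': loss.item()}, {'out': out}
#         def eval_on_instance(self, x, y, **kwargs):
#             with torch.no_grad():
#                 out = self.model(x)
#                 return {'l1': self.loss(out, y).item()}, {'out': out}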
|
81463
|
from .base_options import BaseOptions
class TrainOptions(BaseOptions):
def initialize(self):
BaseOptions.initialize(self)
self.parser.add_argument('--display_freq', type=int, default=50, help='frequency of displaying average loss')
self.parser.add_argument('--save_epoch_freq', type=int, default=50, help='frequency of saving checkpoints at the end of epochs')
self.parser.add_argument('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
self.parser.add_argument('--niter', type=int, default=1000, help='# of epochs to train')
        self.parser.add_argument('--learning_rate_decrease_itr', type=int, default=-1, help='decrease the learning rate by decay_factor every this many iterations (-1 to disable)')
self.parser.add_argument('--decay_factor', type=float, default=0.94, help='learning rate decay factor')
        self.parser.add_argument('--tensorboard', action='store_true', help='use tensorboard to visualize loss change')
        self.parser.add_argument('--measure_time', action='store_true', help='measure time of different steps during training')
self.parser.add_argument('--validation_on', action='store_true', help='whether to test on validation set during training')
self.parser.add_argument('--validation_freq', type=int, default=100, help='frequency of testing on validation set')
self.parser.add_argument('--validation_batches', type=int, default=10, help='number of batches to test for validation')
        # NB: argparse's type=bool treats any non-empty string (even 'False') as True, so parse explicitly
        self.parser.add_argument('--enable_data_augmentation', type=lambda s: str(s).lower() in ('true', '1', 'yes'), default=True, help='whether to augment input frame')
#model arguments
self.parser.add_argument('--weights_visual', type=str, default='', help="weights for visual stream")
self.parser.add_argument('--weights_audio', type=str, default='', help="weights for audio stream")
self.parser.add_argument('--weights_fusion', type=str, default='', help="weights for fusion stream")
self.parser.add_argument('--unet_ngf', type=int, default=64, help="unet base channel dimension")
self.parser.add_argument('--unet_input_nc', type=int, default=2, help="input spectrogram number of channels")
self.parser.add_argument('--unet_output_nc', type=int, default=2, help="output spectrogram number of channels")
#optimizer arguments
self.parser.add_argument('--lr_visual', type=float, default=0.0001, help='learning rate for visual stream')
self.parser.add_argument('--lr_audio', type=float, default=0.001, help='learning rate for audio stream')
self.parser.add_argument('--lr_fusion', type=float, default=0.001, help='learning rate for fusion part')
self.parser.add_argument('--optimizer', default='adam', type=str, help='adam or sgd for optimization')
self.parser.add_argument('--beta1', default=0.9, type=float, help='momentum for sgd, beta1 for adam')
self.parser.add_argument('--weight_decay', default=0.0005, type=float, help='weights regularizer')
#loss
self.parser.add_argument('--loss_mode', type=str, choices=['l1', 'mse'], default='l1', help='the loss mode')
self.parser.add_argument('--sep_loss_weight', type=float, default=1., help='the loss weight for separation training')
self.parser.add_argument('--stereo_loss_weight', type=float, default=1., help='the loss weight for stereo training')
self.parser.add_argument('--val_return_key', default='stereo_loss', type=str, help='which loss item to return during validation')
self.mode = "train"
self.isTrain = True
self.enable_data_augmentation = True
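# Typical usage sketch (assumes BaseOptions follows the common options pattern
# of exposing a parse() method that calls initialize() and returns the parsed
# namespace; the attribute names below are illustrative):
#
#     opt = TrainOptions().parse()
#     print(opt.niter, opt.lr_audio, opt.loss_mode)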
|
81495
|
from boa3.builtin import public
def TestAdd(a: int, b: int) -> int:
return a + b
@public
def Main() -> int:
return TestAdd(1, 2)
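# Compilation sketch (an assumption about tooling, not part of this file: with
# the neo3-boa package installed, something like the following emits a .nef
# contract next to the source file):
#
#     from boa3.boa3 import Boa3
#     Boa3.compile_and_save('path/to/this_file.py')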
|
81544
|
from pydantic.dataclasses import dataclass
from ...samplers import BaseSamplerConfig
@dataclass
class VAMPSamplerConfig(BaseSamplerConfig):
"""This is the VAMP prior sampler configuration instance deriving from
:class:`BaseSamplerConfig`.
"""
pass
|
81554
|
import time
import logging
import traceback
from multiprocessing import Process
from threading import Lock
LOG = logging.getLogger(__name__)
class _Task:
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __call__(self):
return self.func(*self.args, **self.kwargs)
class ProcessManager:
def __init__(self):
self._tasks = {}
self._lock = Lock()
self._quit = False
def _spawn_task(self, task):
worker = Process(target=task)
worker.daemon = True
worker.start()
return worker
    def launch_task(self, func, *args, **kwargs):
        workers = kwargs.pop("workers", 1)
        if workers < 1:
            raise ValueError("workers is less than 1")
        task = _Task(func, args, kwargs)
        # spawn the requested number of worker processes, not just one
        for _ in range(workers):
            worker = self._spawn_task(task)
            self._tasks[worker] = task
def quit(self):
with self._lock:
self._quit = True
def wait(self, reload=True):
while True:
time.sleep(1)
with self._lock:
if self._quit:
                    for worker in self._tasks:
                        worker.terminate()
                    for worker in self._tasks:
                        worker.join()  # reap the terminated children before returning
                    return
try:
self._wait(reload)
except Exception:
LOG.error(traceback.format_exc())
raise
def _wait(self, reload):
removes = []
adds = []
for worker, task in self._tasks.items():
if not worker.is_alive():
LOG.warning("Process[%d] exited", worker.pid)
removes.append(worker)
if reload:
worker = self._spawn_task(task)
adds.append((worker, task))
for worker in removes:
self._tasks.pop(worker)
for worker, task in adds:
self._tasks[worker] = task
LOG.warning("Reload the task on Process[%d]: func=%s, args=%s, kwargs=%s",
worker.pid, task.func, task.args, task.kwargs)
if __name__ == "__main__":
def test_task(interval=10):
try:
for i in range(1, 10):
print(i, time.time())
time.sleep(interval)
except Exception as err:
print(err)
finally:
print("Exit ...")
m = ProcessManager()
m.launch_task(test_task, interval=1)
m.wait()
|
81556
|
import os
import tempfile
import numpy as np
from microscopium.screens.cellomics import SPIRAL_CLOCKWISE_RIGHT_25
from microscopium import preprocess as pre
from microscopium import io as mio
import pytest
import warnings
@pytest.fixture
def image_files():
# for clarity we define images as integer arrays in [0, 11) and
# divide by 10 later
i = np.array([[7, 4, 1, 1, 0],
[2, 5, 9, 6, 7],
[2, 3, 3, 8, 5],
[3, 0, 1, 7, 5],
[6, 0, 10, 1, 6]], np.uint8)
j = np.array([[1, 10, 0, 9, 0],
[3, 10, 4, 1, 1],
[4, 10, 0, 7, 4],
[9, 3, 2, 0, 7],
[1, 3, 3, 9, 3]], np.uint8)
k = np.array([[9, 1, 7, 7, 3],
[9, 1, 6, 2, 2],
[2, 8, 2, 0, 3],
[4, 3, 8, 9, 10],
[6, 0, 2, 3, 10]], np.uint8)
files = []
for im in [i, j, k]:
f, fn = tempfile.mkstemp(suffix='.png')
files.append(fn)
mio.imsave(fn, im)
yield files
for fn in files:
os.remove(fn)
def test_illumination_mean(image_files):
illum = pre.find_background_illumination(image_files, radius=1,
quantile=0.5)
illum_true = np.array([[161, 174, 188, 81, 94],
[174, 174, 81, 161, 94],
[174, 67, 161, 121, 161],
[134, 107, 107, 161, 215],
[134, 134, 134, 174, 215]], np.uint8)
np.testing.assert_array_almost_equal(illum, illum_true, decimal=1)
def test_color_stack(image_files):
images = list(map(mio.imread, image_files))
stack = pre.stack_channels(images[:2], [None, 1, 0])
np.testing.assert_equal(stack[0, 0], [0, 1, 7])
np.testing.assert_equal(stack[..., 2], images[0])
def conv(im):
return np.round(np.clip(im, 0, np.inf) * 255).astype(np.uint8)
@pytest.fixture
def image_files_noise(request):
"""Three sham images; one has no signal, one has an intensity artifact."""
r = np.random.RandomState(0)
shape = (5, 5)
# no signal
i = conv(0.01 * np.ones(shape, dtype=float) + 0.005 * r.randn(*shape))
# normal image
j = conv(0.5 * r.rand(*shape))
# blown-out corner
k = 0.5 * r.rand(*shape)
k[3:, 3:] = 1.0
k = conv(k)
files = []
for im in [i, j, k]:
f, fn = tempfile.mkstemp(suffix='.png')
files.append(fn)
mio.imsave(fn, im)
def cleanup():
for fn in files:
os.remove(fn)
request.addfinalizer(cleanup)
illum = 0.01 * np.ones(shape, dtype=float)
return files, illum
def test_correct_multiimage_illum(image_files_noise):
files, illum = image_files_noise
with mio.temporary_file('.tif') as out_fn:
ims = pre.correct_multiimage_illumination(files, illum, (2 / 25), 0)
i, j, k = list(ims)
# 1. check noise is not blown out in i
assert not np.any(i > 10)
# 2. check blown out corner in k has not suppressed all other values
assert np.median(k) > 100
cellomics_pattern = "MFGTMP_150406100001_A01f{0:02d}d0.TIF"
missing_test_fns = [
([cellomics_pattern.format(i) for i in range(25)], []),
([cellomics_pattern.format(i) for i in range(25)], [1, 13])
]
# delete "images" with fields 1 and 13 from second set of
# image filesnames
missing_test_fns[1][0].remove(cellomics_pattern.format(1))
missing_test_fns[1][0].remove(cellomics_pattern.format(13))
@pytest.mark.parametrize("fns, expected", missing_test_fns)
def test_find_missing_fields(fns, expected):
actual = pre.find_missing_fields(fns)
np.testing.assert_array_equal(actual, expected)
# create a list of parameters for testing the create missing mask files
# each entry in the tuple represents the fields: missing, order, rows, cols
# and expected (the expected output from the function)
missing_mask_test = [
    ([], [[0, 1, 2]], 10, 5, np.ones((10, 15), dtype=bool)),
    ([0, 5], [[0, 1, 2], [4, 5, 6]], 5, 10, np.ones((10, 30), dtype=bool)),
    ([3, 4], [[0, 1], [2, 3], [4, 5]], 10, 5, np.ones((30, 10), dtype=bool))
]
# insert False to missing areas of expected output
missing_mask_test[1][4][0:5, 0:10] = False
missing_mask_test[1][4][5:10, 10:20] = False
missing_mask_test[2][4][10:20, 5:10] = False
missing_mask_test[2][4][20:30, 0:5] = False
# pass the set of list parameters to the test_create_missing_mask
# function. the test will run against every set of parameters in the
# missing_mask_test list
@pytest.mark.parametrize("missing, order, rows, cols, expected",
missing_mask_test)
def test_create_missing_mask(missing, order, rows, cols, expected):
actual = pre.create_missing_mask(missing, order, rows, cols)
np.testing.assert_array_equal(actual, expected)
@pytest.fixture
def test_image_files_montage(request):
def make_test_montage_files(missing_fields):
shape = (2, 2)
fields = list(range(0, 25))
for missing_field in missing_fields:
fields.remove(missing_field)
ims = [np.ones(shape, np.uint8) * i for i in fields]
files = []
for field, im in zip(fields, ims):
prefix = "MFGTMP_140206180002_A01f{0:02d}d0".format(field)
f, fn = tempfile.mkstemp(prefix=prefix, suffix=".tif")
files.append(fn)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
mio.imsave(fn, im)
def cleanup():
for file in files:
os.remove(file)
request.addfinalizer(cleanup)
return files
return make_test_montage_files
def test_montage_with_missing(test_image_files_montage):
files = test_image_files_montage(missing_fields=[20])
montage, mask, number_missing = \
pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
re_group=1)
expect_montage = np.array([[0, 0, 21, 21, 22, 22, 23, 23, 24, 24],
[0, 0, 21, 21, 22, 22, 23, 23, 24, 24],
[19, 19, 6, 6, 7, 7, 8, 8, 9, 9],
[19, 19, 6, 6, 7, 7, 8, 8, 9, 9],
[18, 18, 5, 5, 0, 0, 1, 1, 10, 10],
[18, 18, 5, 5, 0, 0, 1, 1, 10, 10],
[17, 17, 4, 4, 3, 3, 2, 2, 11, 11],
[17, 17, 4, 4, 3, 3, 2, 2, 11, 11],
[16, 16, 15, 15, 14, 14, 13, 13, 12, 12],
[16, 16, 15, 15, 14, 14, 13, 13, 12, 12]],
np.uint8)
np.testing.assert_array_equal(expect_montage, montage)
def test_montage_with_missing_mask(test_image_files_montage):
files = test_image_files_montage(missing_fields=[3, 8])
montage, mask, number_missing = \
pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
re_group=1)
    expected_mask = np.ones((10, 10), bool)
expected_mask[6:8, 4:6] = False
expected_mask[2:4, 6:8] = False
np.testing.assert_array_equal(expected_mask, mask)
def test_montage_with_missing_number_missing(test_image_files_montage):
files = test_image_files_montage(missing_fields=[10, 11, 12])
montage, mask, number_missing = \
pre.montage_with_missing(files, order=SPIRAL_CLOCKWISE_RIGHT_25,
re_string=r'.*_[A-P]\d{2}f(\d{2})d0',
re_group=1)
assert number_missing == 3
if __name__ == '__main__':
pytest.main()
|
81585
|
from met_brewer.palettes import (
MET_PALETTES, COLORBLIND_PALETTES_NAMES, COLORBLIND_PALETTES,
met_brew, export, is_colorblind_friendly
)
# re-export the public API explicitly instead of bare name references
__all__ = [
    "MET_PALETTES",
    "COLORBLIND_PALETTES_NAMES",
    "COLORBLIND_PALETTES",
    "met_brew",
    "export",
    "is_colorblind_friendly",
]
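# Usage sketch (hedged: the palette name, count, and keyword names below mirror
# the R met.brewer interface and are illustrative, not verified against this
# package's exact signature):
#
#     colors = met_brew(name="VanGogh1", n=5, brew_type="discrete")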
|
81607
|
from django.contrib import admin
# Register your models here.
from .models import CustomerInfo, ContactInfo, AddressInfo
@admin.register(CustomerInfo)
class CustomerInfoAdmin(admin.ModelAdmin):
pass
@admin.register(ContactInfo)
class ContactInfoAdmin(admin.ModelAdmin):
pass
@admin.register(AddressInfo)
class AddressInfoAdmin(admin.ModelAdmin):
pass
|
81633
|
from nutshell.algorithms.information_retrieval import ClassicalIR
from nutshell.algorithms.ranking import BaseRanker, TextRank
from nutshell.algorithms.similarity import BaseSimilarityAlgo, BM25Plus
from nutshell.preprocessing.cleaner import NLTKCleaner
from nutshell.preprocessing.preprocessor import TextPreProcessor
class Summarizer:
def __init__(
self, preprocessor: TextPreProcessor = TextPreProcessor(),
similarity_algo: BaseSimilarityAlgo = BM25Plus(),
ranker: BaseRanker = TextRank(),
ir: ClassicalIR = ClassicalIR()
):
"""
Summarizer helps to summarise a corpus with the given reduction ratio.
:param preprocessor: Text preprocessor algorithm. Default - TextPreProcessor.
:param similarity_algo: Algorithm to be used for finding similarity between docs. Default - BM25Plus.
:param ranker: Ranking algorithm to be used to rank the docs. Default - TextRank.
:param ir: Information retrieval algorithm to be used to extract tf, idf and other necessary measures.
Default - ClassicalIR.
"""
self.__preprocessor = preprocessor
self.__similarity_algo = similarity_algo
self.__ranker = ranker
self.__ir = ir
def __repr__(self):
return f"""Summarizer(preprocessor={self.__preprocessor},
similarity_algo={self.__similarity_algo},
ranker={self.__ranker},
ir={self.__ir}
)"""
def summarise(self, corpus, reduction_ratio=0.70, preserve_order=False):
"""
        Returns the summarised text based on the given reduction ratio.
        :param corpus: Text to be summarized
        :param reduction_ratio: Reduction ratio expected for the output text, i.e. if ratio=0.5 then half the number
            of sentences are returned
:param preserve_order: If True, then sentence order is preserved
:return: Summarised text
"""
# Model Pipeline
# Preprocessing
original_token, cleaned_tokens = self.__preprocessor.preprocess(corpus)
# Information retrieval
_idf = self.__ir.calculate_idf(cleaned_tokens)
# Similarity and Ranking
similarity_matrix = self.__similarity_algo.similarity_matrix(cleaned_tokens, _idf)
scores = self.__ranker.get_ranking_scores(similarity_matrix)
summarized_content = self.__ranker.get_top(scores, original_token, reduction_ratio=reduction_ratio,
preserve_order=preserve_order)
return summarized_content
class KeywordExtractor:
def __init__(
self,
preprocessor: TextPreProcessor = TextPreProcessor(cleaner=NLTKCleaner(skip_stemming=True)),
ir: ClassicalIR = ClassicalIR()
):
"""
:param preprocessor: Text preprocessor algorithm. Default - TextPreProcessor.
:param ir: Information retrieval algorithm to be used to extract tf, idf and other necessary measures.
Default - ClassicalIR.
"""
self.__preprocessor = preprocessor
self.__ir = ir
def __repr__(self):
return f"KeywordExtractor(preprocessor={self.__preprocessor}, ir={self.__ir})"
def extract_keywords(self, corpus, count=5, raw=False):
original_token, tokens = self.__preprocessor.preprocess(corpus)
tf = self.__ir.calculate_tf(tokens)
idf = self.__ir.calculate_idf(tokens)
keywords = ClassicalIR.cumulative_weight(tf, idf, order=not raw)
        return dict(keywords) if raw else list(zip(*keywords))[0][:count]
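# A minimal usage sketch (corpus stands for any plain-text string; the ratio
# and count values are illustrative):
#
#     summarizer = Summarizer()
#     summary = summarizer.summarise(corpus, reduction_ratio=0.5, preserve_order=True)
#     keywords = KeywordExtractor().extract_keywords(corpus, count=5)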
|
81718
|
from itertools import combinations
from solvatore import Solvatore
from cipher_description import CipherDescription
from ciphers import present
cipher = present.present
rounds = 9
solver = Solvatore()
solver.load_cipher(cipher)
solver.set_rounds(rounds)
# Loop over all combinations with a single constant (non-active) bit
for bits in combinations(range(64), 1):
constant_bits = bits
active_bits = {i for i in range(64) if i not in constant_bits}
# Find all balanced bits
balanced_bits = []
for i in range(cipher.state_size):
if solver.is_bit_balanced(i, rounds, active_bits):
balanced_bits.append(i)
if len(balanced_bits) > 0:
print("Found distinguisher!")
print("Constant Bits: ", len(constant_bits),constant_bits)
print("Balanced Bits: ", len(balanced_bits),balanced_bits)
|
81778
|
import logging
def pytest_addoption(parser):
parser.addoption(
"--no-linting",
action="store_true",
default=False,
help="Skip linting checks",
)
parser.addoption(
"--linting",
action="store_true",
default=False,
help="Only run linting checks",
)
def pytest_collection_modifyitems(session, config, items):
if config.getoption("--no-linting"):
items[:] = [item for item in items if not item.get_closest_marker('flake8')]
if config.getoption("--linting"):
items[:] = [item for item in items if item.get_closest_marker('flake8')]
def pytest_configure(config):
"""Flake8 is very verbose by default. Silence it."""
logging.getLogger("flake8").setLevel(logging.WARNING)
|
81791
|
from dispatch.plugins.bases import StoragePlugin
class TestStoragePlugin(StoragePlugin):
title = "Dispatch Test Plugin - Storage"
slug = "test-storage"
def get(self, **kwargs):
return
def create(self, items, **kwargs):
return
def update(self, items, **kwargs):
return
def delete(self, items, **kwargs):
return
def list(self, **kwargs):
return
def add_participant(self, items, **kwargs):
return
def remove_participant(self, items, **kwargs):
return
def add_file(self, **kwargs):
return
def delete_file(self, **kwargs):
return
def move_file(self, **kwargs):
return
def list_files(self, **kwargs):
return
|