|
from flask import Flask, abort, request, Response, stream_with_context, \
jsonify
from flask_restx import Api, Resource
from flask_jwt_extended import create_access_token
import os
import requests
import json
import psycopg2
from qwc_services_core.auth import auth_manager, optional_auth, get_auth_user
from qwc_services_core.tenant_handler import TenantHandler
from qwc_services_core.runtime_config import RuntimeConfig
from external_ows_layers import ExternalOwsLayers
# Flask application
app = Flask(__name__)
api = Api(app, version='1.0', title='Print API',
description='API for QWC Print service',
default_label='Print operations', doc='/api/')
# disable verbose 404 error message
app.config['ERROR_404_HELP'] = False
auth = auth_manager(app, api)
tenant_handler = TenantHandler(app.logger)
config_handler = RuntimeConfig("print", app.logger)
# routes
@api.route('/<mapid>')
@api.param('mapid', 'The WMS service map name')
class Print(Resource):
@api.doc('print')
@optional_auth
@api.param('DPI', 'The print dpi', _in='formData')
@api.param('SRS', 'The SRS of the specified map extent', _in='formData')
@api.param('TEMPLATE', 'The print template', _in='formData')
@api.param('FORMAT', 'The file format for the print output', _in='formData')
@api.param('TRANSPARENT', 'Whether to use transparent background if possible', _in='formData')
@api.param('LAYERS', 'The layers list for opacities', _in='formData')
@api.param('OPACITIES', 'The opacities of the layers to print', _in='formData')
@api.param('COLORS', 'The colors list for external WFS layers', _in='formData')
@api.param('map0:LAYERS', 'The layers to print', _in='formData')
@api.param('map0:SCALE', 'The scale for the specified map', _in='formData')
@api.param('map0:EXTENT', 'The extent for the specified map', _in='formData')
@api.param('map0:ROTATION', 'The rotation for the specified map', _in='formData')
@api.param('map0:GRID_INTERVAL_X', 'The x interval for the grid of the specified map', _in='formData')
@api.param('map0:GRID_INTERVAL_Y', 'The y interval for the grid of the specified map', _in='formData')
@api.param('map0:HIGHLIGHT_GEOM', 'The geometries to add to the specified map', _in='formData')
@api.param('map0:HIGHLIGHT_SYMBOL', 'The styles for the highlight geometries', _in='formData')
@api.param('map0:HIGHLIGHT_LABELSTRING', 'The label texts for the highlight geometries', _in='formData')
@api.param('map0:HIGHLIGHT_LABELCOLOR', 'The label colors for the highlight geometries', _in='formData')
@api.param('map0:HIGHLIGHT_LABELBUFFERCOLOR', 'The label buffer colors for the highlight geometries', _in='formData')
@api.param('map0:HIGHLIGHT_LABELBUFFERSIZE', 'The label buffer sizes for the highlight geometries', _in='formData')
@api.param('map0:HIGHLIGHT_LABELSIZE', 'The label sizes for the highlight geometries', _in='formData')
@api.param('CONTENT_DISPOSITION', 'Content disposition mode, either inline or attachment', _in='formData')
def post(self, mapid):
"""Submit query
Return map print
"""
tenant = tenant_handler.tenant()
config = config_handler.tenant_config(tenant)
identity = get_auth_user()
ogc_service_url = config.get(
'ogc_service_url', 'http://localhost:5013/')
print_pdf_filename = config.get('print_pdf_filename')
qgs_postfix = config.get('qgs_postfix', '')
qgis_server_version = config.get('qgis_server_version', '2.18.19')
label_queries_config = config.get('label_queries', [])
# TODO: read resources
post_params = dict(request.form.items())
app.logger.info("POST params: %s" % post_params)
content_disposition = post_params.get('CONTENT_DISPOSITION', 'attachment')
if 'CONTENT_DISPOSITION' in post_params:
del post_params['CONTENT_DISPOSITION']
params = {
"SERVICE": "WMS",
"VERSION": "1.3.0",
"REQUEST": "GetPrint"
}
params.update(post_params)
# normalize parameter keys to upper case
params = {k.upper(): v for k, v in params.items()}
# Search layers parameter
layerparam = None
for key, value in params.items():
if key.endswith(":LAYERS"):
layerparam = key
break
if not layerparam:
abort(400, "Missing <mapName>:LAYERS parameter")
template = params.get('TEMPLATE')
layers = params.get(layerparam, '').split(',')
opacities = params.get('OPACITIES', [])
if opacities:
opacities = opacities.split(',')
colors = params.get('COLORS', '').split(',')
# extract any external WMS and WFS layers
external_ows_layers = ExternalOwsLayers(
qgis_server_version, app.logger)
external_ows_layers.update_params(params, layerparam)
# add fields from custom label queries
for label_config in label_queries_config:
conn = psycopg2.connect(label_config["db_url"])
sql = label_config["query"].replace(
"$username$", "'%s'" % (identity or "")
)
cursor = conn.cursor()
cursor.execute(sql)
row = cursor.fetchone()
cursor.close()
if row:
for idx, param in enumerate(label_config['params']):
params[param] = row[idx]
conn.close()
# forward to OGC service
headers = {}
if identity:
# add authorization headers for forwarding identity
app.logger.debug(
"Adding authorization headers for identity '%s'" % identity
)
access_token = create_access_token(identity)
headers['Authorization'] = "Bearer " + access_token
if tenant_handler.tenant_header:
headers[tenant_handler.tenant_header] = request.headers.get(
tenant_handler.tenant_header)
url = ogc_service_url.rstrip("/") + "/" + mapid + qgs_postfix
        app.logger.info("Forwarding request to %s\n%s" % (url, params))
        # stream=True lets the response body be proxied without buffering it fully in memory
        req = requests.post(url, timeout=120, data=params, headers=headers,
                            stream=True)
response = Response(
stream_with_context(
req.iter_content(chunk_size=1024)
), status=req.status_code
)
response.headers['content-type'] = req.headers['content-type']
if req.headers['content-type'] == 'application/pdf':
filename = print_pdf_filename or (mapid + '.pdf')
response.headers['content-disposition'] = content_disposition + \
'; filename=' + filename
return response
""" readyness probe endpoint """
@app.route("/ready", methods=['GET'])
def ready():
return jsonify({"status": "OK"})
""" liveness probe endpoint """
@app.route("/healthz", methods=['GET'])
def healthz():
return jsonify({"status": "OK"})
# local webserver
if __name__ == '__main__':
print("Starting GetPrint service...")
from flask_cors import CORS
CORS(app)
app.run(host='localhost', port=5019, debug=True)
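# Illustrative client call (not part of the service; the host, map name,
# template and extent below are placeholders, not values from this repository):
#
# import requests
#
# resp = requests.post(
#     'http://localhost:5019/somemap',
#     data={
#         'TEMPLATE': 'A4 Landscape',
#         'FORMAT': 'pdf',
#         'DPI': '300',
#         'SRS': 'EPSG:3857',
#         'map0:LAYERS': 'layer1,layer2',
#         'map0:SCALE': '25000',
#         'map0:EXTENT': '950000,6000000,960000,6010000',
#     })
# with open('print.pdf', 'wb') as f:
#     f.write(resp.content)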
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.utils.translation import gettext as _
from core import models
class UserAdmin(BaseUserAdmin):
ordering = ['id']
list_display = ['email', 'name']
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Personal Info'), {'fields': ('name',)}),
(
_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser')}
),
(_('Important Dates'), {'fields': ('last_login',)})
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'password1', 'password2')
}),
)
admin.site.register(models.User, UserAdmin)
|
try:
    num1 = int(input("Enter the first number: "))
    num2 = int(input("Enter the second number: "))
    total = num1 / num2
    print("The division is", total)
except ValueError as msg1:
    # Raised when an input cannot be converted to an integer.
    print("The input is not valid:", msg1)
except:
    # Catches anything else, e.g. ZeroDivisionError when the second number is 0.
    print("Default except block")
|
# uncompyle6 version 3.2.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: encodings.cp856
import codecs
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
def getregentry():
return codecs.CodecInfo(name='cp856', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
decoding_table = u'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\x7f\u05d0\u05d1\u05d2\u05d3\u05d4\u05d5\u05d6\u05d7\u05d8\u05d9\u05da\u05db\u05dc\u05dd\u05de\u05df\u05e0\u05e1\u05e2\u05e3\u05e4\u05e5\u05e6\u05e7\u05e8\u05e9\u05ea\ufffe\xa3\ufffe\xd7\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\xae\xac\xbd\xbc\ufffe\xab\xbb\u2591\u2592\u2593\u2502\u2524\ufffe\ufffe\ufffe\xa9\u2563\u2551\u2557\u255d\xa2\xa5\u2510\u2514\u2534\u252c\u251c\u2500\u253c\ufffe\ufffe\u255a\u2554\u2569\u2566\u2560\u2550\u256c\xa4\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\u2518\u250c\u2588\u2584\xa6\ufffe\u2580\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\xb5\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\ufffe\xaf\xb4\xad\xb1\u2017\xbe\xb6\xa7\xf7\xb8\xb0\xa8\xb7\xb9\xb3\xb2\u25a0\xa0'
encoding_table = codecs.charmap_build(decoding_table)
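# Minimal usage sketch (an assumption for illustration: the stdlib normally
# registers this module automatically as 'encodings.cp856', so manual
# registration is only needed if the module lives outside that package):
#
# def _cp856_search(name):
#     return getregentry() if name == 'cp856' else None
#
# codecs.register(_cp856_search)
# assert u'\u05d0'.encode('cp856') == b'\x80'   # HEBREW LETTER ALEF -> 0x80
# assert b'\x80'.decode('cp856') == u'\u05d0'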
|
"""
This is the transform file
which include much strategy
for the data augmentation.
"""
import math
import random
import torchvision.transforms as T
from .transforms import RandomErasing
__author__ = ""
def build_transforms(cfg, is_train=True):
    """Build a torchvision transform pipeline.

    The training pipeline applies: resize, random horizontal flip, padding,
    random crop, conversion to tensor, normalization and RandomErasing.
    The test pipeline applies: resize, conversion to tensor and normalization.

    Arguments:
        cfg -- experiment configuration providing the INPUT.* fields used below

    Keyword Arguments:
        is_train {bool} -- whether to build the training pipeline (default: {True})

    Returns:
        torchvision.transforms.Compose -- the composed transform
    """
    normalize_transform = T.Normalize(mean=cfg.INPUT.PIXEL_MEAN,
                                      std=cfg.INPUT.PIXEL_STD)
    if is_train:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TRAIN),
            T.RandomHorizontalFlip(p=cfg.INPUT.PROB),
            T.Pad(cfg.INPUT.PADDING),
            T.RandomCrop(cfg.INPUT.SIZE_TRAIN),
            T.ToTensor(),
            normalize_transform,
            RandomErasing(probability=cfg.INPUT.RE_PROB,
                          mean=cfg.INPUT.PIXEL_MEAN)
        ])
    else:
        transform = T.Compose([
            T.Resize(cfg.INPUT.SIZE_TEST),
            T.ToTensor(),
            normalize_transform
        ])
    return transform
# Local implementation (shadows the RandomErasing imported above) so this file is self-contained.
class RandomErasing(object):
    """Randomly selects a rectangle region in an image and erases its pixels.

    'Random Erasing Data Augmentation' by Zhong et al.
    See https://arxiv.org/pdf/1708.04896.pdf

    Args:
        probability: The probability that the Random Erasing operation will be performed.
        sl: Minimum proportion of erased area against input image.
        sh: Maximum proportion of erased area against input image.
        r1: Minimum aspect ratio of erased area.
        mean: Erasing value.
    """

    def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3,
                 mean=(0.4914, 0.4822, 0.4465)):
        self.probability = probability
        self.mean = mean
        self.sl = sl
        self.sh = sh
        self.r1 = r1

    def __call__(self, img):
        # Keep the image unchanged with probability (1 - self.probability).
        if random.uniform(0, 1) >= self.probability:
            return img
        # Try up to 100 times to sample a valid rectangle to erase.
        for attempt in range(100):
            area = img.size()[1] * img.size()[2]
            target_area = random.uniform(self.sl, self.sh) * area
            aspect_ratio = random.uniform(self.r1, 1 / self.r1)
            h = int(round(math.sqrt(target_area * aspect_ratio)))
            w = int(round(math.sqrt(target_area / aspect_ratio)))
            if w < img.size()[2] and h < img.size()[1]:
                x1 = random.randint(0, img.size()[1] - h)
                y1 = random.randint(0, img.size()[2] - w)
                if img.size()[0] == 3:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                    img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
                    img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
                else:
                    img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
                return img
        return img
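# Usage sketch (the cfg field values are assumptions shown for illustration;
# a real config object, e.g. a yacs CfgNode, would provide them):
#
# cfg.INPUT.SIZE_TRAIN = [256, 128]
# cfg.INPUT.SIZE_TEST = [256, 128]
# cfg.INPUT.PROB = 0.5
# cfg.INPUT.PADDING = 10
# cfg.INPUT.PIXEL_MEAN = [0.485, 0.456, 0.406]
# cfg.INPUT.PIXEL_STD = [0.229, 0.224, 0.225]
# cfg.INPUT.RE_PROB = 0.5
#
# train_transform = build_transforms(cfg, is_train=True)
# val_transform = build_transforms(cfg, is_train=False)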
|
"""
Trains a Pixel-CNN++ generative model on CIFAR-10 or Tiny ImageNet data.
Uses multiple GPUs, indicated by the flag --nr-gpu
Example usage:
CUDA_VISIBLE_DEVICES=0,1,2,3 python train_double_cnn.py --nr_gpu 4
"""
import os
import sys
import time
import json
import argparse
import numpy as np
import tensorflow as tf
import scipy.misc
import pixel_cnn_pp.nn as nn
import pixel_cnn_pp.plotting as plotting
from pixel_cnn_pp.model import model_spec, model_spec_encoder
import data.cifar10_data as cifar10_data
import data.imagenet_data as imagenet_data
from pixel_cnn_pp.encoder import compute_mutual_information, ComputeLL
# -----------------------------------------------------------------------------
parser = argparse.ArgumentParser()
# data I/O
parser.add_argument('-i', '--data_dir', type=str, default='data', help='Location for the dataset')
parser.add_argument('-o', '--save_dir', type=str, default='elbo', help='Location for parameter checkpoints and samples')
parser.add_argument('-d', '--data_set', type=str, default='cifar', help='Can be either cifar|imagenet')
parser.add_argument('-t', '--save_interval', type=int, default=1, help='Every how many epochs to write checkpoint/samples?')
parser.add_argument('-r', '--load_params', dest='load_params', action='store_true', help='Restore training from previous model checkpoint?')
parser.add_argument('-name', '--name', type=str, default='elbo', help='Name of the network')
# model
parser.add_argument('-q', '--nr_resnet', type=int, default=5, help='Number of residual blocks per stage of the model')
parser.add_argument('-n', '--nr_filters', type=int, default=160, help='Number of filters to use across the model. Higher = larger model.')
parser.add_argument('-m', '--nr_logistic_mix', type=int, default=10, help='Number of logistic components in the mixture. Higher = more flexible model')
parser.add_argument('-z', '--resnet_nonlinearity', type=str, default='concat_elu', help='Which nonlinearity to use in the ResNet layers. One of "concat_elu", "elu", "relu" ')
parser.add_argument('-c', '--class_conditional', dest='class_conditional', action='store_true', help='Condition generative model on labels?')
parser.add_argument('-ae', '--use_autoencoder', dest='use_autoencoder', action='store_true', help='Use autoencoders?')
parser.add_argument('-reg', '--reg_type', type=str, default='elbo', help='Type of regularization to use for autoencoder')
parser.add_argument('-cs', '--chain_step', type=int, default=10, help='Steps to run Markov chain for sampling')
# optimization
parser.add_argument('-l', '--learning_rate', type=float, default=0.001, help='Base learning rate')
parser.add_argument('-e', '--lr_decay', type=float, default=0.999995, help='Learning rate decay, applied every step of the optimization')
parser.add_argument('-b', '--batch_size', type=int, default=12, help='Batch size during training per GPU')
parser.add_argument('-a', '--init_batch_size', type=int, default=80, help='How much data to use for data-dependent initialization.')
parser.add_argument('-p', '--dropout_p', type=float, default=0.5, help='Dropout strength (i.e. 1 - keep_prob). 0 = No dropout, higher = more dropout.')
parser.add_argument('-x', '--max_epochs', type=int, default=5000, help='How many epochs to run in total?')
parser.add_argument('-g', '--nr_gpu', type=int, default=2, help='How many GPUs to distribute the training across?')
parser.add_argument('-gid', '--gpu_id', type=str, default='', help='Which GPUs to use')
# evaluation
parser.add_argument('--polyak_decay', type=float, default=0.9995, help='Exponential decay rate of the sum of previous model iterates during Polyak averaging')
# reproducibility
parser.add_argument('-s', '--seed', type=int, default=1, help='Random seed to use')
args = parser.parse_args()
print('input args:\n', json.dumps(vars(args), indent=4, separators=(',',':'))) # pretty print args
# python train.py --use_autoencoder --save_dir=elbo --name=elbo --reg_type=elbo
# python train.py --use_autoencoder --save_dir=no_reg --name=no_reg --reg_type=no_reg
if args.gpu_id != "":
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
latent_dim = 20
args.latent_dim = latent_dim
# -----------------------------------------------------------------------------
# fix random seed for reproducibility
rng = np.random.RandomState(args.seed)
tf.set_random_seed(args.seed)
# initialize data loaders for train/test splits
if args.data_set == 'imagenet' and args.class_conditional:
raise("We currently don't have labels for the small imagenet data set")
DataLoader = {'cifar':cifar10_data.DataLoader, 'imagenet':imagenet_data.DataLoader}[args.data_set]
train_data = DataLoader(args.data_dir, 'train', args.batch_size * args.nr_gpu, rng=rng, shuffle=True, return_labels=args.class_conditional)
test_data = DataLoader(args.data_dir, 'test', args.batch_size * args.nr_gpu, shuffle=False, return_labels=args.class_conditional)
obs_shape = train_data.get_observation_size() # e.g. a tuple (32,32,3)
assert len(obs_shape) == 3, 'assumed right now'
# data place holders
x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
xs = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape) for i in range(args.nr_gpu)]
encoder_x_init = tf.placeholder(tf.float32, shape=(args.init_batch_size,) + obs_shape)
encoder_x = [tf.placeholder(tf.float32, shape=(args.batch_size, ) + obs_shape) for i in range(args.nr_gpu)]
# if the model is class-conditional we'll set up label placeholders + one-hot encodings 'h' to condition on
if args.class_conditional:
num_labels = train_data.get_num_labels()
y_init = tf.placeholder(tf.int32, shape=(args.init_batch_size,))
h_init = tf.one_hot(y_init, num_labels)
y_sample = np.split(np.mod(np.arange(args.batch_size*args.nr_gpu), num_labels), args.nr_gpu)
h_sample = [tf.one_hot(tf.Variable(y_sample[i], trainable=False), num_labels) for i in range(args.nr_gpu)]
ys = [tf.placeholder(tf.int32, shape=(args.batch_size,)) for i in range(args.nr_gpu)]
hs = [tf.one_hot(ys[i], num_labels) for i in range(args.nr_gpu)]
elif args.use_autoencoder:
# h_init = tf.placeholder(tf.float32, shape=(args.init_batch_size, latent_dim))
h_sample = [tf.placeholder(tf.float32, shape=(args.batch_size, latent_dim)) for i in range(args.nr_gpu)]
else:
h_init = None
h_sample = [None] * args.nr_gpu
hs = h_sample
# create the model
model_opt = { 'nr_resnet': args.nr_resnet, 'nr_filters': args.nr_filters, 'nr_logistic_mix': args.nr_logistic_mix, 'resnet_nonlinearity': args.resnet_nonlinearity }
model = tf.make_template('model', model_spec)
if args.use_autoencoder:
encoder_opt = model_opt.copy()
encoder_opt['reg_type'] = args.reg_type
encoder_opt['latent_dim'] = latent_dim
encoder_model = tf.make_template('encoder', model_spec_encoder)
# run once for data dependent initialization of parameters
if args.use_autoencoder:
encoder = encoder_model(encoder_x_init, init=True, dropout_p=args.dropout_p, **encoder_opt)
gen_par = model(x_init, encoder.pred, init=True, dropout_p=args.dropout_p, **model_opt)
else:
gen_par = model(x_init, h_init, init=True, dropout_p=args.dropout_p, **model_opt)
# keep track of moving average
all_params = tf.trainable_variables()
ema = tf.train.ExponentialMovingAverage(decay=args.polyak_decay)
maintain_averages_op = tf.group(ema.apply(all_params))
# get loss gradients over multiple GPUs
grads = []
loss_gen = []
loss_gen_reg = []
loss_gen_elbo = []
loss_gen_test = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
# train
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=None, dropout_p=args.dropout_p, **encoder_opt)
gen_par = model(xs[i], encoder.pred, ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen_reg.append(encoder.reg_loss)
loss_gen_elbo.append(encoder.elbo_loss)
else:
gen_par = model(xs[i], hs[i], ema=None, dropout_p=args.dropout_p, **model_opt)
loss_gen.append(nn.discretized_mix_logistic_loss(xs[i], gen_par))
# gradients
if args.use_autoencoder:
total_loss = loss_gen[i] + loss_gen_reg[i]
else:
total_loss = loss_gen[i]
grads.append(tf.gradients(total_loss, all_params))
# test
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=ema, dropout_p=0., **encoder_opt)
gen_par = model(xs[i], encoder.pred, ema=ema, dropout_p=0., **model_opt)
else:
gen_par = model(xs[i], hs[i], ema=ema, dropout_p=0., **model_opt)
loss_gen_test.append(nn.discretized_mix_logistic_loss(xs[i], gen_par))
# add losses and gradients together and get training updates
tf_lr = tf.placeholder(tf.float32, shape=[])
with tf.device('/gpu:0'):
for i in range(1,args.nr_gpu):
loss_gen[0] += loss_gen[i]
loss_gen_test[0] += loss_gen_test[i]
if args.use_autoencoder:
loss_gen_reg[0] += loss_gen_reg[i]
loss_gen_elbo[0] += loss_gen_elbo[i]
for j in range(len(grads[0])):
grads[0][j] += grads[i][j]
# training op
tf.summary.scalar('ll_loss', loss_gen[0])
if args.use_autoencoder:
tf.summary.scalar('reg', loss_gen_reg[0])
tf.summary.scalar('elbo', loss_gen_elbo[0])
optimizer = tf.group(nn.adam_updates(all_params, grads[0], lr=tf_lr, mom1=0.95, mom2=0.9995), maintain_averages_op)
# convert loss to bits/dim
bits_per_dim = loss_gen[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
bits_per_dim_test = loss_gen_test[0]/(args.nr_gpu*np.log(2.)*np.prod(obs_shape)*args.batch_size)
tf.summary.scalar('ll_bits_per_dim', bits_per_dim)
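# Worked example for the conversion above (illustrative numbers, assuming the
# CIFAR-10 defaults in this script): obs_shape = (32, 32, 3) gives 3072
# dimensions per image, so with batch_size = 12 and nr_gpu = 2 the summed
# negative log-likelihood (in nats) is divided by
# 2 * ln(2) * 3072 * 12 ~= 51104, i.e. converted to bits (divide by ln 2)
# and averaged over every pixel-channel of every image in the global batch.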
# sample from the model
new_x_gen = []
encoder_list = []
for i in range(args.nr_gpu):
with tf.device('/gpu:%d' % i):
if args.use_autoencoder:
encoder = encoder_model(encoder_x[i], ema=ema, dropout_p=0, **encoder_opt)
gen_par = model(xs[i], h_sample[i], ema=ema, dropout_p=0, **model_opt)
encoder_list.append(encoder)
else:
gen_par = model(xs[i], h_sample[i], ema=ema, dropout_p=0, **model_opt)
new_x_gen.append(nn.sample_from_discretized_mix_logistic(gen_par, args.nr_logistic_mix))
compute_ll = ComputeLL(latent_dim)
def sample_from_model(sess):
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
new_x_gen_np = sess.run(new_x_gen, {xs[i]: x_gen[i] for i in range(args.nr_gpu)})
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
return np.concatenate(x_gen, axis=0)
def sample_from_decoder_prior(sess):
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
latent_code = [np.random.normal(size=(args.batch_size, latent_dim)) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
feed_dict = {xs[i]: x_gen[i] for i in range(args.nr_gpu)}
feed_dict.update({h_sample[i]: latent_code[i] for i in range(args.nr_gpu)})
new_x_gen_np = sess.run(new_x_gen, feed_dict)
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
return np.concatenate(x_gen, axis=0)
def sample_from_markov_chain(sess, initial=None):
history = []
if initial is None:
encoder_current = [np.random.uniform(0.0, 1.0, (args.batch_size,) + obs_shape) for i in range(args.nr_gpu)]
else:
encoder_current = np.split(initial, args.nr_gpu)
latent_op = [encoder.pred for encoder in encoder_list]
num_steps = args.chain_step
history.append(np.concatenate(encoder_current, axis=0))
for step in range(num_steps):
start_time = time.time()
feed_dict = {encoder_x[i]: encoder_current[i] for i in range(args.nr_gpu)}
latent_code = sess.run(latent_op, feed_dict)
x_gen = [np.zeros((args.batch_size,) + obs_shape, dtype=np.float32) for i in range(args.nr_gpu)]
for yi in range(obs_shape[0]):
for xi in range(obs_shape[1]):
feed_dict = {xs[i]: x_gen[i] for i in range(args.nr_gpu)}
feed_dict.update({h_sample[i]: latent_code[i] for i in range(args.nr_gpu)})
new_x_gen_np = sess.run(new_x_gen, feed_dict)
for i in range(args.nr_gpu):
x_gen[i][:,yi,xi,:] = new_x_gen_np[i][:,yi,xi,:]
history.append(np.concatenate(x_gen, axis=0))
encoder_current = x_gen
print("%d (%fs)" % (step, time.time() - start_time))
sys.stdout.flush()
return history
def plot_markov_chain(history):
canvas = np.zeros((args.nr_gpu*args.batch_size*obs_shape[0], len(history)*obs_shape[1], obs_shape[2]))
for i in range(args.nr_gpu*args.batch_size):
for j in range(len(history)):
canvas[i*obs_shape[0]:(i+1)*obs_shape[0], j*obs_shape[1]:(j+1)*obs_shape[1], :] = history[j][i]
return canvas
# init & save
initializer = tf.global_variables_initializer()
saver = tf.train.Saver()
all_summary = tf.summary.merge_all()
writer = tf.summary.FileWriter(logdir=args.save_dir)
file_logger = open(os.path.join(args.save_dir, 'train_log'), 'w')
# turn numpy inputs into feed_dict for use with tensorflow
def make_feed_dict(data, init=False):
if type(data) is tuple:
x,y = data
else:
x = data
y = None
x = np.cast[np.float32]((x - 127.5) / 127.5) # input to pixelCNN is scaled from uint8 [0,255] to float in range [-1,1]
if init:
feed_dict = {x_init: x}
if args.use_autoencoder:
feed_dict.update({encoder_x_init: x})
if y is not None:
feed_dict.update({y_init: y})
else:
x = np.split(x, args.nr_gpu)
feed_dict = {xs[i]: x[i] for i in range(args.nr_gpu)}
if args.use_autoencoder:
feed_dict.update({encoder_x[i]: x[i] for i in range(args.nr_gpu)})
if y is not None:
y = np.split(y, args.nr_gpu)
feed_dict.update({ys[i]: y[i] for i in range(args.nr_gpu)})
return feed_dict
# //////////// perform training //////////////
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
print('starting training')
test_bpd = []
lr = args.learning_rate
global_step = 0
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9, allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
for epoch in range(args.max_epochs):
# init
if epoch == 0:
feed_dict = make_feed_dict(train_data.next(args.init_batch_size), init=True) # manually retrieve exactly init_batch_size examples
train_data.reset() # rewind the iterator back to 0 to do one full epoch
sess.run(initializer, feed_dict)
print('initializing the model...')
if args.load_params:
ckpt_file = args.save_dir + '/params_' + args.data_set + '.ckpt'
print('restoring parameters from', ckpt_file)
saver.restore(sess, ckpt_file)
# Compute mutual information
file_logger.write("%d " % epoch)
if args.use_autoencoder:
mutual_info = compute_mutual_information(data=train_data, args=args, sess=sess, encoder_list=encoder_list, ll_compute=compute_ll)
train_data.reset()
file_logger.write("%f " % mutual_info)
file_logger.flush()
# generate samples from the model
if args.use_autoencoder and epoch % 20 == 0:
print("Generating MC")
start_time = time.time()
initial = np.random.uniform(0.0, 1.0, (args.batch_size * args.nr_gpu,) + obs_shape)
for mc_step in range(100):
sample_history = sample_from_markov_chain(sess, initial)
initial = sample_history[-1]
sample_plot = plot_markov_chain(sample_history)
scipy.misc.imsave(os.path.join(args.save_dir, '%s_mc%d.png' % (args.data_set, mc_step)), sample_plot)
print("Finished, time elapsed %fs" % (time.time() - start_time))
exit(0)
# generate samples from the model
if epoch % 2 == 0:
print("Generating samples")
start_time = time.time()
if args.use_autoencoder:
sample_x = sample_from_decoder_prior(sess)
else:
sample_x = sample_from_model(sess)
img_tile = plotting.img_tile(sample_x[:int(np.floor(np.sqrt(args.batch_size * args.nr_gpu)) ** 2)],
aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title=args.data_set + ' samples')
plotting.plt.savefig(os.path.join(args.save_dir, '%s_sample%d.png' % (args.data_set, epoch)))
plotting.plt.close('all')
print("Finished, time elapsed %fs" % (time.time() - start_time))
begin = time.time()
# train for one epoch
train_losses = []
batch_c = 10
for d in train_data:
feed_dict = make_feed_dict(d)
# forward/backward/update model on each gpu
lr *= args.lr_decay
feed_dict.update({ tf_lr: lr })
l, _, summaries = sess.run([bits_per_dim, optimizer, all_summary], feed_dict)
train_losses.append(l)
if global_step % 5 == 0:
writer.add_summary(summaries, global_step)
global_step += 1
train_loss_gen = np.mean(train_losses)
# compute likelihood over test data
test_losses = []
for d in test_data:
feed_dict = make_feed_dict(d)
l = sess.run(bits_per_dim_test, feed_dict)
test_losses.append(l)
test_loss_gen = np.mean(test_losses)
test_bpd.append(test_loss_gen)
file_logger.write("%f\n" % test_loss_gen)
# log progress to console
print("Iteration %d, time = %ds, train bits_per_dim = %.4f, test bits_per_dim = %.4f" % (epoch, time.time()-begin, train_loss_gen, test_loss_gen))
sys.stdout.flush()
if epoch % args.save_interval == 0:
# save params
saver.save(sess, args.save_dir + '/params_' + args.data_set + '.ckpt')
np.savez(args.save_dir + '/test_bpd_' + args.data_set + '.npz', test_bpd=np.array(test_bpd))
|
import sys
import copy
from rlpyt.utils.launching.affinity import encode_affinity, quick_affinity_code
from rlpyt.utils.launching.exp_launcher import run_experiments
from rlpyt.utils.launching.variant import make_variants, VariantLevel
args = sys.argv[1:]
assert len(args) == 2
my_computer = int(args[0])
num_computers = int(args[1])
print(f"MY_COMPUTER: {my_computer}, NUM_COMPUTERS: {num_computers}")
script = "rlpyt/ul/experiments/rl_from_ul/scripts/atari/train/atari_ppo_from_ul_serial.py"
affinity_code = quick_affinity_code(contexts_per_gpu=3)
runs_per_setting = 3
experiment_title = "ppo_from_vae_first_1"
variant_levels_1 = list()
variant_levels_2 = list()
# variant_levels_3 = list()
learning_rates = [1e-3]
lr_schedules = ["cosine"]
lr_warmups = [1e3]
values = list(zip(learning_rates, lr_schedules, lr_warmups))
dir_names = ["{}lr_{}sched_{}wrmp".format(*v) for v in values]
keys = [("pretrain", "learning_rate"), ("pretrain", "learning_rate_anneal"),
("pretrain", "learning_rate_warmup")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
n_updates = [20e3, 100e3]
values = list(zip(n_updates))
dir_names = ["{}updates".format(*v) for v in values]
keys = [("pretrain", "n_updates")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# n_steps_predict = [0, 1, 3]
# hidden_sizes = [None, 512, 512]
n_steps_predict = [3]
hidden_sizes = [512]
values = list(zip(n_steps_predict, hidden_sizes))
dir_names = ["{}nstepspredict_{}hdsz".format(*v) for v in values]
keys = [("pretrain", "n_steps_predict"), ("pretrain", "hidden_sizes")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
kl_losses = [1., 0.1]
values = list(zip(kl_losses))
dir_names = ["{}klcoef".format(*v) for v in values]
keys = [("pretrain", "kl_coeff")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# games = ["pong", "qbert", "seaquest", "space_invaders",
# "alien", "breakout", "frostbite", "gravitar"]
games = ["breakout", "gravitar", "qbert", "space_invaders"]
values = list(zip(games))
dir_names = games
keys = [("env", "game")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
# variant_levels_3.append(VariantLevel(keys, values, dir_names))
##################################################
# RL CONFIG (mostly)
n_steps = [25e6]
pretrain_algos = ["VAE"]
replays = ["20200608/15M_VecEps_B78"]
model_dirs = ["/data/adam/ul4rl/models/20200825/atari_ul_vae_first_1/"]
values = list(zip(
n_steps,
pretrain_algos,
replays,
model_dirs,
))
dir_names = ["RlFromUl"] # TRAIN SCRIPT SPLITS OFF THIS
keys = [
("runner", "n_steps"),
("pretrain", "algo"),
("pretrain", "replay"),
("pretrain", "model_dir"),
]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
stop_grads = ["conv"]
load_fc1s = [False]
hidden_sizes = [512] # I guess to be fair? there is also fc1=512
values = list(zip(stop_grads, load_fc1s, hidden_sizes))
dir_names = ["{}_stpgrd_{}loadfc1_{}hs".format(*v) for v in values]
keys = [("model", "stop_grad"), ("agent", "load_fc1"),
("model", "hidden_sizes")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
# variant_levels_2.append(VariantLevel(keys, values, dir_names))
normalize_convs = [False, True]
values = list(zip(normalize_convs))
dir_names = ["{}normconv".format(*v) for v in values]
keys = [("model", "normalize_conv_out")]
variant_levels_1.append(VariantLevel(keys, values, dir_names))
variants_1, log_dirs_1 = make_variants(*variant_levels_1)
# variants_2, log_dirs_2 = make_variants(*variant_levels_2)
variants = variants_1 # + variants_2
log_dirs = log_dirs_1 # + log_dirs_2
num_variants = len(variants)
variants_per = num_variants // num_computers
my_start = my_computer * variants_per
if my_computer == num_computers - 1:
my_end = num_variants
else:
my_end = (my_computer + 1) * variants_per
my_variants = variants[my_start:my_end]
my_log_dirs = log_dirs[my_start:my_end]
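# Illustrative split (the counts are assumptions for the example, not taken
# from this launch file): with 32 total variants and num_computers = 3,
# variants_per = 32 // 3 = 10, so computer 0 runs variants [0:10),
# computer 1 runs [10:20), and the last computer picks up the remainder,
# running [20:32).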
default_config_key = "ppo_16env"
run_experiments(
script=script,
affinity_code=affinity_code,
experiment_title=experiment_title,
runs_per_setting=runs_per_setting,
variants=my_variants,
log_dirs=my_log_dirs,
common_args=(default_config_key, experiment_title),
)
|
# coding:utf-8
from smtplib import SMTP
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
import os
def myMail(from_email,passwd,to_email,project,content):
SMTPSVR = SMTP('smtp.exmail.qq.com')
to = ','.join(to_email)
    msg = MIMEMultipart('alternative')
msg['Subject'] = Header(project,"utf-8")
msg['From'] = r"%s " % Header("info@mhealth365.com","utf-8")
msg['To'] = to
content = MIMEText(content,'html', 'utf-8')
msg.attach(content)
sendSvr = SMTPSVR
sendSvr.login(from_email,passwd)
errs = sendSvr.sendmail(from_email,to_email,msg.as_string())
sendSvr.quit()
if __name__=='__main__':
from_email = 'info@mhealth365.com'
passwd = 'mHealth365Dev'
to_email = ['jiangfengwei_2@126.com']
project = 'ecg'
content = '<h1>hello</h1>'
myMail(from_email,passwd,to_email,project,content)
|
"""
Classic cart-pole system implemented by Rich Sutton et al.
Copied from http://incompleteideas.net/sutton/book/code/pole.c
permalink: https://perma.cc/C9ZM-652R
"""
import math
import gym
from gym import spaces, logger
from gym.utils import seeding
import numpy as np
class CartPoleEnv(gym.Env):
"""
Description:
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum starts upright, and the goal is to prevent it from falling over by increasing and reducing the cart's velocity.
Source:
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson
Observation:
Type: Box(4)
Num Observation Min Max
0 Cart Position -4.8 4.8
1 Cart Velocity -Inf Inf
2 Pole Angle -24 deg 24 deg
3 Pole Velocity At Tip -Inf Inf
Actions:
Type: Discrete(2)
Num Action
0 Push cart to the left
1 Push cart to the right
        Note: The amount by which the velocity is reduced or increased is not fixed; it depends on the angle the pole is pointing. This is because the center of gravity of the pole increases the amount of energy needed to move the cart underneath it
Reward:
Reward is 1 for every step taken, including the termination step
Starting State:
All observations are assigned a uniform random value in [-0.05..0.05]
Episode Termination:
        Pole Angle is more than 12 degrees (this modified environment uses a 50 degree threshold; see __init__)
Cart Position is more than 2.4 (center of the cart reaches the edge of the display)
Episode length is greater than 200
Solved Requirements
Considered solved when the average reward is greater than or equal to 195.0 over 100 consecutive trials.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 50
}
def __init__(self):
self.theta = 0
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = (self.masspole + self.masscart)
self.length = 0.5 # actually half the pole's length
self.polemass_length = (self.masspole * self.length)
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = 'euler'
# Angle at which to fail the episode
self.theta_threshold_radians = 50 * math.pi / 180 # 12 * math.pi / 180
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation is still within bounds
high = np.array([
self.x_threshold * 2,
np.finfo(np.float32).max,
self.theta_threshold_radians * 2,
np.finfo(np.float32).max])
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.seed()
self.viewer = None
self.state = None
self.steps_beyond_done = None
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
# Action could be user action or machine action
def step(self, action, user_input=None):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
state = self.state
x, x_dot, self.theta, theta_dot = state
# Applies force based on user input, if user input is available
if user_input is not None:
force = self.force_mag if user_input == 1 else -self.force_mag
else:
force = self.force_mag if action == 1 else -self.force_mag
costheta = math.cos(self.theta)
sintheta = math.sin(self.theta)
temp = (force + self.polemass_length * theta_dot * theta_dot * sintheta) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length * (4.0 / 3.0 - self.masspole * costheta * costheta / self.total_mass))
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == 'euler':
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
self.theta = self.theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
self.theta = self.theta + self.tau * theta_dot
self.state = (x, x_dot, self.theta, theta_dot)
done = x < -self.x_threshold \
or x > self.x_threshold \
or self.theta < -self.theta_threshold_radians \
or self.theta > self.theta_threshold_radians
done = bool(done)
if not done:
# Assigns reward as Gaussian function of angle theta of pole
# Bell curve of reward will be different (but this is expected)
if user_input is not None:
                # Even if the angle is 0, the reward may not be high if the machine action differs from the user action
                # Even if the angle is large in magnitude, the reward may not be low if the machine action matches the user action
                # This is so that the DQN optimizes for future reward
                # The Bellman update will optimize for the best long-term user action prediction
                # Learns a reward distribution based on the model action's similarity to the action of a PID controller.
reward = 0.2 * self.gaussian_function(x=self.theta, sigma=np.deg2rad(10),
mu=0) + 0.8 * self.gaussian_function(x=action,
sigma=np.deg2rad(0.5),
mu=user_input)
# reward = self.gaussian_function(x=action, sigma=np.deg2rad(10), mu=user_input)
else:
                # If the angle is close to zero the reward is higher, and vice versa
                # The Bellman update should optimize for the best long-term angle
reward = self.gaussian_function(x=self.theta, sigma=np.deg2rad(10), mu=0)
elif self.steps_beyond_done is None:
# Pole just fell!
self.steps_beyond_done = 0
reward = 1.0
else:
if self.steps_beyond_done == 0:
logger.warn(
"You are calling 'step()' even though this environment has already returned done = True. You should always call 'reset()' once you receive 'done = True' -- any further steps are undefined behavior.")
self.steps_beyond_done += 1
reward = 0.0
return np.array(self.state), reward, done, {}
def reset(self):
self.state = self.np_random.uniform(low=-0.05, high=0.05, size=(4,))
self.steps_beyond_done = None
return np.array(self.state)
def render(self, mode='human'):
screen_width = 600
screen_height = 400
world_width = self.x_threshold * 2
scale = screen_width / world_width
carty = 100 # TOP OF CART
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cart = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
self.carttrans = rendering.Transform()
cart.add_attr(self.carttrans)
self.viewer.add_geom(cart)
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
pole.set_color(.8, .6, .4)
self.poletrans = rendering.Transform(translation=(0, axleoffset))
pole.add_attr(self.poletrans)
pole.add_attr(self.carttrans)
self.viewer.add_geom(pole)
self.axle = rendering.make_circle(polewidth / 2)
self.axle.add_attr(self.poletrans)
self.axle.add_attr(self.carttrans)
self.axle.set_color(.5, .5, .8)
self.viewer.add_geom(self.axle)
self.track = rendering.Line((0, carty), (screen_width, carty))
self.track.set_color(0, 0, 0)
self.viewer.add_geom(self.track)
self._pole_geom = pole
if self.state is None: return None
# Edit the pole polygon vertex
pole = self._pole_geom
l, r, t, b = -polewidth / 2, polewidth / 2, polelen - polewidth / 2, -polewidth / 2
pole.v = [(l, b), (l, t), (r, t), (r, b)]
x = self.state
cartx = x[0] * scale + screen_width / 2.0 # MIDDLE OF CART
self.carttrans.set_translation(cartx, carty)
self.poletrans.set_rotation(-x[2])
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
def gaussian_function(self, x, sigma, mu):
"""
Gaussian function used in computing reward
:param sigma: std
:param mu: mean
:return: value of gaussian function at x
"""
return np.exp(-0.5 * (((x - mu) / sigma) ** 2))
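# A small sanity check of the Gaussian reward shaping used in step() above
# (the values are illustrative and not part of the original environment):
if __name__ == '__main__':
    env = CartPoleEnv()
    # Upright pole (theta = 0): reward peaks at 1.0.
    print(env.gaussian_function(x=0.0, sigma=np.deg2rad(10), mu=0.0))
    # Pole one sigma away (10 degrees): reward drops to exp(-0.5) ~= 0.61.
    print(env.gaussian_function(x=np.deg2rad(10), sigma=np.deg2rad(10), mu=0.0))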
|
import torch
import pytest
from collections import namedtuple
from functools import partial
from pytorch_lightning.metrics.regression import MeanSquaredError, MeanAbsoluteError, MeanSquaredLogError
from sklearn.metrics import mean_squared_error, mean_absolute_error, mean_squared_log_error
from tests.metrics.utils import compute_batch, NUM_BATCHES, BATCH_SIZE
torch.manual_seed(42)
num_targets = 5
Input = namedtuple('Input', ["preds", "target"])
_single_target_inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE),
target=torch.rand(NUM_BATCHES, BATCH_SIZE),
)
_multi_target_inputs = Input(
preds=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
target=torch.rand(NUM_BATCHES, BATCH_SIZE, num_targets),
)
def _single_target_sk_metric(preds, target, sk_fn=mean_squared_error):
sk_preds = preds.view(-1).numpy()
sk_target = target.view(-1).numpy()
return sk_fn(sk_preds, sk_target)
def _multi_target_sk_metric(preds, target, sk_fn=mean_squared_error):
sk_preds = preds.view(-1, num_targets).numpy()
sk_target = target.view(-1, num_targets).numpy()
return sk_fn(sk_preds, sk_target)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("ddp_sync_on_step", [True, False])
@pytest.mark.parametrize(
"preds, target, sk_metric",
[
(_single_target_inputs.preds, _single_target_inputs.target, _single_target_sk_metric),
(_multi_target_inputs.preds, _multi_target_inputs.target, _multi_target_sk_metric),
],
)
@pytest.mark.parametrize(
"metric_class, sk_fn",
[
(MeanSquaredError, mean_squared_error),
(MeanAbsoluteError, mean_absolute_error),
(MeanSquaredLogError, mean_squared_log_error),
],
)
def test_mean_error(ddp, ddp_sync_on_step, preds, target, sk_metric, metric_class, sk_fn):
compute_batch(preds, target, metric_class, partial(sk_metric, sk_fn=sk_fn), ddp_sync_on_step, ddp)
@pytest.mark.parametrize("metric_class", [MeanSquaredError, MeanAbsoluteError, MeanSquaredLogError])
def test_error_on_different_shape(metric_class):
metric = metric_class()
with pytest.raises(RuntimeError,
match='Predictions and targets are expected to have the same shape'):
metric(torch.randn(100,), torch.randn(50,))
|
"""Test for RFlink light components.
Test setup of rflink lights component/platform. State tracking and
control of Rflink light devices.
"""
import asyncio
from homeassistant.components.light import ATTR_BRIGHTNESS
from homeassistant.components.rflink import EVENT_BUTTON_PRESSED
from homeassistant.const import (
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON)
from homeassistant.core import callback
from ..test_rflink import mock_rflink
DOMAIN = 'light'
CONFIG = {
'rflink': {
'port': '/dev/ttyABC0',
'ignore_devices': ['ignore_wildcard_*', 'ignore_light'],
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'aliasses': ['test_alias_0_0'],
},
'dimmable_0_0': {
'name': 'dim_test',
'type': 'dimmable',
},
'switchable_0_0': {
'name': 'switch_test',
'type': 'switchable',
}
},
},
}
@asyncio.coroutine
def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink switch component."""
# setup mocking rflink module
event_callback, create, protocol, _ = yield from mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]['ignore']
# test default state of light loaded from config
light_initial = hass.states.get(DOMAIN + '.test')
assert light_initial.state == 'off'
assert light_initial.attributes['assumed_state']
# light should follow state of the hardware device by interpreting
# incoming events for its name and aliasses
# mock incoming command event for this device
event_callback({
'id': 'protocol_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
light_after_first_command = hass.states.get(DOMAIN + '.test')
assert light_after_first_command.state == 'on'
    # after receiving the first command the state no longer has to be assumed
assert not light_after_first_command.attributes.get('assumed_state')
# mock incoming command event for this device
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'off'
    # should respond to group command
event_callback({
'id': 'protocol_0_0',
'command': 'allon',
})
yield from hass.async_block_till_done()
light_after_first_command = hass.states.get(DOMAIN + '.test')
assert light_after_first_command.state == 'on'
    # should respond to group command
event_callback({
'id': 'protocol_0_0',
'command': 'alloff',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test following aliasses
# mock incoming command event for this device alias
event_callback({
'id': 'test_alias_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'on'
# test event for new unconfigured sensor
event_callback({
'id': 'protocol2_0_1',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.protocol2_0_1').state == 'on'
# test changing state from HA propagates to Rflink
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'off'
assert protocol.send_command_ack.call_args_list[0][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[0][0][1] == 'off'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'on'
assert protocol.send_command_ack.call_args_list[1][0][1] == 'on'
# protocols supporting dimming and on/off should create hybrid light entity
event_callback({
'id': 'newkaku_0_1',
'command': 'off',
})
yield from hass.async_block_till_done()
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DOMAIN + '.newkaku_0_1'}))
yield from hass.async_block_till_done()
# dimmable should send highest dim level when turning on
assert protocol.send_command_ack.call_args_list[2][0][1] == '15'
# and send on command for fallback
assert protocol.send_command_ack.call_args_list[3][0][1] == 'on'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: DOMAIN + '.newkaku_0_1',
ATTR_BRIGHTNESS: 128,
}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[4][0][1] == '7'
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: DOMAIN + '.dim_test',
ATTR_BRIGHTNESS: 128,
}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[5][0][1] == '7'
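# Note on the asserted dim level (an assumption about the rflink dim mapping,
# stated here for clarity rather than taken from this test): rflink dim
# commands use levels 0-15, and a Home Assistant brightness of 128 maps to
# int(128 / 17) = 7, which is why both calls above assert command '7'.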
@asyncio.coroutine
def test_firing_bus_event(hass, monkeypatch):
"""Incoming Rflink command events should be put on the HA event bus."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'aliasses': ['test_alias_0_0'],
'fire_event': True,
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
calls = []
@callback
def listener(event):
calls.append(event)
hass.bus.async_listen_once(EVENT_BUTTON_PRESSED, listener)
# test event for new unconfigured sensor
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
assert calls[0].data == {'state': 'off', 'entity_id': DOMAIN + '.test'}
@asyncio.coroutine
def test_signal_repetitions(hass, monkeypatch):
"""Command should be sent amount of configured repetitions."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'device_defaults': {
'signal_repetitions': 3,
},
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 2,
},
'protocol_0_1': {
'name': 'test1',
},
'newkaku_0_1': {
'type': 'hybrid',
}
},
},
}
# setup mocking rflink module
event_callback, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
# test if signal repetition is performed according to configuration
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 2
    # test if defaults apply to configured devices
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test1'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 5
# test if device defaults apply to newly created devices
event_callback({
'id': 'protocol_0_2',
'command': 'off',
})
# make sure entity is created before setting state
yield from hass.async_block_till_done()
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.protocol_0_2'}))
# wait for commands and repetitions to finish
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_count == 8
@asyncio.coroutine
def test_signal_repetitions_alternation(hass, monkeypatch):
"""Simultaneously switching entities must alternate repetitions."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 2,
},
'protocol_0_1': {
'name': 'test1',
'signal_repetitions': 2,
},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test1'}))
yield from hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[1][0][0] == 'protocol_0_1'
assert protocol.send_command_ack.call_args_list[2][0][0] == 'protocol_0_0'
assert protocol.send_command_ack.call_args_list[3][0][0] == 'protocol_0_1'
@asyncio.coroutine
def test_signal_repetitions_cancelling(hass, monkeypatch):
"""Cancel outstanding repetitions when state changed."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'signal_repetitions': 3,
},
},
},
}
# setup mocking rflink module
_, _, protocol, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_TURN_ON,
{ATTR_ENTITY_ID: DOMAIN + '.test'}))
yield from hass.async_block_till_done()
print(protocol.send_command_ack.call_args_list)
assert protocol.send_command_ack.call_args_list[0][0][1] == 'off'
assert protocol.send_command_ack.call_args_list[1][0][1] == 'on'
assert protocol.send_command_ack.call_args_list[2][0][1] == 'on'
assert protocol.send_command_ack.call_args_list[3][0][1] == 'on'
@asyncio.coroutine
def test_type_toggle(hass, monkeypatch):
"""Test toggle type lights (on/on)."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'toggle_0_0': {
'name': 'toggle_test',
'type': 'toggle',
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
assert hass.states.get(DOMAIN + '.toggle_test').state == 'off'
# test sending on command to toggle alias
event_callback({
'id': 'toggle_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.toggle_test').state == 'on'
    # sending the same 'on' command again should toggle the light back off
event_callback({
'id': 'toggle_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.toggle_test').state == 'off'
@asyncio.coroutine
def test_group_alias(hass, monkeypatch):
"""Group aliases should only respond to group commands (allon/alloff)."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'group_aliasses': ['test_group_0_0'],
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test sending group command to group alias
event_callback({
'id': 'test_group_0_0',
'command': 'allon',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'on'
# test sending group command to group alias
event_callback({
'id': 'test_group_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
assert hass.states.get(DOMAIN + '.test').state == 'on'
@asyncio.coroutine
def test_nogroup_alias(hass, monkeypatch):
"""Non group aliases should not respond to group commands."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'protocol_0_0': {
'name': 'test',
'nogroup_aliasses': ['test_nogroup_0_0'],
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test sending group command to nogroup alias
event_callback({
'id': 'test_nogroup_0_0',
'command': 'allon',
})
yield from hass.async_block_till_done()
# should not affect state
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test sending group command to nogroup alias
event_callback({
'id': 'test_nogroup_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
# should affect state
assert hass.states.get(DOMAIN + '.test').state == 'on'
@asyncio.coroutine
def test_nogroup_device_id(hass, monkeypatch):
"""Device id that do not respond to group commands (allon/alloff)."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'test_nogroup_0_0': {
'name': 'test',
'group': False,
},
},
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test sending group command to nogroup
event_callback({
'id': 'test_nogroup_0_0',
'command': 'allon',
})
yield from hass.async_block_till_done()
# should not affect state
assert hass.states.get(DOMAIN + '.test').state == 'off'
# test sending group command to nogroup
event_callback({
'id': 'test_nogroup_0_0',
'command': 'on',
})
yield from hass.async_block_till_done()
# should affect state
assert hass.states.get(DOMAIN + '.test').state == 'on'
@asyncio.coroutine
def test_disable_automatic_add(hass, monkeypatch):
"""If disabled new devices should not be automatically added."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'automatic_add': False,
},
}
# setup mocking rflink module
event_callback, _, _, _ = yield from mock_rflink(
hass, config, DOMAIN, monkeypatch)
# test event for new unconfigured sensor
event_callback({
'id': 'protocol_0_0',
'command': 'off',
})
yield from hass.async_block_till_done()
# make sure new device is not added
assert not hass.states.get(DOMAIN + '.protocol_0_0')
|
from collections import defaultdict
import pandas as pd
from pycocotools.coco import COCO
import numpy as np
class Enhance_COCO(COCO):
def __init__(self, path):
super().__init__(path)
self.classes = defaultdict()
self.reverse_classes = defaultdict()
for category in self.loadCats(self.getCatIds()):
self.classes[category['id']] = category['name']
self.reverse_classes[category['name']] = category['id']
def get_cats_by_imgs(self, imgIds, return_name=False):
""" Given some image ids, and return the category in these imgs
Args:
imgIds: the image ids
return_name: return the name of category or the index of category, when return_name = 'true', return names, else return indexs
"""
annotations = self.loadAnns(self.getAnnIds(imgIds=imgIds)) # get annotations
        # get the category ids appearing in the annotations
catIds = [ann['category_id'] for ann in annotations]
catIds = list(set(catIds))
if not return_name:
return catIds
else:
catNames = []
            # get category names from category ids
for catId in catIds:
catNames.append(self.classes[catId])
return catNames
def get_imgs_by_cats(self, catIds):
""" Given some indexs of category, and return all imgs having these category
Args:
catIds: the index of category
"""
if type(catIds) == list:
imgIds = set()
for catId in catIds:
imgIds.update(self.getImgIds(catIds=catId))
return list(imgIds)
else:
return self.getImgIds(catIds=catIds)
def catId_to_name(self, catIds):
"""Convert the indexs of categories to the name of them
Args:
catIds: a list of int or int, indicating the index of categories
Return: a list contains the name of category which is given
"""
if type(catIds) == int:
return [self.classes[catIds]]
else:
names = [self.classes[catId] for catId in catIds]
return names
def catName_to_id(self, names, sort = True):
"""Convert the names of categories to the index of them
Args:
names: a list of str or str, indicating the nameof categories
sort: whether the list which is returned is well-sorted, default = True
Return: a list contains the indexs of category given
"""
if isinstance(names, str):
return [self.reverse_classes[names]]
# names = list(set(names))
ids = []
for name in names:
ids.append(self.reverse_classes[name])
if sort:
ids.sort()
return ids
def get_catNum_by_catId(self, catIds):
result = {'image':[], 'object':[]}
index = []
catIds.sort()
for catId in catIds:
index.append(self.classes[catId])
result['image'].append(len(self.getImgIds(catIds = catId)))
result['object'].append(len(self.getAnnIds(catIds = catId)))
index.append('Counts')
result['image'].append(sum(result['image']))
result['object'].append(sum(result['object']))
result = pd.DataFrame(result, index=index)
        result = result.sort_values(by=['image'], ascending=False)
return result
def get_catNum_by_imgs(self, imgIds):
#get annotations
annotations = self.loadAnns(self.getAnnIds(imgIds=imgIds))
        # get the category ids appearing in the annotations
catIds = [ann['category_id'] for ann in annotations]
result = {'image':[], 'object':[]}
        # use the sorted unique ids everywhere so names, image counts and
        # object counts stay aligned
        cat_ids_unique, cat_counts = np.unique(catIds, return_counts=True)
        catIds = cat_ids_unique.tolist()
        index = self.catId_to_name(catIds)
        #object counts
        result['object'] = cat_counts.tolist()
        for catId in catIds:
result['image'].append(len(set(self.getImgIds(catIds = catId)) & set(imgIds)))
        print("The 'Counts' row in the image column is the number of input imgIds")
index.append('Counts')
result['image'].append(len(imgIds))
result['object'].append(sum(result['object']))
result = pd.DataFrame(result, index=index)
        result = result.sort_values(by=['image'], ascending=False)
return result
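# Usage sketch (not part of the original module). The annotation path below is a
# placeholder -- substitute any COCO-format JSON file available locally.
if __name__ == '__main__':
    coco = Enhance_COCO('annotations/instances_val2017.json')  # hypothetical path
    person_ids = coco.catName_to_id('person')
    img_ids = coco.get_imgs_by_cats(person_ids)
    print(coco.get_cats_by_imgs(img_ids[:5], return_name=True))
    print(coco.get_catNum_by_catId(person_ids))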
|
from collections import defaultdict
from enum import Enum
import numpy as np
import pandas as pd
from pandas.testing import assert_frame_equal
import unittest
from unittest import mock
from pyEpiabm.property import InfectionStatus
from pyEpiabm.utility import StateTransitionMatrix
from pyEpiabm.tests.test_unit.parameter_config_tests import TestPyEpiabm
class TestStateTransitionMatrix(TestPyEpiabm):
"""Test the 'StateTransitionMatrix' class.
"""
def setUp(self) -> None:
self.empty_coefficients = defaultdict(int)
self.matrix_object = StateTransitionMatrix(self.empty_coefficients)
self.real_coefficients = {
"prob_exposed_to_asympt": 0.4,
"prob_exposed_to_mild": 0.4,
"prob_exposed_to_gp": 0.2,
"prob_gp_to_recov": 0.9,
"prob_gp_to_hosp": 0.1,
"prob_hosp_to_recov": 0.6,
"prob_hosp_to_icu": 0.2,
"prob_hosp_to_death": 0.2,
"prob_icu_to_icurecov": 0.5,
"prob_icu_to_death": 0.5
}
self.list_coefficients = defaultdict(
int, {
"prob_exposed_to_asympt": [0.8, 0.2],
"prob_exposed_to_mild": [0.2, 0.8],
}
)
self.age_prop = [0.1, 0.9]
def test_init(self):
self.assertIsInstance(self.matrix_object.matrix, pd.DataFrame)
self.assertFalse(self.matrix_object.age_dependent)
self.matrix_object_ad = StateTransitionMatrix(self.empty_coefficients,
use_ages=True)
self.assertTrue(self.matrix_object_ad.age_dependent)
def test_create_empty_transition_matrix(self):
"""Tests the build_state_transition_matrix method by asserting if it is
equal to the initial matrix expected.
"""
empty_mat = self.matrix_object.create_empty_state_transition_matrix()
labels = [status.name for status in InfectionStatus]
zero_filled_dataframe = pd.DataFrame(np.zeros((len(InfectionStatus),
len(InfectionStatus))),
columns=labels, index=labels,
dtype='object')
assert_frame_equal(empty_mat, zero_filled_dataframe)
def test_create_state_transition_matrix(self):
"""Tests the fill_state_transition_matrix method and asserts that each row
sums to 1 (ie that the transition probabilities for each current
infection status sum to 1).
"""
filled_matrix = StateTransitionMatrix(self.real_coefficients)
row_sums = filled_matrix.matrix.sum(axis=1)
for i in row_sums:
self.assertAlmostEqual(i, 1)
def test_update_probability(self):
# Test method updates probability as expected
row_status = InfectionStatus.Susceptible
column_status = InfectionStatus.Exposed
new_probability = 0.5
transition_matrix = self.matrix_object.matrix
self.matrix_object.update_probability(row_status, column_status,
new_probability)
self.assertEqual(0.5,
transition_matrix.loc['Susceptible', 'Exposed'])
# Test error for incorrect columns is raised
class TestInfectionStatus(Enum):
Susceptiblesssss = 1
with self.assertRaises(ValueError):
row = TestInfectionStatus.Susceptiblesssss
column = TestInfectionStatus.Susceptiblesssss
self.matrix_object.update_probability(row, column, 0.5)
with self.assertRaises(ValueError):
row = None
column = None
self.matrix_object.update_probability(row, column, 0.5)
# Test error for incorrect probability is raised
with self.assertRaises(ValueError):
row = InfectionStatus.Susceptible
column = InfectionStatus.Susceptible
self.matrix_object.update_probability(row, column, 10.0)
def test_age_dependance(self):
with mock.patch('pyEpiabm.Parameters.instance') as mock_param:
mock_param.return_value.age_proportions = self.age_prop
matrix_object = StateTransitionMatrix(self.list_coefficients,
use_ages=True)
            mock_param.assert_not_called()
output = matrix_object.matrix
self.assertListEqual([0.8, 0.2], output.loc['Exposed',
'InfectASympt'])
self.assertEqual([0.2, 0.8], output.loc['Exposed', 'InfectMild'])
def test_remove_age_dependance(self):
with mock.patch('pyEpiabm.Parameters.instance') as mock_param:
mock_param.return_value.age_proportions = self.age_prop
matrix_object = StateTransitionMatrix(self.list_coefficients,
use_ages=False)
            mock_param.assert_called_once()
output = matrix_object.matrix
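            # With use_ages=False the per-age probabilities are collapsed into a
            # weighted average over the age proportions [0.1, 0.9]:
            # 0.8*0.1 + 0.2*0.9 = 0.26 and 0.2*0.1 + 0.8*0.9 = 0.74.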
self.assertAlmostEqual(0.26, output.loc['Exposed', 'InfectASympt'])
self.assertAlmostEqual(0.74, output.loc['Exposed', 'InfectMild'])
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import re
import torch
from fvcore.common.checkpoint import (
get_missing_parameters_message,
get_unexpected_parameters_message,
)
from fsdet.config import global_cfg
def convert_basic_c2_names(original_keys):
"""
Apply some basic name conversion to names in C2 weights.
It only deals with typical backbone models.
Args:
original_keys (list[str]):
Returns:
list[str]: The same number of strings matching those in original_keys.
"""
layer_keys = copy.deepcopy(original_keys)
layer_keys = [
{"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
] # some hard-coded mappings
layer_keys = [k.replace("_", ".") for k in layer_keys]
layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
# Uniform both bn and gn names to "norm"
layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
# stem
layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
# to avoid mis-matching with "conv1" in other components (e.g. detection head)
layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
# layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
# layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
# layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
# layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
# layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
# blocks
layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
# DensePose substitutions
layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
return layer_keys
def convert_c2_detectron_names(weights):
"""
Map Caffe2 Detectron weight names to Detectron2 names.
Args:
weights (dict): name -> tensor
Returns:
dict: detectron2 names -> tensor
dict: detectron2 names -> C2 names
"""
logger = logging.getLogger(__name__)
logger.info("Remapping C2 weights ......")
original_keys = sorted(weights.keys())
layer_keys = copy.deepcopy(original_keys)
layer_keys = convert_basic_c2_names(layer_keys)
# --------------------------------------------------------------------------
# RPN hidden representation conv
# --------------------------------------------------------------------------
# FPN case
# In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
# shared for all other levels, hence the appearance of "fpn2"
layer_keys = [
k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
]
# Non-FPN case
layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
# --------------------------------------------------------------------------
# RPN box transformation conv
# --------------------------------------------------------------------------
# FPN case (see note above about "fpn2")
layer_keys = [
k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# Non-FPN case
layer_keys = [
k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
]
layer_keys = [
k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
for k in layer_keys
]
# --------------------------------------------------------------------------
# Fast R-CNN box head
# --------------------------------------------------------------------------
layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
# 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
# --------------------------------------------------------------------------
# FPN lateral and output convolutions
# --------------------------------------------------------------------------
def fpn_map(name):
"""
Look for keys with the following patterns:
1) Starts with "fpn.inner."
Example: "fpn.inner.res2.2.sum.lateral.weight"
Meaning: These are lateral pathway convolutions
2) Starts with "fpn.res"
Example: "fpn.res2.2.sum.weight"
Meaning: These are FPN output convolutions
"""
splits = name.split(".")
norm = ".norm" if "norm" in splits else ""
if name.startswith("fpn.inner."):
# splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
stage = int(splits[2][len("res") :])
return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
elif name.startswith("fpn.res"):
# splits example: ['fpn', 'res2', '2', 'sum', 'weight']
stage = int(splits[1][len("res") :])
return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
return name
layer_keys = [fpn_map(k) for k in layer_keys]
# --------------------------------------------------------------------------
# Mask R-CNN mask head
# --------------------------------------------------------------------------
# roi_heads.StandardROIHeads case
layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
# roi_heads.Res5ROIHeads case
layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
# --------------------------------------------------------------------------
# Keypoint R-CNN head
# --------------------------------------------------------------------------
# interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
layer_keys = [
k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
]
layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
# --------------------------------------------------------------------------
# Done with replacements
# --------------------------------------------------------------------------
assert len(set(layer_keys)) == len(layer_keys)
assert len(original_keys) == len(layer_keys)
new_weights = {}
new_keys_to_original_keys = {}
for orig, renamed in zip(original_keys, layer_keys):
new_keys_to_original_keys[renamed] = orig
if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
# remove the meaningless prediction weight for background class
new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
new_weights[renamed] = weights[orig][new_start_idx:]
logger.info(
"Remove prediction weight for background class in {}. The shape changes from "
"{} to {}.".format(
renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
)
)
elif renamed.startswith("cls_score."):
# move weights of bg class from original index 0 to last index
logger.info(
"Move classification weights for background class in {} from index 0 to "
"index {}.".format(renamed, weights[orig].shape[0] - 1)
)
new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
else:
new_weights[renamed] = weights[orig]
return new_weights, new_keys_to_original_keys
# Note the current matching is not symmetric.
# it assumes model_state_dict will have longer names.
def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
"""
Match names between the two state-dict, and update the values of model_state_dict in-place with
copies of the matched tensor in ckpt_state_dict.
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
model and will be renamed at first.
Strategy: suppose that the models that we will create will have prefixes appended
to each of its keys, for example due to an extra level of nesting that the original
pre-trained weights from ImageNet won't contain. For example, model.state_dict()
might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
res2.conv1.weight. We thus want to match both parameters together.
For that, we look for each model weight, look among all loaded keys if there is one
that is a suffix of the current weight name, and use it if that's the case.
    If multiple matches exist, take the one with the longest matching name.
    For example, for the same model as before, the pretrained
weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
we want to match backbone[0].body.conv1.weight to conv1.weight, and
backbone[0].body.res2.conv1.weight to res2.conv1.weight.
"""
model_keys = sorted(list(model_state_dict.keys()))
if c2_conversion:
ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
# original_keys: the name in the original dict (before renaming)
else:
original_keys = {x: x for x in ckpt_state_dict.keys()}
ckpt_keys = sorted(list(ckpt_state_dict.keys()))
def match(a, b):
# Matched ckpt_key should be a complete (starts with '.') suffix.
# For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
# but matches whatever_conv1 or mesh_head.whatever_conv1.
return a == b or a.endswith("." + b)
# get a matrix of string matches, where each (i, j) entry correspond to the size of the
# ckpt_key string, if it matches
match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
# use the matched one with longest size in case of multiple matches
max_match_size, idxs = match_matrix.max(1)
# remove indices that correspond to no-match
idxs[max_match_size == 0] = -1
# used for logging
max_len_model = max(len(key) for key in model_keys) if model_keys else 1
max_len_ckpt = max(len(key) for key in ckpt_keys) if ckpt_keys else 1
log_str_template = "{: <{}} loaded from {: <{}} of shape {}"
logger = logging.getLogger(__name__)
# matched_pairs (matched checkpoint key --> matched model key)
matched_keys = {}
for idx_model, idx_ckpt in enumerate(idxs.tolist()):
if idx_ckpt == -1:
continue
key_model = model_keys[idx_model]
key_ckpt = ckpt_keys[idx_ckpt]
value_ckpt = ckpt_state_dict[key_ckpt]
shape_in_model = model_state_dict[key_model].shape
if shape_in_model != value_ckpt.shape:
logger.warning(
"Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
key_ckpt, value_ckpt.shape, key_model, shape_in_model
)
)
logger.warning(
"{} will not be loaded. Please double check and see if this is desired.".format(
key_ckpt
)
)
continue
model_state_dict[key_model] = value_ckpt.clone()
if key_ckpt in matched_keys: # already added to matched_keys
logger.error(
"Ambiguity found for {} in checkpoint!"
"It matches at least two keys in the model ({} and {}).".format(
key_ckpt, key_model, matched_keys[key_ckpt]
)
)
raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
matched_keys[key_ckpt] = key_model
if not global_cfg.MUTE_HEADER:
logger.info(
log_str_template.format(
key_model,
max_len_model,
original_keys[key_ckpt],
max_len_ckpt,
tuple(shape_in_model),
)
)
matched_model_keys = matched_keys.values()
matched_ckpt_keys = matched_keys.keys()
# print warnings about unmatched keys on both side
unmatched_model_keys = [k for k in model_keys if k not in matched_model_keys]
if len(unmatched_model_keys):
logger.info(get_missing_parameters_message(unmatched_model_keys))
unmatched_ckpt_keys = [k for k in ckpt_keys if k not in matched_ckpt_keys]
if len(unmatched_ckpt_keys):
logger.info(
get_unexpected_parameters_message(original_keys[x] for x in unmatched_ckpt_keys)
)
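# Minimal sketch (not part of the original file) illustrating the suffix-matching
# behaviour documented above. The key names are invented, c2_conversion is disabled
# so no Caffe2 renaming is applied, and it assumes torch and fsdet (with
# global_cfg.MUTE_HEADER defined) are importable.
if __name__ == "__main__":
    model_sd = {"backbone.bottom_up.res2.conv1.weight": torch.zeros(3)}
    ckpt_sd = {"res2.conv1.weight": torch.ones(3)}
    align_and_update_state_dicts(model_sd, ckpt_sd, c2_conversion=False)
    # the model tensor is now a copy of the matching checkpoint tensor
    print(model_sd["backbone.bottom_up.res2.conv1.weight"])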
|
import pygame
class Camera:
def __init__(self, player, level_end):
# Get game window size
screen_width = pygame.display.Info().current_w
screen_height = pygame.display.Info().current_h
# Passed attributes
self.player = player
self.level_end = level_end
# Class attributes
self.screen = pygame.Rect((0, 0, screen_width, screen_height))
def update(self):
# Horizontal position check
if self.player.rect.x > self.screen.centerx:
if self.screen.right < self.level_end[0]:
dx = self.player.rect.x - self.screen.centerx
self.screen.move_ip(dx, 0)
if self.screen.right > self.level_end[0]:
self.screen.right = self.level_end[0]
elif self.player.rect.x < self.screen.centerx:
if self.screen.left > 0:
dx = self.player.rect.x - self.screen.centerx
self.screen.move_ip(dx, 0)
if self.screen.left < 0:
self.screen.left = 0
# Vertical position check
if self.player.rect.y > self.screen.centery:
if self.screen.bottom < self.level_end[1]:
dy = self.player.rect.y - self.screen.centery
self.screen.move_ip(0, dy)
if self.screen.bottom > self.level_end[1]:
self.screen.bottom = self.level_end[1]
elif self.player.rect.y < self.screen.centery:
if self.screen.top > 0:
dy = self.player.rect.y - self.screen.centery
self.screen.move_ip(0, dy)
if self.screen.top < 0:
self.screen.top = 0
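# Usage sketch (not part of the original module): a game loop would typically call
# camera.update() once per frame and subtract the camera rect's offset when drawing.
# The names below are placeholders for whatever the game defines:
#
#     camera = Camera(player, level_end=(3200, 1024))
#     camera.update()
#     screen.blit(sprite.image, (sprite.rect.x - camera.screen.x,
#                                sprite.rect.y - camera.screen.y))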
|
# Copyright 2018 The Simons Foundation, Inc. - All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netket as nk
# 1D Lattice
g = nk.graph.Hypercube(length=20, n_dim=1, pbc=True)
# Hilbert space of spins on the graph
# with total Sz equal to 0
hi = nk.hilbert.Spin(s=0.5, graph=g, total_sz=0)
# Heisenberg hamiltonian
ha = nk.operator.Heisenberg(hilbert=hi)
# Symmetric Jastrow Spin Machine
ma = nk.machine.JastrowSymm(hilbert=hi)
ma.init_random_parameters(seed=1234, sigma=0.01)
# Metropolis Exchange Sampling
# Notice that this sampler exchanges two neighboring sites
# thus preserves the total magnetization
sa = nk.sampler.MetropolisExchange(machine=ma)
# Optimizer
op = nk.optimizer.Sgd(learning_rate=0.05)
# Stochastic reconfiguration
gs = nk.variational.Vmc(
hamiltonian=ha,
sampler=sa,
optimizer=op,
n_samples=1000,
diag_shift=0.1,
method="Sr",
)
gs.run(out="test", n_iter=300)
|
import time
from math import log
from gpiozero import PWMLED
from gpiozero import MCP3008
pot = MCP3008(0)
hall = MCP3008(1)
led = PWMLED(21)
#led.source = pot.values
#led.source = hall.values
while True:
#print(pot.value)
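    # Scale the hall-sensor reading so small deviations from its apparent resting
    # value (~0.5151 on this particular setup) drive the LED across its full range;
    # 5.0 is an empirical gain, and the result is clamped to [0, 1] below.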
brightness = hall.value + ((hall.value - 0.5151) * 5.0)
if(brightness > 1.0):
brightness = 1.0
elif(brightness < 0.0):
brightness = 0.0
led.value = brightness
print(brightness)
time.sleep(0.01)
###
|
from __future__ import absolute_import
from sentry.integrations import Integration, IntegrationFeatures, IntegrationProvider, IntegrationMetadata
from sentry.integrations.atlassian_connect import AtlassianConnectValidationError, get_integration_from_request
from sentry.integrations.repositories import RepositoryMixin
from sentry.pipeline import NestedPipelineView, PipelineView
from sentry.identity.pipeline import IdentityProviderPipeline
from django.utils.translation import ugettext_lazy as _
from sentry.models import Repository
from sentry.utils.http import absolute_uri
from .repository import BitbucketRepositoryProvider
from .client import BitbucketApiClient
from .issues import BitbucketIssueBasicMixin
DESCRIPTION = """
Bitbucket for Sentry.io
"""
metadata = IntegrationMetadata(
description=DESCRIPTION.strip(),
author='The Sentry Team',
noun=_('Installation'),
issue_url='https://github.com/getsentry/sentry/issues/new?title=Bitbucket%20Integration:%20&labels=Component%3A%20Integrations',
source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/bitbucket',
aspects={},
)
scopes = (
'account',
'issue:write',
'repository',
'repository:admin',
'team',
'webhook',
)
class BitbucketIntegration(Integration, BitbucketIssueBasicMixin, RepositoryMixin):
def get_client(self):
return BitbucketApiClient(
self.model.metadata['base_url'],
self.model.metadata['shared_secret'],
self.model.external_id,
)
@property
def username(self):
return self.model.name
def get_repositories(self):
repos = self.get_client().get_repos(self.username)['values']
data = []
for repo in repos:
data.append(
{
'identifier': repo['full_name'],
'name': repo['full_name'],
}
)
return data
def get_unmigratable_repositories(self):
repos = Repository.objects.filter(
organization_id=self.organization_id,
provider='bitbucket',
)
accessible_repos = [
r['identifier'] for r in self.get_repositories()
]
return filter(
lambda repo: repo.name not in accessible_repos,
repos,
)
def reinstall(self):
self.reinstall_repositories()
class BitbucketIntegrationProvider(IntegrationProvider):
key = 'bitbucket'
name = 'Bitbucket'
metadata = metadata
scopes = scopes
integration_cls = BitbucketIntegration
features = frozenset([IntegrationFeatures.ISSUE_BASIC, IntegrationFeatures.COMMITS])
def get_pipeline_views(self):
identity_pipeline_config = {
'redirect_url': absolute_uri('/extensions/bitbucket/setup/'),
}
identity_pipeline_view = NestedPipelineView(
bind_key='identity',
provider_key='bitbucket',
pipeline_cls=IdentityProviderPipeline,
config=identity_pipeline_config,
)
return [identity_pipeline_view, VerifyInstallation()]
def post_install(self, integration, organization):
repos = Repository.objects.filter(
organization_id=organization.id,
provider='bitbucket',
)
unmigrateable_repos = self \
.get_installation(integration, organization.id) \
.get_unmigratable_repositories()
for repo in filter(lambda r: r not in unmigrateable_repos, repos):
repo.update(integration_id=integration.id)
def build_integration(self, state):
if state.get('publicKey'):
principal_data = state['principal']
return {
'provider': self.key,
'external_id': state['clientKey'],
'name': principal_data['username'],
'metadata': {
'public_key': state['publicKey'],
'shared_secret': state['sharedSecret'],
'base_url': state['baseApiUrl'],
'domain_name': principal_data['links']['html']['href'].replace('https://', ''),
'icon': principal_data['links']['avatar']['href'],
'scopes': self.scopes,
'uuid': principal_data['uuid'],
'type': principal_data['type'], # team or user account
},
}
else:
return {
'provider': self.key,
'external_id': state['external_id'],
'expect_exists': True,
}
def setup(self):
from sentry.plugins import bindings
bindings.add(
'integration-repository.provider',
BitbucketRepositoryProvider,
id='integrations:%s' % self.key,
)
class VerifyInstallation(PipelineView):
def dispatch(self, request, pipeline):
try:
integration = get_integration_from_request(request, BitbucketIntegrationProvider.key)
except AtlassianConnectValidationError:
return pipeline.error('Unable to verify installation.')
pipeline.bind_state('external_id', integration.external_id)
return pipeline.next_step()
|
from credmark.cmf.model import Model
@Model.describe(
slug='contrib.neilz',
display_name='An example of a contrib model',
description="This model exists simply as an example of how and where to \
contribute a model to the Credmark framework",
version='1.0',
developer='neilz.eth',
output=dict
)
class MyModel(Model):
def run(self, input):
return {
"credmarkFounder": "Neil",
"message": "You are a modeler. Thank you modeler."
}
|
from abc import abstractmethod
from collections import namedtuple
from copy import deepcopy
#
import numpy as np
#from pysurf.logger import get_logger
from ..utils.osutils import exists_and_isfile
from ..database.database import Database
from ..database.dbtools import DBVariable
from ..logger import Logger, get_logger
#
from ..system import Molecule
from ..sampling.base_sampler import SamplerFactory
from .base_sampler import DynCondition, CrdCondition
from .normalmodes import Mode
#
from colt import Colt
class Sampler(Colt):
_questions = """
method = :: str
#State whether the system is a model system
model = False :: bool
"""
@classmethod
def _extend_questions(cls, questions):
questions.generate_cases("method", {name: method.questions
for name, method in SamplerFactory._methods.items()})
@classmethod
def from_config(cls, config):
sampler = cls._get_sampler(config['method'])
return cls(config, sampler)
@classmethod
def from_inputfile(cls, inputfile):
# Generate the config
config = cls.generate_input(inputfile, config=inputfile)
return cls.from_config(config)
def __init__(self, config, sampler, logger=None):
""" Sampling always goes with a database, if not needed use Sampler class
"""
self.config = config
self.sampler = sampler
self._start = 0
if logger is None:
self.logger = get_logger('sampler.log', 'sampler')
else:
self.logger = logger
def get_condition(self, idx):
return self.sampler.get_condition()
def __iter__(self):
self._start = 1 # skip equilibrium structure
return self
def __next__(self):
cond = self.get_condition(self._start)
if cond is not None:
self._start += 1
return cond
raise StopIteration
@staticmethod
def _get_sampler(config):
return SamplerFactory._methods[config.value].from_config(config)
@property
def equilibrium(self):
return self.sampler.get_init()
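# Usage sketch (not from the original file); the input file name is a placeholder.
# A sampling run would typically be driven along these lines:
#
#     sampler = Sampler.from_inputfile('sampling.inp')
#     reference = sampler.equilibrium
#     for condition in sampler:   # yields sampled conditions until exhausted
#         process(condition)      # user-defined handling of each condition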
|
import asyncio
import logging
import aioredis
from aiohttp.web import Application
from aiohttp_traversal.router import TraversalRouter
from . import resources
from . import views
log = logging.getLogger(__name__)
def includeme(app):
app.router.bind_view(resources.Root, views.Root)
app.router.bind_view(resources.Url, views.Url)
@asyncio.coroutine
def setup_redis(app, host='localhost', port=6379, db=0):
app['redis'] = yield from connect_to_redis(app.loop, host, port, db)
app.register_on_finish(lambda app: app['redis'].clear())
@asyncio.coroutine
def connect_to_redis(loop, host='localhost', port=6379, db=0):
log.info('connecting to redis ({}:{}/{})'.format(host, port, db))
return (yield from aioredis.create_pool(
(host, port), db=db,
minsize=5, maxsize=10,
loop=loop))
def configure(loop, redis):
app = Application(loop=loop, router=TraversalRouter())
app.router.set_root_factory(resources.Root)
includeme(app)
loop.run_until_complete(setup_redis(app, *redis))
return app
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Refnet Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the blocksdir option.
"""
import os
import shutil
from test_framework.test_framework import RefnetTestFramework, initialize_datadir
class BlocksdirTest(RefnetTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks"))
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "blocks"))
shutil.rmtree(self.nodes[0].datadir)
initialize_datadir(self.options.tmpdir, 0)
self.log.info("Starting with nonexistent blocksdir ...")
blocksdir_path = os.path.join(self.options.tmpdir, 'blocksdir')
self.nodes[0].assert_start_raises_init_error(["-blocksdir=" + blocksdir_path], 'Error: Specified blocks directory "{}" does not exist.'.format(blocksdir_path))
os.mkdir(blocksdir_path)
self.log.info("Starting with existing blocksdir ...")
self.start_node(0, ["-blocksdir=" + blocksdir_path])
self.log.info("mining blocks..")
self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
assert os.path.isfile(os.path.join(blocksdir_path, "regtest", "blocks", "blk00000.dat"))
assert os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest", "blocks", "index"))
if __name__ == '__main__':
BlocksdirTest().main()
|
# -*- coding: utf-8 -*-
"""Click commands."""
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
from pathlib import Path
from itertools import chain
from flaskshop.random_data import (
create_users,
create_menus,
create_shipping_methods,
create_products_by_schema,
create_page,
create_collections_by_schema,
create_admin,
create_orders,
create_product_sales,
create_vouchers,
create_dashboard_menus,
create_roles,
)
from flaskshop.extensions import db
from flaskshop.corelib.db import rdb
from flaskshop.public.search import Item
from flaskshop.product.models import Product
HERE = Path(__file__).resolve()
PROJECT_ROOT = HERE.parent
TEST_PATH = "tests"
@click.command()
def test():
"""Run the tests."""
print(call(f"pytest {TEST_PATH}", shell=True))
@click.command()
@click.option(
"-f",
"--fix-imports",
default=False,
is_flag=True,
help="Fix imports using isort, before linting",
)
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ["node_modules", "requirements"]
root_files = Path(PROJECT_ROOT).glob("*.py")
root_directories = (
file for file in Path(PROJECT_ROOT).iterdir() if not file.name.startswith(".")
)
files_and_directories = [
arg.name for arg in chain(root_files, root_directories) if arg.name not in skip
]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo(f"{description}: {' '.join(command_line)}")
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool("Fixing import order", "isort", "-rc")
execute_tool("Checking code style", "flake8")
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for file in chain(
Path(PROJECT_ROOT).glob("**/*.pyc"), Path(PROJECT_ROOT).glob("**/*.pyo")
):
click.echo(f"Removing {file}")
file.unlink()
@click.command()
@click.option("--url", default=None, help="Url to test (ex. /static/image.png)")
@click.option(
"--order", default="rule", help="Property on Rule to order by (default: rule)"
)
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_headers = ("Rule", "Endpoint", "Arguments")
if url:
try:
rule, arguments = current_app.url_map.bind("localhost").match(
url, return_rule=True
)
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append((f"<{e}>", None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(), key=lambda rule: getattr(rule, order)
)
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ""
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += "{:" + str(max_rule_length) + "}"
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = max_endpoint_length if max_endpoint_length > 8 else 8
str_template += " {:" + str(max_endpoint_length) + "}"
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = max_arguments_length if max_arguments_length > 9 else 9
str_template += " {:" + str(max_arguments_length) + "}"
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo("-" * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
@click.command()
@with_appcontext
def createdb():
""" create database tables
"""
db.create_all()
@click.command()
@click.option("--type", default="default", help="which type to seed")
@with_appcontext
def seed(type):
""" Generate random data for test.
"""
if type == "default":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
create_generator = chain(
create_collections_by_schema(place_holder),
create_users(),
create_roles(),
create_admin(),
create_page(),
create_menus(),
create_shipping_methods(),
create_dashboard_menus(),
create_orders(),
create_product_sales(),
create_vouchers(),
)
for msg in create_generator:
click.echo(msg)
elif type == "product":
place_holder = Path("placeholders")
create_products_by_schema(
placeholder_dir=place_holder, how_many=10, create_images=True
)
else:
create_dict = {
"user": create_users,
"menu": create_menus,
"ship": create_shipping_methods,
"order": create_orders,
"sale": create_product_sales,
"voucher": create_vouchers,
"dashboard": create_dashboard_menus,
"role": create_roles,
"create_admin": create_admin,
}
fn = create_dict[type]
for msg in fn():
click.echo(msg)
@click.command()
@with_appcontext
def flushrdb():
""" Clear all redis keys, include cache and propitems.
"""
rdb.flushdb()
@click.command()
@with_appcontext
def reindex():
""" clear elastic-search items.
"""
Item._index.delete(ignore=404)
Item.init()
products = Product.query.all()
Item.bulk_update(products, op_type="create")
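# Registration sketch (not part of this module): in the application factory these
# commands are typically attached to the Flask CLI roughly as follows
# (the function name register_commands is an assumption):
#
#     def register_commands(app):
#         for command in (test, lint, clean, urls, createdb, seed, flushrdb, reindex):
#             app.cli.add_command(command)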
|
import datetime
class CreateHeader:
def __init__(self, comment=u'//'):
self._comment=comment
self._name=''
self._submitDate=''
self._assignment=''
def setName(self, name):
self._name=name
def setSubmitDate(self, date=None):
self._submitDate=date
if date is None:
_d=datetime.datetime.now()
self._submitDate=_d.strftime('%Y-%m-%d %H:%M:%S')
def setAssignment(self, assignment):
self._assignment=assignment
def getHeader(self):
_str=self._comment + u'#'*60
_str+='\n'
_str+=self._comment + '\n'
_str+=self._comment + ' Assignment: %s\n' % self._assignment
_str+=self._comment + ' Name: %s\n' % self._name
_str+=self._comment + ' Submit Date: %s\n' % self._submitDate
_str+=self._comment + '\n'
_str+=self._comment + '#'*60
_str+='\n'
return _str
if __name__ == '__main__':
_ch=CreateHeader(comment=u'#')
_ch.setSubmitDate()
print(_ch.getHeader())
|
""" Main optical flow calculations
* :py:func:`calc_optical_flow`: Driver script for optical flow calculation and plotting
* :py:class:`AnalyzeFlow`: Optical flow analysis pipeline class
"""
# Imports
import shutil
import pathlib
from typing import Tuple, List, Optional
# 3rd party
import numpy as np
from scipy.integrate import simps
from skimage.feature import peak_local_max
from skimage.transform import downscale_local_mean
from skimage.filters import gaussian
from sklearn.cluster import KMeans
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Our own imports
from .utils import (
calculate_stats, convert_warp_to_color, fit_kernel_smoothed_bins,
optical_flow, read_movie, refine_signal_peaks
)
from .consts import (
DOWNSAMPLE_RAW, MIN_VEL_MAG, MIN_ANG_MAG, SUFFIX,
MAX_VEL_MAG, MAX_DISP_MAG, FIGSIZE, PALETTE, TIME_SCALE, SPACE_SCALE,
SAMPLES_AROUND_PEAK, SMOOTH_SIGMA, SMOOTH_HALFWIDTH, N_CLUSTERS,
)
# Classes
class AnalyzeFlow(object):
""" Analyze the optical flow in the data
:param Path infile:
The path to the input file to analyze
:param Path outdir:
The path to the directory to write the plots and analysis to
:param int max_frames:
If >0, maximum number of frames to load
:param str suffix:
Suffix to use when saving plots
:param float smoothing_sigma:
Standard deviation of the gaussian filter
:param int half_width:
Half width of the temporal window to smooth traces over
:param int n_clusters:
Number of clusters to split the time series data into
:param float min_vel_mag:
Minimum magnitude for a pixel to be considered moving
:param float max_vel_mag:
Maximum magnitude for plotting the velocities
:param float min_ang_mag:
Minimum velocity magnitude to use to calculate angles
:param float max_disp_mag:
Maximum total displacement magnitude to plot
"""
def __init__(self,
infile: pathlib.Path,
outdir: pathlib.Path,
max_frames: int = -1,
suffix: str = SUFFIX,
smoothing_sigma: float = SMOOTH_SIGMA,
half_width: int = SMOOTH_HALFWIDTH,
n_clusters: int = N_CLUSTERS,
min_vel_mag: float = MIN_VEL_MAG,
max_vel_mag: float = MAX_VEL_MAG,
min_ang_mag: float = MIN_ANG_MAG,
max_disp_mag: float = MAX_DISP_MAG,
time_scale: float = TIME_SCALE,
space_scale: float = SPACE_SCALE,
downsample_raw: int = DOWNSAMPLE_RAW,
samples_around_peak: int = SAMPLES_AROUND_PEAK):
self.infile = infile
self.outdir = outdir
self.suffix = suffix
self.max_frames = max_frames
self.smoothing_sigma = smoothing_sigma
self.half_width = half_width
self.n_clusters = n_clusters
# Handle the space and time scales
self.time_scale = time_scale
self.space_scale = space_scale * downsample_raw
self.min_vel_mag = min_vel_mag
self.max_vel_mag = max_vel_mag
self.min_ang_mag = min_ang_mag
self.max_disp_mag = max_disp_mag
self.downsample_raw = downsample_raw
self.samples_around_peak = samples_around_peak
self.figsize = FIGSIZE
self.palette = sns.color_palette(PALETTE, n_colors=n_clusters)
self.rows = self.cols = None
self.xx = self.yy = None
self.smooth_frames = []
self.uu_frames = []
self.vv_frames = []
self.labels = None
self.label_image = None
self.timeline_mag = None
self.timeline_cumulative_mag = None
self.timeline_mag_peaks = None
self.timeline_cumulative_mag_peaks = None
@property
def max_frame(self) -> int:
""" Max frame index """
return len(self.smooth_frames)
@property
def unique_label_ids(self) -> List:
""" Return a list of unique label ids """
return list(sorted(np.unique(self.labels)))
def calc_velocities(self):
""" Calculate all the velocities for the data """
self.rows = self.cols = None
self.xx = self.yy = None
self.smooth_frames = []
self.uu_frames = []
self.vv_frames = []
print(f'Loading {self.infile}')
for i, frame in enumerate(read_movie(self.infile)):
if self.max_frames > 0 and i >= self.max_frames:
break
print(f'Loading frame {i:3d} shape: {frame.shape}')
if frame.ndim == 3:
frame = np.mean(frame, axis=2)
assert frame.ndim == 2
# Smooth and downsample the frames to reduce noise
frame = frame / 255
smooth_frame = gaussian(frame, self.smoothing_sigma)
down_smooth_frame = downscale_local_mean(smooth_frame, (self.downsample_raw, self.downsample_raw))
# Store off the coordinate system from the downscaled images
if self.rows is None and self.cols is None:
self.rows, self.cols = down_smooth_frame.shape
x = np.arange(0, self.cols) * self.space_scale
y = np.arange(0, self.rows) * self.space_scale
self.xx, self.yy = np.meshgrid(x, y)
else:
assert down_smooth_frame.shape == (self.rows, self.cols)
self.smooth_frames.append(down_smooth_frame)
if len(self.smooth_frames) < 2:
self.uu_frames.append(np.zeros_like(down_smooth_frame))
self.vv_frames.append(np.zeros_like(down_smooth_frame))
continue
# Finally, calculate optical flow
frame1 = self.smooth_frames[i-1]
frame2 = self.smooth_frames[i]
flow_uu, flow_vv = optical_flow(frame1, frame2)
self.uu_frames.append(flow_uu * self.space_scale / self.time_scale)
self.vv_frames.append(flow_vv * self.space_scale / self.time_scale)
# Make sure we padded all the vectors right
assert len(self.smooth_frames) == len(self.uu_frames)
assert len(self.smooth_frames) == len(self.vv_frames)
def smooth_velocities(self):
""" Do a simple temporal smoothing to reduce noise """
print('Generating smoothed velocities...')
self.smooth_uu_frames = []
self.smooth_vv_frames = []
for i in range(self.max_frame):
i_st = max([0, i - self.half_width])
i_ed = min([self.max_frame, i + self.half_width + 1])
mean_uu_frame = np.mean(self.uu_frames[i_st:i_ed], axis=0)
mean_vv_frame = np.mean(self.vv_frames[i_st:i_ed], axis=0)
self.smooth_uu_frames.append(mean_uu_frame)
self.smooth_vv_frames.append(mean_vv_frame)
def accumulate_displacements(self):
""" Calculate cumulative displacement over the time series """
print('Generating cumulative displacements...')
self.cumulative_uu_frames = []
self.cumulative_vv_frames = []
self.cumulative_mag = []
# Accumulate the values in the frames
uu_total = np.zeros_like(self.smooth_uu_frames[0])
vv_total = np.zeros_like(self.smooth_vv_frames[0])
for i in range(self.max_frame):
# Total the displacement: ds = dv*dt
uu_total += self.smooth_uu_frames[i] * self.time_scale
vv_total += self.smooth_vv_frames[i] * self.time_scale
            mag_total = np.sqrt(uu_total**2 + vv_total**2)
self.cumulative_uu_frames.append(uu_total)
self.cumulative_vv_frames.append(vv_total)
self.cumulative_mag.append(mag_total)
def cluster_timeseries(self):
""" Cluster the timeseries using the magnitude """
print(f'Clustering pixels into {self.n_clusters} clusters by magnitude...')
smooth_mag = np.sqrt(np.array(self.smooth_uu_frames)**2 + np.array(self.smooth_vv_frames)**2)
# Image coordinates (aka trig is hard when you're standing on your head):
# x = -vv, y = uu
# ang = arctan2(y, x) = arctan2(uu, -vv)
smooth_ang = np.arctan2(np.array(self.smooth_uu_frames),
-np.array(self.smooth_vv_frames)) % np.pi
smooth_mag = np.reshape(smooth_mag, (self.max_frame, -1))
smooth_ang = np.reshape(smooth_ang, (self.max_frame, -1))
cumulative_mag = np.reshape(np.array(self.cumulative_mag), (self.max_frame, -1))
print(f'Got {smooth_mag.shape[1]} total traces')
assert smooth_mag.shape[1] == smooth_ang.shape[1]
assert smooth_mag.shape[1] == cumulative_mag.shape[1]
# Find the peak velocity over all of time for the trace
peak_vel = np.percentile(smooth_mag, 95, axis=0)
# If a trace has at least one peak higher than the min, add it to the large traces
mask = peak_vel > self.min_vel_mag
large_mag = smooth_mag[:, mask]
large_ang = smooth_ang[:, mask]
large_cumulative_mag = cumulative_mag[:, mask]
assert large_mag.ndim == 2
assert large_mag.shape[0] == self.max_frame
assert large_ang.shape[0] == self.max_frame
assert large_cumulative_mag.shape[0] == self.max_frame
# Find only the largest traces
print(f'Got {large_mag.shape[1]} large traces')
if large_mag.shape[1] > 10:
tree = KMeans(n_clusters=self.n_clusters)
self.labels = tree.fit_predict(large_mag.swapaxes(0, 1))
            label_image = np.zeros_like(mask, dtype=int)
label_image[mask] = (self.labels + 1)
self.label_image = np.reshape(label_image, (self.rows, self.cols))
else:
# FIXME: Not sure if this part actually works right...
self.labels = np.zeros((0, ))
self.label_image = np.zeros((self.rows, self.cols))
self.large_mag = large_mag
self.large_ang = large_ang
self.large_cumulative_mag = large_cumulative_mag
print('Binning vectors by angle...')
bin_x, bin_y, kernel_x, kernel_y = fit_kernel_smoothed_bins(
large_ang, large_mag, min_ang_mag=self.min_ang_mag)
self.kernel_bins = bin_x
self.kernel_bin_density = bin_y
self.kernel_ang = kernel_x
self.kernel_density = kernel_y
self.kernel_label_density = []
self.kernel_label_bin_density = []
for label_id in self.unique_label_ids:
print(f'Binning vectors by angle for label {label_id + 1}...')
mask = self.labels == label_id
if np.sum(mask) < 1:
bin_y = np.zeros_like(self.kernel_bins)
kernel_y = np.zeros_like(self.kernel_ang)
else:
label_mag = (large_mag[:, self.labels == label_id]).flatten()
label_ang = (large_ang[:, self.labels == label_id]).flatten()
_, bin_y, _, kernel_y = fit_kernel_smoothed_bins(
label_ang, label_mag, min_ang_mag=self.min_ang_mag)
assert self.kernel_bins.shape == bin_y.shape
assert self.kernel_ang.shape == kernel_y.shape
self.kernel_label_bin_density.append(bin_y)
self.kernel_label_density.append(kernel_y)
def cluster_mag_timelines(self):
""" Accumulate timelines for the large magnitutes """
large_mag = self.large_mag
large_cumulative_mag = self.large_cumulative_mag
        # Accumulate timelines for the large angles and magnitudes
print('Averaging displacements over time...')
self.timeline = np.arange(self.max_frame) * self.time_scale
timeline_mag = []
timeline_cumulative_mag = []
for label_id in [None] + self.unique_label_ids:
if label_id is None:
                mask = np.ones(self.labels.shape, dtype=bool)
label_name = 'AllClusters'
else:
mask = self.labels == label_id
label_name = f'Cluster{label_id + 1}'
print(f'Averaging timelines for {label_name}...')
print(f'{label_name} samples: {np.sum(mask)}')
# Subset each of the timelines by ROI
print('Calculating velocity stats...')
label_mag = pd.DataFrame(calculate_stats(large_mag[:, mask], axis=1))
assert self.timeline.shape[0] == label_mag.shape[0]
label_mag['cluster'] = label_name
label_mag['timepoint'] = self.timeline
print('Calculating displacement stats...')
label_cumulative_mag = pd.DataFrame(calculate_stats(large_cumulative_mag[:, mask], axis=1))
assert self.timeline.shape[0] == label_cumulative_mag.shape[0]
label_cumulative_mag['cluster'] = label_name
label_cumulative_mag['timepoint'] = self.timeline
timeline_mag.append(label_mag)
timeline_cumulative_mag.append(label_cumulative_mag)
self.timeline_mag = pd.concat(timeline_mag, ignore_index=True)
self.timeline_cumulative_mag = pd.concat(timeline_cumulative_mag, ignore_index=True)
def calc_peak_stats(self,
dataset: str = 'velocity',
metric: str = 'mean'):
""" Calculate the peaks and stats for a given set of waveforms
:param str dataset:
Which data set to call peaks for
:param str metric:
Which measurement to call peaks using
        (the spacing enforced around each peak is taken from ``self.samples_around_peak``)
"""
if dataset == 'velocity':
data = self.timeline_mag
if self.timeline_mag_peaks is None:
all_peaks = []
else:
all_peaks = [self.timeline_mag_peaks]
elif dataset == 'displacement':
data = self.timeline_cumulative_mag
if self.timeline_cumulative_mag_peaks is None:
all_peaks = []
else:
all_peaks = [self.timeline_cumulative_mag_peaks]
else:
raise KeyError(f'Unknown data set "{dataset}"')
print(f'Calling peaks for {metric} {dataset}...')
all_labels = np.unique(data['cluster'])
for label in all_labels:
df = data[data['cluster'] == label]
timeline = df['timepoint'].values
signal = df[metric].values
peak_indicies = peak_local_max(signal,
min_distance=self.samples_around_peak,
indices=True)
peaks = pd.DataFrame(refine_signal_peaks(timeline, signal, peak_indicies))
peaks['cluster'] = label
peaks['metric'] = metric
print(f'Got {peaks.shape[0]} peaks for cluster {label}')
all_peaks.append(peaks)
all_peaks = pd.concat(all_peaks, ignore_index=True)
if dataset == 'velocity':
self.timeline_mag_peaks = all_peaks
elif dataset == 'displacement':
self.timeline_cumulative_mag_peaks = all_peaks
else:
raise KeyError(f'Unknown data set "{dataset}"')
def calc_velocity_peak_stats(self):
""" Call peaks for the velocity data """
for metric in ['mean', 'max']:
self.calc_peak_stats('velocity', metric)
def calc_displacement_peak_stats(self):
""" Call peaks for the displacement data """
for metric in ['mean', 'max']:
self.calc_peak_stats('displacement', metric)
def clear_outdir(self):
""" Clean the output directory """
if self.outdir.is_dir():
print(f'Overwriting: {self.outdir}')
shutil.rmtree(str(self.outdir))
self.outdir.mkdir(parents=True)
def extract_peaks(self,
peaks: Optional[pd.DataFrame],
cluster: str,
metric: str) -> Tuple[List[int]]:
""" Pull out the peak indices for a particular cluster
:param DataFrame peaks:
The peak call data frame
:param str cluster:
The cluster to load peak calls for
:param str metric:
The metric to load peak calls for
:returns:
Two lists of low_indices, high_indices
"""
if peaks is None:
return [], []
mask = np.logical_and(peaks['metric'] == metric,
peaks['cluster'] == cluster)
if np.sum(mask) < 1:
return [], []
# Pull out the peak indices and de-duplicate
metric_peaks = peaks[mask]
low_peaks = set(metric_peaks['peak_start_index'].values)
low_peaks.update(metric_peaks['peak_end_index'].values)
low_peaks = [int(i) for i in sorted(low_peaks)]
high_peaks = [int(i) for i in sorted(set(metric_peaks['peak_index'].values))]
return low_peaks, high_peaks
def plot_averaged_timeseries(self,
dataset: str = 'velocity',
metric: str = 'mean',
markersize: float = 10):
""" Convert the raw collections to time series traces
:param str dataset:
Which data set to plot the time series for
:param str metric:
Which measurement to plot along the time series
"""
# Switch based on the selected data set
if dataset == 'velocity':
data = self.timeline_mag
peaks = self.timeline_mag_peaks
if metric == 'max':
ylim = [0, self.max_vel_mag*2.5]
else:
ylim = [0, self.max_vel_mag*1.1]
ylabel = 'Velocity Magnitude ($\\mu m/sec$)'
title = f'{metric.capitalize()} Velocity Magnitude'
elif dataset == 'displacement':
data = self.timeline_cumulative_mag
peaks = self.timeline_cumulative_mag_peaks
if metric == 'max':
ylim = [0, self.max_disp_mag*5.0]
else:
ylim = [0, self.max_disp_mag*1.1]
ylabel = 'Total Displacement ($\\mu m$)'
title = f'{metric.capitalize()} Total Displacement'
else:
raise KeyError(f'Unknown data set "{dataset}"')
has_ci = f'{metric} ci low' in data.columns and f'{metric} ci high' in data.columns
xlabel = 'Timepoint (sec)'
xlim = [0, self.max_frame*self.time_scale]
print(f'Generating {metric} {dataset} timeseries plots...')
figsize_x, figsize_y = self.figsize
outdir = self.outdir / f'{dataset}_{metric}_timeseries'
outdir.mkdir(parents=True, exist_ok=True)
# Make a velocity timeseries of the composite data
outfile = outdir / f'{dataset}_{metric}_timeseries_all{self.suffix}'
df = data[data['cluster'] == 'AllClusters']
cluster_x = df['timepoint'].values
cluster_y = df[metric].values
color = 'black'
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
if has_ci:
ax.fill_between(cluster_x, df[metric + ' ci low'], df[metric + ' ci high'],
color=color, alpha=0.5)
ax.plot(cluster_x, cluster_y, color=color, linewidth=3, label='AllClusters')
low_peaks, high_peaks = self.extract_peaks(peaks, cluster='AllClusters', metric=metric)
for idx in low_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], 'o', color=color, markersize=markersize)
for idx in high_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], '*', color=color, markersize=markersize)
for label_id in self.unique_label_ids:
label_name = f'Cluster{label_id + 1}'
df = data[data['cluster'] == label_name]
cluster_x = df['timepoint'].values
cluster_y = df[metric].values
color = self.palette[label_id]
if has_ci:
ax.fill_between(cluster_x, df[metric + ' ci low'], df[metric + ' ci high'],
color=color, alpha=0.5)
ax.plot(cluster_x, cluster_y, color=color, label=label_name)
low_peaks, high_peaks = self.extract_peaks(peaks, cluster=label_name, metric=metric)
for idx in low_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], 'o', color=color, markersize=markersize)
for idx in high_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], '*', color=color, markersize=markersize)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
ax.set_title(title)
fig.savefig(outfile, transparent=True)
plt.close()
# Make a velocity timeseries for all the clusters only
outfile = outdir / f'{dataset}_{metric}_timeseries_clusters{self.suffix}'
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
for label_id in self.unique_label_ids:
label_name = f'Cluster{label_id + 1}'
df = data[data['cluster'] == label_name]
cluster_x = df['timepoint'].values
cluster_y = df[metric].values
color = self.palette[label_id]
if has_ci:
ax.fill_between(cluster_x, df[metric + ' ci low'], df[metric + ' ci high'],
color=color, alpha=0.5)
ax.plot(cluster_x, cluster_y, color=color, label=label_name)
low_peaks, high_peaks = self.extract_peaks(peaks, cluster=label_name, metric=metric)
for idx in low_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], 'o', color=color, markersize=markersize)
for idx in high_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], '*', color=color, markersize=markersize)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.legend()
ax.set_title(title)
fig.savefig(outfile, transparent=True)
plt.close()
# Make plots for the individual clusters
outfile = outdir / f'{dataset}_{metric}_timeseries_all_clusters{self.suffix}'
df = data[data['cluster'] == 'AllClusters']
cluster_x = df['timepoint'].values
cluster_y = df[metric].values
color = 'black'
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
if has_ci:
ax.fill_between(cluster_x, df[metric + ' ci low'], df[metric + ' ci high'],
color=color, alpha=0.5)
ax.plot(cluster_x, cluster_y, color=color, linewidth=3, label='AllClusters')
low_peaks, high_peaks = self.extract_peaks(peaks, cluster='AllClusters', metric=metric)
for idx in low_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], 'o', color=color, markersize=markersize)
for idx in high_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], '*', color=color, markersize=markersize)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title('All Clusters ' + title)
fig.savefig(outfile, transparent=True)
plt.close()
for label_id in self.unique_label_ids:
outfile = outdir / f'{dataset}_{metric}_timeseries_cluster{label_id+1:02d}{self.suffix}'
            label_name = f'Cluster{label_id + 1}'
            fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
df = data[data['cluster'] == label_name]
cluster_x = df['timepoint'].values
cluster_y = df[metric].values
color = self.palette[label_id]
if has_ci:
ax.fill_between(cluster_x, df[metric + ' ci low'], df[metric + ' ci high'],
color=color, alpha=0.5)
ax.plot(cluster_x, cluster_y, color=color, label=label_name)
low_peaks, high_peaks = self.extract_peaks(peaks, cluster=label_name, metric=metric)
for idx in low_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], 'o', color=color, markersize=markersize)
for idx in high_peaks:
ax.plot(cluster_x[idx], cluster_y[idx], '*', color=color, markersize=markersize)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(f'{label_name} {title}')
fig.savefig(outfile, transparent=True)
plt.close()
def plot_velocity_timeseries(self):
""" Generate velocity timeseries plots """
for metric in ['mean', 'max']:
self.plot_averaged_timeseries(dataset='velocity', metric=metric)
def plot_displacement_timeseries(self):
""" Plot the timeseries for the displacement data """
for metric in ['mean', 'max']:
self.plot_averaged_timeseries(dataset='displacement', metric=metric)
def plot_cluster_label_image(self):
""" Plot the label key for the cluster image """
figsize_x, figsize_y = self.figsize
# Label the ROIs to match the clusters
label_colors = np.zeros(self.label_image.shape + (3, ))
for label_id in self.unique_label_ids:
label_colors[self.label_image == label_id + 1, :] = self.palette[label_id]
# Make the image of the cluster labels only
outfile = self.outdir / f'cluster_image{self.suffix}'
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
ax.imshow(label_colors)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
def plot_angle_distribution(self):
""" Plot the distribution of angles """
figsize_x, figsize_y = 8, 8
        # Plot everything together
outfile = self.outdir / 'angle' / f'all_bins{self.suffix}'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig = plt.figure(figsize=(figsize_x, figsize_y))
ax = fig.add_subplot(1, 1, 1, projection='polar')
ax.plot(self.kernel_ang, self.kernel_density, '-', color='black', linewidth=4)
ax.set_thetamin(0)
ax.set_thetamax(180)
ax.set_xticks(np.pi/180. * np.linspace(0, 180, 7, endpoint=True))
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
# Plot all the clusters on a polar plot
outfile = self.outdir / 'angle' / f'all_clusters_polar{self.suffix}'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig = plt.figure(figsize=(figsize_x, figsize_y))
ax = fig.add_subplot(1, 1, 1, projection='polar')
ax.plot(self.kernel_ang, self.kernel_density, '-', color='black', linewidth=4,
label='All Clusters')
for label_id, label_density in enumerate(self.kernel_label_density):
ax.plot(self.kernel_ang, label_density, '-', color=self.palette[label_id],
label=f'Cluster {label_id+1}')
ax.set_thetamin(0)
ax.set_thetamax(180)
ax.set_xticks(np.pi/180. * np.linspace(0, 180, 7, endpoint=True))
ax.set_yticks([])
ax.legend()
fig.savefig(outfile, transparent=True)
plt.close()
# Plot all the clusters on a linear space, to verify that the areas make sense
outfile = self.outdir / 'angle' / f'all_clusters_flat{self.suffix}'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig = plt.figure(figsize=(figsize_x, figsize_y))
ax = fig.add_subplot(1, 1, 1)
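        # Report the integrated area under each density curve as a sanity check on the
        # normalisation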
label_area = simps(self.kernel_density, self.kernel_ang)
print(f'Overall Distribution Area: {label_area:0.5f}')
ax.plot(self.kernel_ang, self.kernel_density, '-', color='black', linewidth=4,
label='All Clusters')
for label_id, label_density in enumerate(self.kernel_label_density):
label_area = simps(label_density, self.kernel_ang)
print(f'Label {label_id} Area: {label_area:0.5f}')
ax.plot(self.kernel_ang, label_density, '-', color=self.palette[label_id],
label=f'Cluster {label_id+1}')
ax.set_xlim([-0.1, np.pi+0.1])
ax.set_xticks(np.pi/180. * np.linspace(0, 180, 7, endpoint=True))
ax.set_xticklabels([f'{t:0.0f}' for t in np.linspace(0, 180, 7, endpoint=True)])
ax.set_xlabel('Direction (deg)')
ax.set_ylabel('Probability Density')
ax.legend()
fig.savefig(outfile, transparent=True)
plt.close()
# Only plot the main cluster
outfile = self.outdir / 'angle' / f'main_cluster{self.suffix}'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig = plt.figure(figsize=(figsize_x, figsize_y))
ax = fig.add_subplot(1, 1, 1, projection='polar')
ax.bar(self.kernel_bins, self.kernel_bin_density)
ax.plot(self.kernel_ang, self.kernel_density, '-', color='black', linewidth=2)
ax.set_thetamin(0)
ax.set_thetamax(180)
ax.set_xticks(np.pi/180. * np.linspace(0, 180, 7, endpoint=True))
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
# Plot each cluster individually
for label_id, label_density in enumerate(self.kernel_label_density):
outfile = self.outdir / 'angle' / f'cluster{label_id:02d}{self.suffix}'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig = plt.figure(figsize=(figsize_x, figsize_y))
ax = fig.add_subplot(1, 1, 1, projection='polar')
ax.bar(self.kernel_bins, self.kernel_label_bin_density[label_id])
ax.plot(self.kernel_ang, label_density, '-', color=self.palette[label_id])
ax.set_thetamin(0)
ax.set_thetamax(180)
ax.set_xticks(np.pi/180. * np.linspace(0, 180, 7, endpoint=True))
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
def plot_velocities(self):
""" Make the velocity plots """
figsize_x, figsize_y = self.figsize
for i, frame in enumerate(self.smooth_frames):
flow_uu = self.smooth_uu_frames[i]
flow_vv = self.smooth_vv_frames[i]
flow_color = convert_warp_to_color(flow_uu, flow_vv)
outfile = self.outdir / 'frame_color_split' / f'frame{i:03d}.tif'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(figsize_x*2, figsize_y))
ax1.imshow(frame, cmap='gray')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.imshow(flow_color)
ax2.set_xticks([])
ax2.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
outfile = self.outdir / 'color' / f'frame{i:03d}.tif'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
ax.imshow(flow_color)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
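            # Blend the flow colours onto the grayscale frame, weighted by the
            # normalised velocity magnitude, then clip the composite to [0, 1]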
flow_color_alpha = np.sqrt(flow_uu**2 + flow_vv**2) / self.max_vel_mag
flow_color_comp = flow_color * flow_color_alpha[:, :, np.newaxis]
composite = np.stack([frame, frame, frame], axis=2)
composite += flow_color_comp
composite[composite > 1] = 1
composite[composite < 0] = 0
outfile = self.outdir / 'frame_color_merge' / f'frame{i:03d}.tif'
outfile.parent.mkdir(parents=True, exist_ok=True)
fig, ax = plt.subplots(1, 1, figsize=(figsize_x, figsize_y))
ax.imshow(composite)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig(outfile, transparent=True)
plt.close()
def save_angle_distribution(self):
""" Write out the angle distribution table """
print('Saving angle distribution...')
outfile = self.outdir / 'angle_dist.xlsx'
outfile.parent.mkdir(parents=True, exist_ok=True)
# Write out the angle clusters for each image
df = {
'Angle': self.kernel_ang,
'AllClusters': self.kernel_density,
}
for label_id, label_density in enumerate(self.kernel_label_density):
df[f'Cluster{label_id+1:02d}'] = label_density
df = pd.DataFrame(df)
df.to_excel(str(outfile))
def save_timeline_distribution(self):
""" Write out the timeline distribution and cumulative distribution """
print('Saving velocity distribution...')
# Write out the displacement magnitude clusters for each image
if self.timeline_mag is not None:
outfile = self.outdir / 'timeline_mag.xlsx'
self.timeline_mag.to_excel(str(outfile), index=False)
if self.timeline_mag_peaks is not None:
outfile = self.outdir / 'timeline_mag_peaks.xlsx'
self.timeline_mag_peaks.to_excel(str(outfile), index=False)
# Write out the cumulative displacement clusters for each image
print('Saving displacement distribution...')
outfile = self.outdir / 'timeline_cumulative_mag.xlsx'
if self.timeline_cumulative_mag is not None:
self.timeline_cumulative_mag.to_excel(str(outfile), index=False)
if self.timeline_cumulative_mag_peaks is not None:
outfile = self.outdir / 'timeline_cumulative_mag_peaks.xlsx'
self.timeline_cumulative_mag_peaks.to_excel(str(outfile), index=False)
# Main function
def calc_optical_flow(*args, **kwargs):
""" Calculate the optical flow and plot it for a movie
:param Path infile:
The path to the input file to analyze
:param Path outdir:
The path to the directory to write the plots and analysis to
:param int max_frames:
If >0, maximum number of frames to load
"""
proc = AnalyzeFlow(*args, **kwargs)
proc.calc_velocities()
proc.smooth_velocities()
proc.accumulate_displacements()
proc.cluster_timeseries()
# proc.cluster_mag_timelines()
# proc.calc_velocity_peak_stats()
# proc.calc_displacement_peak_stats()
# Plots
proc.clear_outdir()
proc.plot_angle_distribution()
# proc.plot_velocities()
# proc.plot_cluster_label_image()
# proc.plot_velocity_timeseries()
# proc.plot_displacement_timeseries()
# Output data
proc.save_angle_distribution()
# proc.save_timeline_distribution()
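# A minimal invocation sketch (assumption: AnalyzeFlow accepts the keyword arguments
# documented in the docstring above; 'movie.tif' and 'flow_output' are placeholder paths)
if __name__ == '__main__':
    from pathlib import Path
    calc_optical_flow(infile=Path('movie.tif'), outdir=Path('flow_output'), max_frames=0)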
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from snippets import views
urlpatterns = [
path('snippets/', views.SnippetList.as_view(), name='snippet-list'),
path('snippets/<int:pk>/', views.SnippetDetail.as_view(), name='snippet-detail'),
path('snippets/<int:pk>/highlight/',
views.SnippetHighlight.as_view(), name='snippet-highlight'), # new
path('users/', views.UserList.as_view(), name='user-list'),
path('users/<int:pk>/', views.UserDetail.as_view(), name='user-detail'),
path('', views.api_root),
]
urlpatterns = format_suffix_patterns(urlpatterns)
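# format_suffix_patterns appends optional ".<format>" suffixes to every route, so e.g.
# /snippets/1.json resolves to the same SnippetDetail view as /snippets/1/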
|
import unittest
from artifactcli.artifact import BasicInfo
class TestBasicInfo(unittest.TestCase):
def setUp(self):
self.test_data = [
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None),
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1.2', 'zip', 345),
]
class A(object):
pass
def test_eq(self):
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'zip', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 124))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogprojectX', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assemblyX', '0.1-SNAPSHOT', 'jar', 123))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) ==
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOTX', 'jar', 123))
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) == 123)
def test_ne(self):
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'zip', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 124))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogprojectX', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assemblyX', '0.1-SNAPSHOT', 'jar', 123))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) !=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOTX', 'jar', 123))
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) != 123)
def test_lt(self):
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) <
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) < 123)
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) < '123')
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) < self.A())
def test_le(self):
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) <=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) <= 123)
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) <= '123')
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) <= self.A())
def test_gt(self):
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) >
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) > 123)
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) > '123')
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) > self.A())
def test_ge(self):
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1))
self.assertFalse(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 1) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertTrue(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) >=
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 10))
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) >= 123)
self.assertFalse(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) >= '123')
self.assertTrue(BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', 123) >= self.A())
def test_str_type(self):
self.assertTrue(all(isinstance(x.__str__(), str) for x in self.test_data))
def test_repr(self):
self.assertEqual(
repr(self.test_data[0]),
"BasicInfo(group_id='com.github.mogproject', artifact_id='xxx-yyy-assembly'," +
" version='0.1-SNAPSHOT', packaging='jar', revision=None)")
self.assertEqual(
repr(self.test_data[1]),
"BasicInfo(group_id='com.github.mogproject', artifact_id='xxx-yyy-assembly'," +
" version='0.1.2', packaging='zip', revision=345)")
def test_filename(self):
self.assertEqual(
BasicInfo('com.github.mogproject', 'xxx-yyy-assembly', '0.1-SNAPSHOT', 'jar', None).filename(),
'xxx-yyy-assembly-0.1-SNAPSHOT.jar')
self.assertEqual(
BasicInfo('GROUP_ID', 'xxxxxx', '1.2.3', 'zip', 123).filename(),
'xxxxxx-1.2.3.zip')
def test_is_version_like(self):
self.assertEqual(BasicInfo._is_version_like('0'), True)
self.assertEqual(BasicInfo._is_version_like('1'), True)
self.assertEqual(BasicInfo._is_version_like('0.2'), True)
self.assertEqual(BasicInfo._is_version_like('0.0.3'), True)
self.assertEqual(BasicInfo._is_version_like('1.2.3.4'), True)
self.assertEqual(BasicInfo._is_version_like(''), False)
self.assertEqual(BasicInfo._is_version_like('a'), False)
self.assertEqual(BasicInfo._is_version_like('.'), False)
self.assertEqual(BasicInfo._is_version_like('0.'), False)
self.assertEqual(BasicInfo._is_version_like('0.1.'), False)
self.assertEqual(BasicInfo._is_version_like('0..1'), False)
self.assertEqual(BasicInfo._is_version_like('1.2.3.4.a'), False)
def test_from_path(self):
self.assertEqual(BasicInfo.from_path('a', '/home/user/xxx-yyy-assembly-0.1.0-SNAPSHOT.jar'),
BasicInfo('a', 'xxx-yyy-assembly', '0.1.0-SNAPSHOT', 'jar', None))
self.assertEqual(BasicInfo.from_path('b', '/home/user/xxx-yyy-assembly-0.1.jar'),
BasicInfo('b', 'xxx-yyy-assembly', '0.1', 'jar', None))
self.assertEqual(BasicInfo.from_path('c', '/a/b/c/d/dist/d-0.1.0.zip'),
BasicInfo('c', 'd', '0.1.0', 'zip', None))
self.assertEqual(BasicInfo.from_path('d', 'a/b/c/d/target/universal/d-2.0-1.0-SNAPSHOT.zip'),
BasicInfo('d', 'd-2.0', '1.0-SNAPSHOT', 'zip', None))
def test_from_path_error(self):
self.assertRaises(ValueError, BasicInfo.from_path, 'a', 'zip')
self.assertRaises(ValueError, BasicInfo.from_path, 'b', '.zip')
self.assertRaises(ValueError, BasicInfo.from_path, 'c', '0.1.2.zip')
self.assertRaises(ValueError, BasicInfo.from_path, 'd', 'a-b-c.zip')
self.assertRaises(ValueError, BasicInfo.from_path, 'e', 'a-b-c-0')
self.assertRaises(ValueError, BasicInfo.from_path, 'f', 'a-b-c-0.1.2.')
def test_s3_path(self):
self.assertEqual(self.test_data[1].s3_path(),
'com.github.mogproject/xxx-yyy-assembly/0.1.2/345/xxx-yyy-assembly-0.1.2.zip')
def test_s3_path_error(self):
self.assertRaises(ValueError, self.test_data[0].s3_path)
def test_dict_conversions(self):
self.assertEqual(BasicInfo.from_dict(self.test_data[0].to_dict()), self.test_data[0])
self.assertEqual(BasicInfo.from_dict(self.test_data[1].to_dict()), self.test_data[1])
|
from matrix import *
import random
import LED_display as LMD
import threading
import time
import timeit
def LED_init():
thread = threading.Thread(target=LMD.main, args=())
    thread.daemon = True
thread.start()
return
def draw_matrix(m):
array = m.get_array()
for y in range(m.get_dy()):
for x in range(3, m.get_dx()-3):
if array[y][x] == 0:
LMD.set_pixel(y, 18-x, 0)
elif array[y][x] == 1:
LMD.set_pixel(y, 18-x, 3)
elif array[y][x] == 2:
LMD.set_pixel(y, 18-x, 3)
elif array[y][x] == 3:
LMD.set_pixel(y, 18-x, 4)
elif array[y][x] == 7:
LMD.set_pixel(y, 18-x, 1)
else:
LMD.set_pixel(y, 18-x, 4)
print()
def num_matrix(number):
if number == 0:
return zero
elif number == 1:
return one
elif number == 2:
return two
elif number == 3:
return three
elif number == 4:
return four
elif number == 5:
return five
elif number == 6:
return six
elif number == 7:
return seven
elif number == 8:
return eight
elif number == 9:
return nine
### integer variables: must always be integer!
iScreenDy = 32
iScreenDx = 16
iScreenDw = 3
top = 27
left = iScreenDw + iScreenDx//2 - 2
newCarNeeded = False
arrayTime = [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ]
zero = [[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 0, 7],
[0, 7, 0, 7],
[0, 7, 7, 7]]
one = [[0, 0, 0, 7],
[0, 0, 7, 7],
[0, 0, 0, 7],
[0, 0, 0, 7],
[0, 0, 0, 7]]
two = [[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 7, 7, 7],
[0, 7, 0, 0],
[0, 7, 7, 7]]
three = [[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 7, 7, 7]]
four = [[0, 7, 0, 7],
[0, 7, 0, 7],
[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 0, 0, 7]]
five = [[0, 7, 7, 7],
[0, 7, 0, 0],
[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 7, 7, 7]]
six = [[0, 7, 7, 7],
[0, 7, 0, 0],
[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 7, 7]]
seven = [[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 0, 7],
[0, 0, 0, 7],
[0, 0, 0, 7]]
eight = [[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 7, 7]]
nine = [[0, 7, 7, 7],
[0, 7, 0, 7],
[0, 7, 7, 7],
[0, 0, 0, 7],
[0, 7, 7, 7]]
ones = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
tens = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
hunds = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
thnds = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
iScreen = Matrix(arrayTime)
oScreen = Matrix(iScreen)
oScreen.paste(Matrix(thnds),0,3)
oScreen.paste(Matrix(hunds),0,7)
oScreen.paste(Matrix(tens),0,11)
oScreen.paste(Matrix(ones),0,15)
LED_init()
draw_matrix(oScreen)
print()
start = timeit.default_timer()
while True:
now = timeit.default_timer()
score = round(now-start, 1)*10
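    # Split the score into its thousands/hundreds/tens/ones digits and map each
    # digit to its pixel pattern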
thnds = num_matrix(int(score // 1000))
hunds = num_matrix(int((score - ((score // 1000)*1000)) // 100))
tens = num_matrix(int((score - ((score // 1000)*1000) - ((score - ((score // 1000)*1000)) // 100*100)) // 10))
ones = num_matrix(int(score % 10))
oScreen = Matrix(iScreen)
oScreen.paste(Matrix(thnds),0,3)
oScreen.paste(Matrix(hunds),0,7)
oScreen.paste(Matrix(tens),0,11)
oScreen.paste(Matrix(ones),0,15)
draw_matrix(oScreen)
print(score)
# ~999.9 secs ( about 16 mins )
    if score >= 9999:
print("!! You WIN !!")
break
print(score)
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2common.constants.pack import SYSTEM_PACK_NAME
from st2common.models.system.common import ResourceReference
__all__ = [
'WEBHOOKS_PARAMETERS_SCHEMA',
'WEBHOOKS_PAYLOAD_SCHEMA',
'INTERVAL_PARAMETERS_SCHEMA',
'DATE_PARAMETERS_SCHEMA',
'CRON_PARAMETERS_SCHEMA',
'TIMER_PAYLOAD_SCHEMA',
'ACTION_SENSOR_TRIGGER',
'NOTIFY_TRIGGER',
'ACTION_FILE_WRITTEN_TRIGGER',
'INQUIRY_TRIGGER',
'TIMER_TRIGGER_TYPES',
'WEBHOOK_TRIGGER_TYPES',
'WEBHOOK_TRIGGER_TYPE',
'INTERNAL_TRIGGER_TYPES',
'SYSTEM_TRIGGER_TYPES',
'INTERVAL_TIMER_TRIGGER_REF',
'DATE_TIMER_TRIGGER_REF',
'CRON_TIMER_TRIGGER_REF',
'TRIGGER_INSTANCE_STATUSES',
'TRIGGER_INSTANCE_PENDING',
'TRIGGER_INSTANCE_PROCESSING',
'TRIGGER_INSTANCE_PROCESSED',
'TRIGGER_INSTANCE_PROCESSING_FAILED'
]
# Action resource triggers
ACTION_SENSOR_TRIGGER = {
'name': 'st2.generic.actiontrigger',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating the completion of an action execution.',
'payload_schema': {
'type': 'object',
'properties': {
'execution_id': {},
'status': {},
'start_timestamp': {},
'action_name': {},
'action_ref': {},
'runner_ref': {},
'parameters': {},
'result': {}
}
}
}
ACTION_FILE_WRITTEN_TRIGGER = {
'name': 'st2.action.file_writen',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating action file being written on disk.',
'payload_schema': {
'type': 'object',
'properties': {
'ref': {},
'file_path': {},
'host_info': {}
}
}
}
NOTIFY_TRIGGER = {
'name': 'st2.generic.notifytrigger',
'pack': SYSTEM_PACK_NAME,
'description': 'Notification trigger.',
'payload_schema': {
'type': 'object',
'properties': {
'execution_id': {},
'status': {},
'start_timestamp': {},
'end_timestamp': {},
'action_ref': {},
'runner_ref': {},
'channel': {},
'route': {},
'message': {},
'data': {}
}
}
}
INQUIRY_TRIGGER = {
'name': 'st2.generic.inquiry',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger indicating a new "inquiry" has entered "pending" status',
'payload_schema': {
'type': 'object',
'properties': {
'id': {
'type': 'string',
'description': 'ID of the new inquiry.',
'required': True
},
'route': {
'type': 'string',
'description': 'An arbitrary value for allowing rules '
'to route to proper notification channel.',
'required': True
}
},
"additionalProperties": False
}
}
# Sensor spawn/exit triggers.
SENSOR_SPAWN_TRIGGER = {
'name': 'st2.sensor.process_spawn',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger indicating sensor process is started up.',
'payload_schema': {
'type': 'object',
'properties': {
'object': {}
}
}
}
SENSOR_EXIT_TRIGGER = {
'name': 'st2.sensor.process_exit',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger indicating sensor process is stopped.',
'payload_schema': {
'type': 'object',
'properties': {
'object': {}
}
}
}
# KeyValuePair resource triggers
KEY_VALUE_PAIR_CREATE_TRIGGER = {
'name': 'st2.key_value_pair.create',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating datastore item creation.',
'payload_schema': {
'type': 'object',
'properties': {
'object': {}
}
}
}
KEY_VALUE_PAIR_UPDATE_TRIGGER = {
'name': 'st2.key_value_pair.update',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating datastore set action.',
'payload_schema': {
'type': 'object',
'properties': {
'object': {}
}
}
}
KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER = {
'name': 'st2.key_value_pair.value_change',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating a change of datastore item value.',
'payload_schema': {
'type': 'object',
'properties': {
'old_object': {},
'new_object': {}
}
}
}
KEY_VALUE_PAIR_DELETE_TRIGGER = {
'name': 'st2.key_value_pair.delete',
'pack': SYSTEM_PACK_NAME,
'description': 'Trigger encapsulating datastore item deletion.',
'payload_schema': {
'type': 'object',
'properties': {
'object': {}
}
}
}
# Internal system triggers which are available for each resource
INTERNAL_TRIGGER_TYPES = {
'action': [
ACTION_SENSOR_TRIGGER,
NOTIFY_TRIGGER,
ACTION_FILE_WRITTEN_TRIGGER,
INQUIRY_TRIGGER
],
'sensor': [
SENSOR_SPAWN_TRIGGER,
SENSOR_EXIT_TRIGGER
],
'key_value_pair': [
KEY_VALUE_PAIR_CREATE_TRIGGER,
KEY_VALUE_PAIR_UPDATE_TRIGGER,
KEY_VALUE_PAIR_VALUE_CHANGE_TRIGGER,
KEY_VALUE_PAIR_DELETE_TRIGGER
]
}
WEBHOOKS_PARAMETERS_SCHEMA = {
'type': 'object',
'properties': {
'url': {
'type': 'string',
'required': True
}
},
'additionalProperties': False
}
WEBHOOKS_PAYLOAD_SCHEMA = {
'type': 'object'
}
WEBHOOK_TRIGGER_TYPES = {
ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.webhook'): {
'name': 'st2.webhook',
'pack': SYSTEM_PACK_NAME,
'description': ('Trigger type for registering webhooks that can consume'
' arbitrary payload.'),
'parameters_schema': WEBHOOKS_PARAMETERS_SCHEMA,
'payload_schema': WEBHOOKS_PAYLOAD_SCHEMA
}
}
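# ResourceReference.to_string_reference is expected to join pack and trigger name with a
# dot, so the key above resolves to "<SYSTEM_PACK_NAME>.st2.webhook" (e.g. "core.st2.webhook"
# if the system pack is named "core").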
WEBHOOK_TRIGGER_TYPE = list(WEBHOOK_TRIGGER_TYPES.keys())[0]
# Timer specs
INTERVAL_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"unit": {
"enum": ["weeks", "days", "hours", "minutes", "seconds"],
"required": True
},
"delta": {
"type": "integer",
"required": True
}
},
"additionalProperties": False
}
DATE_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"date": {
"type": "string",
"format": "date-time",
"required": True
}
},
"additionalProperties": False
}
CRON_PARAMETERS_SCHEMA = {
"type": "object",
"properties": {
"timezone": {
"type": "string"
},
"year": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
},
"month": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 1,
"maximum": 12
},
"day": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 1,
"maximum": 31
},
"week": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 1,
"maximum": 53
},
"day_of_week": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 0,
"maximum": 6
},
"hour": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 0,
"maximum": 23
},
"minute": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 0,
"maximum": 59
},
"second": {
"anyOf": [
{"type": "string"},
{"type": "integer"}
],
"minimum": 0,
"maximum": 59
},
"start_date": {
"type": "string",
"format": "date-time",
},
"end_date": {
"type": "string",
"format": "date-time",
}
},
"additionalProperties": False
}
TIMER_PAYLOAD_SCHEMA = {
"type": "object",
"properties": {
"executed_at": {
"type": "string",
"format": "date-time",
"default": "2014-07-30 05:04:24.578325"
},
"schedule": {
"type": "object",
"default": {
"delta": 30,
"units": "seconds"
}
}
}
}
INTERVAL_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME,
'st2.IntervalTimer')
DATE_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.DateTimer')
CRON_TIMER_TRIGGER_REF = ResourceReference.to_string_reference(SYSTEM_PACK_NAME, 'st2.CronTimer')
TIMER_TRIGGER_TYPES = {
INTERVAL_TIMER_TRIGGER_REF: {
'name': 'st2.IntervalTimer',
'pack': SYSTEM_PACK_NAME,
'description': 'Triggers on specified intervals. e.g. every 30s, 1week etc.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': INTERVAL_PARAMETERS_SCHEMA
},
DATE_TIMER_TRIGGER_REF: {
'name': 'st2.DateTimer',
'pack': SYSTEM_PACK_NAME,
'description': 'Triggers exactly once when the current time matches the specified time. '
'e.g. timezone:UTC date:2014-12-31 23:59:59.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': DATE_PARAMETERS_SCHEMA
},
CRON_TIMER_TRIGGER_REF: {
'name': 'st2.CronTimer',
'pack': SYSTEM_PACK_NAME,
        'description': 'Triggers whenever current time matches the specified time constraints like '
'a UNIX cron scheduler.',
'payload_schema': TIMER_PAYLOAD_SCHEMA,
'parameters_schema': CRON_PARAMETERS_SCHEMA
}
}
SYSTEM_TRIGGER_TYPES = dict(list(WEBHOOK_TRIGGER_TYPES.items()) + list(TIMER_TRIGGER_TYPES.items()))
# various status to record lifecycle of a TriggerInstance
TRIGGER_INSTANCE_PENDING = 'pending'
TRIGGER_INSTANCE_PROCESSING = 'processing'
TRIGGER_INSTANCE_PROCESSED = 'processed'
TRIGGER_INSTANCE_PROCESSING_FAILED = 'processing_failed'
TRIGGER_INSTANCE_STATUSES = [
TRIGGER_INSTANCE_PENDING,
TRIGGER_INSTANCE_PROCESSING,
TRIGGER_INSTANCE_PROCESSED,
TRIGGER_INSTANCE_PROCESSING_FAILED
]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import sys
import tempfile
import time
import shutil
import glob
import json
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from dump_test_utils import generate_dump_json, generate_dump_json_with_overflow, check_dump_structure
from tests.security_utils import security_off_wrap
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.Add()
def construct(self, x_, y_):
return self.add(x_, y_)
x = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
y = np.array([[7, 8, 9], [10, 11, 12]]).astype(np.float32)
def run_async_dump(test_name):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:
dump_path = os.path.join(tmp_dir, 'async_dump')
dump_config_path = os.path.join(tmp_dir, 'async_dump.json')
generate_dump_json(dump_path, dump_config_path, test_name)
os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path
dump_file_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')
if os.path.isdir(dump_path):
shutil.rmtree(dump_path)
add = Net()
add(Tensor(x), Tensor(y))
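        # The dump is written asynchronously; poll for a few seconds so the output
        # directory exists before its contents are checked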
for _ in range(3):
if not os.path.exists(dump_file_path):
time.sleep(2)
check_dump_structure(dump_path, dump_config_path, 1, 1, 1)
assert len(os.listdir(dump_file_path)) == 1
# check content of the generated dump data
if test_name == "test_async_dump_npy":
output_name = "Add.Add-op*.*.*.*.output.0.ND.npy"
output_path = glob.glob(os.path.join(dump_file_path, output_name))[0]
real_path = os.path.realpath(output_path)
output = np.load(real_path)
expect = np.array([[8, 10, 12], [14, 16, 18]], np.float32)
assert np.array_equal(output, expect)
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_async_dump_npy():
"""
Feature: async dump on Ascend
Description: test async dump with file_format = "npy"
Expectation: dump data are generated as npy file format
"""
run_async_dump("test_async_dump_npy")
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_async_dump_bin():
"""
Feature: async dump on Ascend in npy format
Description: test async dump with file_format = "bin"
Expectation: dump data are generated as protobuf file format (suffix with timestamp)
"""
run_async_dump("test_async_dump_bin")
def run_overflow_dump(test_name):
"""Run async dump and generate overflow"""
if sys.platform != 'linux':
return
overflow_x = np.array([60000, 60000]).astype(np.float16)
with tempfile.TemporaryDirectory(dir='/tmp') as tmp_dir:
dump_path = os.path.join(tmp_dir, 'overflow_dump')
dump_config_path = os.path.join(tmp_dir, 'overflow_dump.json')
generate_dump_json_with_overflow(dump_path, dump_config_path, test_name, 3)
os.environ['MINDSPORE_DUMP_CONFIG'] = dump_config_path
if os.path.isdir(dump_path):
shutil.rmtree(dump_path)
add = Net()
add(Tensor(overflow_x), Tensor(overflow_x))
exe_graph_path = os.path.join(dump_path, 'rank_0', 'Net', '0', '0')
for _ in range(5):
if not os.path.exists(exe_graph_path):
time.sleep(2)
check_dump_structure(dump_path, dump_config_path, 1, 1, 1)
# check if overflow dump generate exact two files, and the naming format
assert len(os.listdir(exe_graph_path)) == 2
output_path = glob.glob(os.path.join(exe_graph_path, "Add.Add-op*.*.*.*.output.0.ND.npy"))[0]
overflow_path = glob.glob(os.path.join(exe_graph_path, "Opdebug.Node_OpDebug.*.*.*.output.0.json"))[0]
assert output_path
assert overflow_path
# check content of the output tensor
real_path = os.path.realpath(output_path)
output = np.load(real_path)
expect = np.array([65504, 65504], np.float16)
assert np.array_equal(output, expect)
# check content of opdebug info json file
with open(overflow_path, 'rb') as json_file:
data = json.load(json_file)
assert data
del os.environ['MINDSPORE_DUMP_CONFIG']
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@security_off_wrap
def test_ascend_overflow_dump():
"""
Feature: Overflow Dump
Description: Test overflow dump
Expectation: Overflow is occurred, and overflow dump file is in correct format
"""
context.set_context(mode=context.GRAPH_MODE, device_target='Ascend')
run_overflow_dump("test_async_dump_npy")
|
# -*- coding: UTF-8 -*-
import datetime
import re
from django.contrib.auth.models import Group
from common.config import SysConfig
from sql.models import QueryPrivilegesApply, Users, SqlWorkflow, ResourceGroup
from sql.utils.resource_group import auth_group_users
from common.utils.sendmsg import MsgSender
from common.utils.const import WorkflowDict
from sql.utils.workflow_audit import Audit
import logging
logger = logging.getLogger('default')
def notify_for_audit(audit_id, **kwargs):
"""
    Workflow message notifications, excluding the notification sent when a ticket finishes executing
:param audit_id:
:param kwargs:
:return:
"""
    # Check whether message notifications are enabled; return immediately if not
sys_config = SysConfig()
wx_status = sys_config.get('wx')
if not sys_config.get('mail') and not sys_config.get('ding') and not wx_status:
logger.info('未开启消息通知,可在系统设置中开启')
return None
wx_msg_content = ''
    # Fetch the audit information
audit_detail = Audit.detail(audit_id=audit_id)
audit_id = audit_detail.audit_id
workflow_audit_remark = kwargs.get('audit_remark', '')
base_url = sys_config.get('archery_base_url', 'http://127.0.0.1:8000').rstrip('/')
workflow_url = "{base_url}/workflow/{audit_id}".format(base_url=base_url, audit_id=audit_detail.audit_id)
msg_cc_email = kwargs.get('email_cc', [])
workflow_id = audit_detail.workflow_id
workflow_type = audit_detail.workflow_type
status = audit_detail.current_status
workflow_title = audit_detail.workflow_title
workflow_from = audit_detail.create_user_display
group_name = audit_detail.group_name
webhook_url = ResourceGroup.objects.get(group_id=audit_detail.group_id).ding_webhook
    # Fetch the current approver and the full approval flow
workflow_auditors, current_workflow_auditors = Audit.review_info(audit_detail.workflow_id,
audit_detail.workflow_type)
    # Prepare the message content
if workflow_type == WorkflowDict.workflow_type['query']:
workflow_type_display = WorkflowDict.workflow_type['query_display']
workflow_detail = QueryPrivilegesApply.objects.get(apply_id=workflow_id)
instance = workflow_detail.instance.instance_name
db_name = ' '
if workflow_detail.priv_type == 1:
workflow_content = '''数据库清单:{}\n授权截止时间:{}\n结果集:{}\n'''.format(
workflow_detail.db_list,
datetime.datetime.strftime(workflow_detail.valid_date, '%Y-%m-%d %H:%M:%S'),
workflow_detail.limit_num)
elif workflow_detail.priv_type == 2:
db_name = workflow_detail.db_list
workflow_content = '''数据库:{}\n表清单:{}\n授权截止时间:{}\n结果集:{}\n'''.format(
workflow_detail.db_list,
workflow_detail.table_list,
datetime.datetime.strftime(workflow_detail.valid_date, '%Y-%m-%d %H:%M:%S'),
workflow_detail.limit_num)
else:
workflow_content = ''
elif workflow_type == WorkflowDict.workflow_type['sqlreview']:
workflow_type_display = WorkflowDict.workflow_type['sqlreview_display']
workflow_detail = SqlWorkflow.objects.get(pk=workflow_id)
instance = workflow_detail.instance.instance_name
db_name = workflow_detail.db_name
workflow_content = re.sub('[\r\n\f]{2,}', '\n',
workflow_detail.sqlworkflowcontent.sql_content[0:500].replace('\r', ''))
else:
raise Exception('工单类型不正确')
    # Prepare the message format
    if status == WorkflowDict.workflow_status['audit_wait']:  # application stage
msg_title = "[{}]新的工单申请#{}".format(workflow_type_display, audit_id)
        # Recipients: every user in the matching permission group within this resource group
auth_group_names = Group.objects.get(id=audit_detail.current_audit).name
msg_to = auth_group_users([auth_group_names], audit_detail.group_id)
        # Message content
msg_content = '''发起时间:{}\n发起人:{}\n组:{}\n目标实例:{}\n数据库:{}\n审批流程:{}\n当前审批:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from,
group_name,
instance,
db_name,
workflow_auditors,
current_workflow_auditors,
workflow_title,
workflow_url,
workflow_content)
        # WeCom (WeChat Work) message format
if wx_status:
wx_msg_content = "[【{}】新的工单申请(点击查看)]({})\n" \
">发起时间:<font color=\"comment\">{}</font>\n" \
">发起人:<font color=\"comment\">{}</font>\n" \
">组:<font color=\"comment\">{}</font>\n" \
">目标实例:<font color=\"comment\">{}</font>\n"\
">数据库:<font color=\"comment\">{}</font>\n" \
">审批流程:<font color=\"comment\">{}</font>\n" \
">当前审批:<font color=\"comment\">{}</font>\n" \
">工单名称:<font color=\"comment\">{}</font>\n".format(
workflow_type_display, workflow_url, workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from, group_name, instance, db_name, workflow_auditors, current_workflow_auditors,
workflow_title
)
    elif status == WorkflowDict.workflow_status['audit_success']:  # approved
msg_title = "[{}]工单审核通过#{}".format(workflow_type_display, audit_id)
        # Recipients: only the applicant
msg_to = [Users.objects.get(username=audit_detail.create_user)]
        # Message content
msg_content = '''发起时间:{}\n发起人:{}\n组:{}\n目标实例:{}\n数据库:{}\n审批流程:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from,
group_name,
instance,
db_name,
workflow_auditors,
workflow_title,
workflow_url,
workflow_content)
if wx_status:
wx_msg_content = "[【{}】工单审核通过(点击查看)]({})\n" \
">发起时间:<font color=\"comment\">{}</font>\n" \
">发起人:<font color=\"comment\">{}</font>\n" \
">组:<font color=\"comment\">{}</font>\n" \
">目标实例:<font color=\"comment\">{}</font>\n" \
">数据库:<font color=\"comment\">{}</font>\n" \
">审批流程:<font color=\"comment\">{}</font>\n" \
">工单名称:<font color=\"comment\">{}</font>\n".format(
workflow_type_display, workflow_url, workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from, group_name, instance, db_name, workflow_auditors, workflow_title
)
    elif status == WorkflowDict.workflow_status['audit_reject']:  # rejected
msg_title = "[{}]工单被驳回#{}".format(workflow_type_display, audit_id)
        # Recipients: only the applicant
msg_to = [Users.objects.get(username=audit_detail.create_user)]
        # Message content
msg_content = '''发起时间:{}\n目标实例:{}\n数据库:{}\n工单名称:{}\n工单地址:{}\n驳回原因:{}\n提醒:此工单被审核不通过,请按照驳回原因进行修改!'''.format(
workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
instance,
db_name,
workflow_title,
workflow_url,
workflow_audit_remark)
if wx_status:
wx_msg_content = "[【{}】工单被驳回(点击查看)]({})\n" \
">发起时间:<font color=\"comment\">{}</font>\n" \
">目标实例:<font color=\"comment\">{}</font>\n" \
">数据库:<font color=\"comment\">{}</font>\n" \
">工单名称:<font color=\"comment\">{}</font>\n" \
">驳回原因:<font color=\"comment\">{}</font>\n" \
"提醒:此工单审核不通过,请按照驳回原因进行修改!".format(
workflow_type_display, workflow_url, workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
instance, db_name, workflow_title, workflow_audit_remark
)
    elif status == WorkflowDict.workflow_status['audit_abort']:  # cancelled by the submitter; notify all reviewers
msg_title = "[{}]提交人主动终止工单#{}".format(workflow_type_display, audit_id)
        # Recipients: every user in the matching permission groups within this resource group
auth_group_names = [Group.objects.get(id=auth_group_id).name for auth_group_id in
audit_detail.audit_auth_groups.split(',')]
msg_to = auth_group_users(auth_group_names, audit_detail.group_id)
        # Message content
msg_content = '''发起时间:{}\n发起人:{}\n组:{}\n目标实例:{}\n数据库:{}\n工单名称:{}\n工单地址:{}\n终止原因:{}'''.format(
workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from,
group_name,
instance,
db_name,
workflow_title,
workflow_url,
workflow_audit_remark)
if wx_status:
wx_msg_content = "[【{}】提交人主动终止工单(点击查看)]({})\n" \
">发起时间:<font color=\"comment\">{}</font>\n" \
">发起人:<font color=\"comment\">{}</font>\n" \
">组:<font color=\"comment\">{}</font>\n" \
">目标实例:<font color=\"comment\">{}</font>\n" \
">数据库:<font color=\"comment\">{}</font>\n" \
">工单名称:<font color=\"comment\">{}</font>\n" \
">终止原因:<font color=\"comment\">{}</font>\n".format(
workflow_type_display, workflow_url, workflow_detail.create_time.strftime('%Y-%m-%d %H:%M:%S'),
workflow_from, group_name, instance, db_name, workflow_title, workflow_audit_remark
)
else:
raise Exception('工单状态不正确')
    # Resolve recipient contact details
msg_to_email = [user.email for user in msg_to if user.email]
msg_to_ding_user = [user.ding_user_id for user in msg_to if user.ding_user_id]
    # Send the notifications
msg_sender = MsgSender()
if sys_config.get('mail'):
msg_sender.send_email(msg_title, msg_content, msg_to_email, list_cc_addr=msg_cc_email)
if sys_config.get('ding') and webhook_url:
msg_sender.send_ding(webhook_url, msg_title + '\n' + msg_content)
if sys_config.get('ding_to_person') and msg_to_ding_user:
msg_sender.send_ding2user(msg_to_ding_user, msg_title + '\n' + msg_content)
if wx_status:
user_list = []
for user in msg_to:
if user.wx_user_id:
user_list.append(user.wx_user_id)
else:
user_list.append(user.username)
# user_list = [user.wx_user_id for user in msg_to if user.wx_user_id]
msg_sender.send_wx2user(wx_msg_content, user_list)
def notify_for_execute(workflow):
"""
    Notification sent when a ticket finishes executing
:param workflow:
:return:
"""
    # Check whether message notifications are enabled; return immediately if not
sys_config = SysConfig()
wx_status = sys_config.get('wx')
wx_msg_content = ''
if not sys_config.get('mail') and not sys_config.get('ding') and not sys_config.get('ding_to_person') \
and not wx_status:
logger.info('未开启消息通知,可在系统设置中开启')
return None
    # Fetch the current approver and the full approval flow
base_url = sys_config.get('archery_base_url', 'http://127.0.0.1:8000').rstrip('/')
audit_auth_group, current_audit_auth_group = Audit.review_info(workflow.id, 2)
audit_id = Audit.detail_by_workflow_id(workflow.id, 2).audit_id
url = "{base_url}/workflow/{audit_id}".format(base_url=base_url, audit_id=audit_id)
msg_title = "[{}]工单{}#{}".format(WorkflowDict.workflow_type['sqlreview_display'],
workflow.get_status_display(), audit_id)
msg_content = '''发起人:{}\n组:{}\n审批流程:{}\n工单名称:{}\n工单地址:{}\n工单详情预览:{}\n'''.format(
workflow.engineer_display,
workflow.group_name,
audit_auth_group,
workflow.workflow_name,
url,
re.sub('[\r\n\f]{2,}', '\n', workflow.sqlworkflowcontent.sql_content[0:500].replace('\r', '')))
if wx_status:
wx_msg_content = "[工单执行完毕(点击查看)]({})\n" \
">发起人:<font color=\"comment\">{}</font>\n" \
">组:<font color=\"comment\">{}</font>\n" \
">审批流程:<font color=\"comment\">{}</font>\n" \
">工单名称:<font color=\"comment\">{}</font>\n" \
">审批流程:<font color=\"comment\">{}</font>\n".format(
url, workflow.engineer_display, workflow.group_name, audit_auth_group, workflow.workflow_name,
re.sub('[\r\n\f]{2,}', '\n', workflow.sqlworkflowcontent.sql_content[0:500].replace('\r', '')))
    # Email the applicant, cc the DBA group
msg_to = Users.objects.filter(username=workflow.engineer)
msg_cc = auth_group_users(auth_group_names=['DBA'], group_id=workflow.group_id)
    # Resolve recipient contact details
msg_to_email = [user.email for user in msg_to if user.email]
msg_cc_email = [user.email for user in msg_cc if user.email]
msg_to_ding_user = [user.ding_user_id for user in msg_to if user.ding_user_id]
    # Decide whether to notify via DingTalk or email
msg_sender = MsgSender()
if sys_config.get('mail'):
msg_sender.send_email(msg_title, msg_content, msg_to_email, list_cc_addr=msg_cc_email)
if sys_config.get('ding'):
        # DingTalk notification to the applicant and reviewers, cc the DBA group
webhook_url = ResourceGroup.objects.get(group_id=workflow.group_id).ding_webhook
if webhook_url:
MsgSender.send_ding(webhook_url, msg_title + '\n' + msg_content)
if sys_config.get('ding_to_person') and msg_to_ding_user:
msg_sender.send_ding2user(msg_to_ding_user, msg_title + '\n' + msg_content)
if wx_status:
msg_to_wx_user = []
for user in msg_to:
if user.wx_user_id:
msg_to_wx_user.append(user.wx_user_id)
else:
msg_to_wx_user.append(user.username)
msg_sender.send_wx2user(wx_msg_content, msg_to_wx_user)
    # DDL notification
if sys_config.get('mail') and sys_config.get('ddl_notify_auth_group') and workflow.status == 'workflow_finish':
        # If the deployed statements contain DDL, notify the relevant staff
if workflow.syntax_type == 1:
            # Message content
msg_title = '[Archery]有新的DDL语句执行完成#{}'.format(audit_id)
msg_content = '''发起人:{}\n变更组:{}\n变更实例:{}\n变更数据库:{}\n工单名称:{}\n工单地址:{}\n工单预览:{}\n'''.format(
Users.objects.get(username=workflow.engineer).display,
workflow.group_name,
workflow.instance.instance_name,
workflow.db_name,
workflow.workflow_name,
url,
workflow.sqlworkflowcontent.sql_content[0:500])
            # Fetch the members of ddl_notify_auth_group to notify
msg_to = Users.objects.filter(groups__name=sys_config.get('ddl_notify_auth_group'))
            # Resolve recipient contact details
msg_to_email = [user.email for user in msg_to]
            # Send the email
msg_sender.send_email(msg_title, msg_content, msg_to_email)
def notify_for_binlog2sql(task):
"""
    Notification sent when a binlog2sql task finishes; email only
:param task:
:return:
"""
    # Check whether message notifications are enabled; return immediately if not
sys_config = SysConfig()
if not sys_config.get('mail') and not sys_config.get('ding'):
logger.info('未开启消息通知,可在系统设置中开启')
return None
    # Send the email notification
if task.success:
msg_title = '[Archery 通知]Binlog2SQL 执行结束'
msg_content = f'解析的SQL文件为{task.result[1]},请到指定目录查看'
msg_to = [task.result[0].email]
MsgSender().send_email(msg_title, msg_content, msg_to)
|
from django.contrib.contenttypes.models import ContentType
from rest_framework import serializers
from django.contrib.auth.models import Permission
class PermissionSerializer(serializers.ModelSerializer):
class Meta:
model = Permission
fields = '__all__'
class ContentTypeSerializer(serializers.ModelSerializer):
class Meta:
model = ContentType
fields = ['id', 'name']
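# A minimal usage sketch (illustrative only, not part of the original module):
# PermissionSerializer(Permission.objects.all(), many=True).data yields one dict per
# permission with every model field, since Meta.fields = '__all__'.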
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from twisted.internet.protocol import Protocol
class HelloServerProtocol(Protocol):
def connectionMade(self):
print("connectionMade")
        self.transport.write(b'how are you?')
def dataReceived(self, data):
print("dataReceived: {}".format(data))
if __name__ == '__main__':
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.protocol import Factory
from twisted.internet.endpoints import serverFromString
log.startLogging(sys.stdout)
wrappedFactory = Factory.forProtocol(HelloServerProtocol)
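    # The "autobahn:" endpoint prefix (registered by the autobahn package as a Twisted
    # endpoint plugin) wraps this plain stream protocol in WebSocket framing, so it is
    # reachable at ws://localhost:9000 without any WebSocket-specific code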
endpoint = serverFromString(reactor, "autobahn:tcp\:9000:url=ws\://localhost\:9000")
endpoint.listen(wrappedFactory)
reactor.run()
|
# -*- coding: utf-8 -*-
"""
BIAS CORRECTOR FOR GPM
@ author: SERVIR MEKONG
@ correspondence M.A. LAVERDE-BARAJAS
@ mlaverdeb@gmail.com
"""
import pandas as pd
import numpy as np
from osgeo import gdal,osr
from math import sqrt
from sklearn.metrics import mean_squared_error
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
#################################################################################
# PROCESSING FUNCTIONS
#################################################################################
def point_extraction(Raster,lyr,FID,Date):
# read Raster create file
gt = Raster.GetGeoTransform()
Raster_rb = Raster.GetRasterBand(1)
Name_Sta = []
Rain = []
for a in range(len(lyr)):
feat=lyr[a]
geom = feat.GetGeometryRef()
mx,my = geom.GetX(), geom.GetY() #coord in map units
#Convert from map to pixel coordinates.
px = int((mx - gt[0]) / gt[1]) #x pixel
py = int((my - gt[3]) / gt[5]) #y pixel
        intval = Raster_rb.ReadAsArray(px, py, 1, 1)
Name_Var = feat.GetField(FID)
if isinstance(Name_Var, str):
Name_Sta.append(Name_Var)
else:
Name_Sta.append(str(int(Name_Var)))
Rain.append(intval[0])
# print intval[0]
df_step = pd.DataFrame(np.array(Rain).T,columns= Name_Sta)
df_step['Date'] = Date
df_step = df_step.set_index('Date')
return df_step
def array2raster(newRasterfn,rasterOrigin,pixelWidth,pixelHeight,array):
cols = array.shape[1]
rows = array.shape[0]
originX = rasterOrigin[0]
originY = rasterOrigin[1]
driver = gdal.GetDriverByName('GTiff')
outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_UInt32)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
outband = outRaster.GetRasterBand(1)
outband.WriteArray(array)
outRasterSRS = osr.SpatialReference()
outRasterSRS.ImportFromEPSG(4326)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache()
return outRaster
def save_ascii(Raster_name,Correct,MinLon,MinLat):
f = StringIO()
np.savetxt(f,Correct, fmt='%.3f')
f.seek(0)
fs = f.read().replace('-9999.000', '-9999', -1)
f.close()
f = open(Raster_name, 'w')
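    # Write an ESRI ASCII grid header, then the formatted cell values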
f.write("ncols " + str(Correct.shape[1]) + "\n")
f.write("nrows " + str(Correct.shape[0]) + "\n")
f.write("xllcorner " + str( MinLon) + "\n")
f.write("yllcorner " + str(MinLat) + "\n")
f.write("cellsize " + str(0.1) + "\n")
f.write("NODATA_value " + str(-9999) + "\n")
f.write(fs)
f.close()
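# The header written above (ncols/nrows/xllcorner/yllcorner/cellsize/NODATA_value)
# is the ESRI ASCII grid format, so the saved file can be reopened with GDAL, e.g.
#   ds = gdal.Open(Raster_name)   # handled by the AAIGrid driver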
## read shapefile
def read_shapefile(sf):
fields = [x[0] for x in sf.fields][1:]
Records=[]
for r in range(len(sf.shapeRecords())):
a=sf.records()[r][:]
Records.append(a)
shps = [s.points for s in sf.shapes()]
df = pd.DataFrame(columns=fields, data=Records)
df = df.assign(coords=shps)
return df
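# metrics() below uses the standard definitions, assuming pred and obs are paired
# 1-D arrays (values <= 1, presumably light/no rain, are masked out first):
#   R    = Pearson correlation of (pred, obs)
#   RMSE = sqrt(mean((pred - obs)**2))
#   BIAS = 100 * sum(pred - obs) / sum(obs)    (percent bias)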
def metrics(pred,obs):
try:
pred = pred.astype(float)
obs = obs.astype(float)
pred[pred <=1] = np.nan
obs[obs <=1] = np.nan
mask = ~np.isnan(pred) & ~np.isnan(obs)
R = np.corrcoef(pred[mask],obs[mask])
R = np.round(R[1,0],2)
RMSE = sqrt(mean_squared_error(pred[mask],obs[mask]))
BIAS = 100 * ( np.sum( pred[mask] - obs[mask] ) / np.sum( obs[mask] ) )
return R,RMSE,BIAS
except:
print('The number of values is too low to calculate error metrics')
pass
def plot_perform(SAT,OBS,Rain_valid_BIAS,Correct,GridSRE,Boundaries,Fig_name):
import matplotlib.pyplot as plt
zmax = np.round(np.max([SAT,OBS,Rain_valid_BIAS[:,0],Rain_valid_BIAS[:,4]])/10)*10
plt.figure(num=None, figsize=(12, 4), dpi=100, facecolor='w', edgecolor='k')
plt.subplot(141)
plt.imshow(GridSRE,extent=Boundaries,vmin=0,vmax=np.max([Correct,GridSRE]))
plt.plot(Rain_valid_BIAS[:,1],Rain_valid_BIAS[:,2],'.r')
plt.colorbar(shrink=0.5)
plt.title(Fig_name[-12:-4])
plt.subplot(143)
plt.imshow(Correct,extent=Boundaries,vmin=0,vmax=np.max([Correct,GridSRE]))
plt.colorbar(shrink=0.5)
plt.subplot(142)
mask=np.logical_and(SAT>=0 , OBS>=0)
plt.scatter(OBS[mask],SAT[mask])
plt.plot([0,zmax],[0,zmax],'r')
plt.xlim([0,zmax])
plt.ylim([0,zmax])
R= np.round(np.corrcoef(OBS[mask],SAT[mask])[0,1],2)
plt.xlabel('R: {}'.format(R))
plt.title('original')
plt.gca().set_aspect('equal', adjustable='box')
plt.subplot(144)
mask=np.logical_and(Rain_valid_BIAS[:,0]>=0 , Rain_valid_BIAS[:,4]>=0)
plt.scatter(Rain_valid_BIAS[mask,0],Rain_valid_BIAS[mask,4])
R = np.round(np.corrcoef(Rain_valid_BIAS[mask,0],Rain_valid_BIAS[mask,4])[0,1],2)
plt.xlabel('R: {}'.format(R))
plt.plot([0,zmax], [0,zmax],'r')
plt.xlim([0,zmax])
plt.ylim([0,zmax])
plt.title('corrected')
plt.gca().set_aspect('equal', adjustable='box')
plt.savefig(Fig_name)
plt.close()
|
import ITlib
print "\nExample 7"
print "1000 videos (1080x720,RGB,25fps) are transmitted in a 60db SNR channel."
print "Available Bandwidth is 1MHz. What is the required compression ratio?\n"
B = 10.0 ** 6
SNRdb = 60
Width = 1080
Height = 720
nChannels = 3
fps = 25
bitsPerSample = 8
R = Width * Height * nChannels * fps * bitsPerSample
SNR = ITlib.SNR_db_to_num(SNRdb)
C = ITlib.computeChannelCapacityAWGN(B, SNR)
print "SNR = %.1f db is equivalent to %.1f" % (SNRdb, SNR)
print "Information rate is %.1f bps" % R
print "Channel capacity is %.1f bps" % C
print "Required compression ratio is %.1f : 1" % (R/C)
|
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# last-modified: Mar 7th, 2018
###############################################################################
import sys
import sollya
from sollya import (
S2, Interval, ceil, floor, round, inf, sup, log, exp, log1p,
guessdegree
)
from metalibm_core.core.ml_function import ML_Function, ML_FunctionBasis, DefaultArgTemplate
from metalibm_core.core.attributes import ML_Debug
from metalibm_core.core.ml_operations import *
from metalibm_core.core.ml_formats import *
from metalibm_core.core.polynomials import *
from metalibm_core.core.ml_table import ML_NewTable
from metalibm_core.core.precisions import ML_Faithful
from metalibm_core.core.special_values import (
FP_QNaN, FP_MinusInfty, FP_PlusInfty, FP_PlusZero
)
from metalibm_core.code_generation.c_code_generator import CCodeGenerator
from metalibm_core.code_generation.generic_processor import GenericProcessor
from metalibm_core.code_generation.code_object import CodeObject
from metalibm_core.code_generation.code_function import CodeFunction
from metalibm_core.code_generation.code_constant import C_Code
from metalibm_core.code_generation.generator_utility import FunctionOperator
from metalibm_core.code_generation.gappa_code_generator import GappaCodeGenerator
from metalibm_core.utility.gappa_utils import execute_gappa_script_extract
from metalibm_core.utility.arg_utils import test_flag_option, extract_option_value
from metalibm_core.utility.ml_template import ML_NewArgTemplate, ArgDefault
from metalibm_core.utility.debug_utils import *
class ML_Log1p(ML_Function("ml_log1p")):
def __init__(self, args):
ML_FunctionBasis.__init__(self, args)
@staticmethod
def get_default_args(**kw):
""" Return a structure containing the arguments for ML_Log1p,
builtin from a default argument mapping overloaded with @p kw """
default_args_log1p = {
"output_file": "my_log1p.c",
"function_name": "my_log1pf",
"precision": ML_Binary32,
"accuracy": ML_Faithful,
"target": GenericProcessor()
}
default_args_log1p.update(kw)
return DefaultArgTemplate(**default_args_log1p)
def generate_scheme(self):
vx = self.implementation.add_input_variable("x", self.precision)
sollya_precision = self.get_input_precision().sollya_object
# local overloading of RaiseReturn operation
def ExpRaiseReturn(*args, **kwords):
kwords["arg_value"] = vx
kwords["function_name"] = self.function_name
return RaiseReturn(*args, **kwords)
log2_hi_value = round(log(2), self.precision.get_field_size() - (self.precision.get_exponent_size() + 1), sollya.RN)
log2_lo_value = round(log(2) - log2_hi_value, self.precision.sollya_object, sollya.RN)
log2_hi = Constant(log2_hi_value, precision = self.precision)
log2_lo = Constant(log2_lo_value, precision = self.precision)
vx_exp = ExponentExtraction(vx, tag = "vx_exp", debug = debugd)
int_precision = self.precision.get_integer_format()
# retrieving processor inverse approximation table
dummy_var = Variable("dummy", precision = self.precision)
dummy_div_seed = ReciprocalSeed(dummy_var, precision = self.precision)
inv_approx_table = self.processor.get_recursive_implementation(dummy_div_seed, language = None, table_getter = lambda self: self.approx_table_map)
# table creation
table_index_size = 7
log_table = ML_NewTable(dimensions = [2**table_index_size, 2], storage_precision = self.precision)
log_table[0][0] = 0.0
log_table[0][1] = 0.0
for i in range(1, 2**table_index_size):
#inv_value = (1.0 + (self.processor.inv_approx_table[i] / S2**9) + S2**-52) * S2**-1
inv_value = inv_approx_table[i] # (1.0 + (inv_approx_table[i] / S2**9) ) * S2**-1
value_high = round(log(inv_value), self.precision.get_field_size() - (self.precision.get_exponent_size() + 1), sollya.RN)
value_low = round(log(inv_value) - value_high, sollya_precision, sollya.RN)
log_table[i][0] = value_high
log_table[i][1] = value_low
vx_exp = ExponentExtraction(vx, tag = "vx_exp", debug = debugd)
# case close to 0: ctz
ctz_exp_limit = -7
ctz_cond = vx_exp < ctz_exp_limit
ctz_interval = Interval(-S2**ctz_exp_limit, S2**ctz_exp_limit)
ctz_poly_degree = sup(guessdegree(log1p(sollya.x)/sollya.x, ctz_interval, S2**-(self.precision.get_field_size()+1))) + 1
ctz_poly_object = Polynomial.build_from_approximation(log1p(sollya.x)/sollya.x, ctz_poly_degree, [self.precision]*(ctz_poly_degree+1), ctz_interval, sollya.absolute)
Log.report(Log.Info, "generating polynomial evaluation scheme")
ctz_poly = PolynomialSchemeEvaluator.generate_horner_scheme(ctz_poly_object, vx, unified_precision = self.precision)
ctz_poly.set_attributes(tag = "ctz_poly", debug = debug_lftolx)
ctz_result = vx * ctz_poly
neg_input = Comparison(vx, -1, likely = False, specifier = Comparison.Less, debug = debugd, tag = "neg_input")
vx_nan_or_inf = Test(vx, specifier = Test.IsInfOrNaN, likely = False, debug = debugd, tag = "nan_or_inf")
vx_snan = Test(vx, specifier = Test.IsSignalingNaN, likely = False, debug = debugd, tag = "snan")
vx_inf = Test(vx, specifier = Test.IsInfty, likely = False, debug = debugd, tag = "inf")
vx_subnormal = Test(vx, specifier = Test.IsSubnormal, likely = False, debug = debugd, tag = "vx_subnormal")
log_function_code = CodeFunction("new_log", [Variable("x", precision = ML_Binary64)], output_format = ML_Binary64)
log_call_generator = FunctionOperator(log_function_code.get_name(), arity = 1, output_precision = ML_Binary64, declare_prototype = log_function_code)
newlog_function = FunctionObject(log_function_code.get_name(), (ML_Binary64,), ML_Binary64, log_call_generator)
# case away from 0.0
pre_vxp1 = vx + 1.0
pre_vxp1.set_attributes(tag = "pre_vxp1", debug = debug_lftolx)
pre_vxp1_exp = ExponentExtraction(pre_vxp1, tag = "pre_vxp1_exp", debug = debugd)
cm500 = Constant(-500, precision = ML_Int32)
c0 = Constant(0, precision = ML_Int32)
cond_scaling = pre_vxp1_exp > 2**(self.precision.get_exponent_size()-2)
scaling_factor_exp = Select(cond_scaling, cm500, c0)
scaling_factor = ExponentInsertion(scaling_factor_exp, precision = self.precision, tag = "scaling_factor")
vxp1 = pre_vxp1 * scaling_factor
vxp1.set_attributes(tag = "vxp1", debug = debug_lftolx)
vxp1_exp = ExponentExtraction(vxp1, tag = "vxp1_exp", debug = debugd)
vxp1_inv = ReciprocalSeed(vxp1, precision = self.precision, tag = "vxp1_inv", debug = debug_lftolx, silent = True)
vxp1_dirty_inv = ExponentInsertion(-vxp1_exp, precision = self.precision, tag = "vxp1_dirty_inv", debug = debug_lftolx)
table_index = BitLogicAnd(BitLogicRightShift(TypeCast(vxp1, precision = int_precision, debug = debuglx), self.precision.get_field_size() - 7, debug = debuglx), 0x7f, tag = "table_index", debug = debuglx)
# argument reduction
# TODO: detect if single operand inverse seed is supported by the targeted architecture
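# Sketch of the reduction: with inv ~ 1/vxp1 taken from the reciprocal seed
# (or the "dirty" 2^-exp inverse when table_index == 0), red_vxp1 = inv * vxp1 - 1
# stays small (on the order of inv_err below), and
#   log1p(x) ~ -corr_exp * log(2) - (log_inv_hi + log_inv_lo) + log(1 + red_vxp1)
# where log(1 + red_vxp1) is evaluated with the Horner polynomial generated below.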
pre_arg_red_index = TypeCast(BitLogicAnd(TypeCast(vxp1_inv, precision = ML_UInt64), Constant(-2, precision = ML_UInt64), precision = ML_UInt64), precision = self.precision, tag = "pre_arg_red_index", debug = debug_lftolx)
arg_red_index = Select(Equal(table_index, 0), vxp1_dirty_inv, pre_arg_red_index, tag = "arg_red_index", debug = debug_lftolx)
red_vxp1 = Select(cond_scaling, arg_red_index * vxp1 - 1.0, (arg_red_index * vx - 1.0) + arg_red_index)
#red_vxp1 = arg_red_index * vxp1 - 1.0
red_vxp1.set_attributes(tag = "red_vxp1", debug = debug_lftolx)
log_inv_lo = TableLoad(log_table, table_index, 1, tag = "log_inv_lo", debug = debug_lftolx)
log_inv_hi = TableLoad(log_table, table_index, 0, tag = "log_inv_hi", debug = debug_lftolx)
inv_err = S2**-6 # TODO: link to target DivisionSeed precision
Log.report(Log.Info, "building mathematical polynomial")
approx_interval = Interval(-inv_err, inv_err)
poly_degree = sup(guessdegree(log(1+sollya.x)/sollya.x, approx_interval, S2**-(self.precision.get_field_size()+1))) + 1
global_poly_object = Polynomial.build_from_approximation(log(1+sollya.x)/sollya.x, poly_degree, [self.precision]*(poly_degree+1), approx_interval, sollya.absolute)
poly_object = global_poly_object.sub_poly(start_index = 1)
Log.report(Log.Info, "generating polynomial evaluation scheme")
_poly = PolynomialSchemeEvaluator.generate_horner_scheme(poly_object, red_vxp1, unified_precision = self.precision)
_poly.set_attributes(tag = "poly", debug = debug_lftolx)
Log.report(Log.Info, global_poly_object.get_sollya_object())
vxp1_inv_exp = ExponentExtraction(vxp1_inv, tag = "vxp1_inv_exp", debug = debugd)
corr_exp = Conversion(-vxp1_exp + scaling_factor_exp, precision = self.precision)# vxp1_inv_exp
#poly = (red_vxp1) * (1 + _poly)
#poly.set_attributes(tag = "poly", debug = debug_lftolx, prevent_optimization = True)
pre_result = -log_inv_hi + (red_vxp1 + red_vxp1 * _poly + (-corr_exp * log2_lo - log_inv_lo))
pre_result.set_attributes(tag = "pre_result", debug = debug_lftolx)
exact_log2_hi_exp = - corr_exp * log2_hi
exact_log2_hi_exp.set_attributes(tag = "exact_log2_hi_exp", debug = debug_lftolx, prevent_optimization = True)
#std_result = exact_log2_hi_exp + pre_result
exact_log2_lo_exp = - corr_exp * log2_lo
exact_log2_lo_exp.set_attributes(tag = "exact_log2_lo_exp", debug = debug_lftolx)#, prevent_optimization = True)
init = exact_log2_lo_exp - log_inv_lo
init.set_attributes(tag = "init", debug = debug_lftolx, prevent_optimization = True)
fma0 = (red_vxp1 * _poly + init) # - log_inv_lo)
fma0.set_attributes(tag = "fma0", debug = debug_lftolx)
step0 = fma0
step0.set_attributes(tag = "step0", debug = debug_lftolx) #, prevent_optimization = True)
step1 = step0 + red_vxp1
step1.set_attributes(tag = "step1", debug = debug_lftolx, prevent_optimization = True)
step2 = -log_inv_hi + step1
step2.set_attributes(tag = "step2", debug = debug_lftolx, prevent_optimization = True)
std_result = exact_log2_hi_exp + step2
std_result.set_attributes(tag = "std_result", debug = debug_lftolx, prevent_optimization = True)
# main scheme
Log.report(Log.Info, "MDL scheme")
pre_scheme = ConditionBlock(neg_input,
Statement(
ClearException(),
Raise(ML_FPE_Invalid),
Return(FP_QNaN(self.precision))
),
ConditionBlock(vx_nan_or_inf,
ConditionBlock(vx_inf,
Statement(
ClearException(),
Return(FP_PlusInfty(self.precision)),
),
Statement(
ClearException(),
ConditionBlock(vx_snan,
Raise(ML_FPE_Invalid)
),
Return(FP_QNaN(self.precision))
)
),
ConditionBlock(vx_subnormal,
Return(vx),
ConditionBlock(ctz_cond,
Statement(
Return(ctz_result),
),
Statement(
Return(std_result)
)
)
)
)
)
scheme = pre_scheme
return scheme
def numeric_emulate(self, input_value):
return log1p(input_value)
if __name__ == "__main__":
# auto-test
arg_template = ML_NewArgTemplate(default_arg=ML_Log1p.get_default_args())
args = arg_template.arg_extraction()
ml_log1p = ML_Log1p(args)
ml_log1p.gen_implementation()
|
from tests.utils import W3CTestCase
class TestFlexbox_ItemVerticalAlign(W3CTestCase):
vars().update(W3CTestCase.find_tests(__file__, 'flexbox_item-vertical-align'))
|
# Copyright 2022 The Blqs Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cirq
import pymore
import pytest
import blqs_cirq as bc
def test_moment():
def fn():
with bc.Moment():
bc.H(0)
bc.CX(1, 2)
q0, q1, q2 = cirq.LineQubit.range(3)
assert bc.build(fn)() == cirq.Circuit(cirq.Moment([cirq.H(q0), cirq.CX(q1, q2)]))
def test_multiple_moments():
def fn():
with bc.Moment():
bc.CX(1, 2)
with bc.Moment():
bc.X(0)
q0, q1, q2 = cirq.LineQubit.range(3)
assert bc.build(fn)() == cirq.Circuit(
[
cirq.Moment([cirq.CX(q1, q2)]),
cirq.Moment([cirq.X(q0)]),
]
)
def test_empty_moment():
def fn():
with bc.Moment():
pass
assert bc.build(fn)() == cirq.Circuit(cirq.Moment([]))
def test_moment_target_overlap():
def fn():
with bc.Moment():
bc.H(0)
bc.CX(0, 1)
with pytest.raises(ValueError, match="Overlapping operations"):
bc.build(fn)()
def fn_repeat():
with bc.Moment():
bc.H(0)
with bc.Repeat(repetitions=10):
bc.CX(0, 1)
with pytest.raises(ValueError, match="Overlapping operations"):
bc.build(fn_repeat)()
def test_moment_repeat():
def fn():
with bc.Moment():
with bc.Repeat(repetitions=10):
bc.H(0)
h = cirq.Circuit([cirq.H(cirq.LineQubit(0))])
assert bc.build(fn)() == cirq.Circuit([cirq.CircuitOperation(h.freeze(), repetitions=10)])
def test_moment_append_extend():
m = bc.Moment()
m.append(bc.H(0))
m.extend([bc.X(1), bc.X(2)])
assert m.statements() == (bc.H(0), bc.X(1), bc.X(2))
def test_moment_context_manager():
with bc.Moment() as m:
bc.H(0)
bc.X(1)
assert m.statements() == (bc.H(0), bc.X(1))
def test_moment_str():
with bc.Moment() as m:
pass
assert str(m) == "with Moment():\n"
with bc.Moment() as m:
bc.H(0)
bc.H(1)
assert str(m) == "with Moment():\n H 0\n H 1"
def test_moment_equality():
m0 = bc.Moment()
with bc.Moment() as m1:
bc.H(0)
with bc.Moment() as m2:
bc.H(1)
with bc.Moment() as m3:
bc.H(0)
bc.H(1)
equals_tester = pymore.EqualsTester()
equals_tester.make_equality_group(lambda: m0)
equals_tester.make_equality_group(lambda: m1)
equals_tester.add_equality_group(m2)
equals_tester.add_equality_group(m3)
|
# -*- coding:utf-8 -*-
# https://leetcode.com/problems/insert-interval/description/
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution(object):
def insert(self, intervals, newInterval):
"""
:type intervals: List[Interval]
:type newInterval: Interval
:rtype: List[Interval]
"""
ret = []
for i, interval in enumerate(intervals):
if interval.start >= newInterval.start and newInterval.end >= interval.end:
continue
ap = True
if interval.start <= newInterval.start <= interval.end:
newInterval.start = interval.start
ap = False
if interval.start <= newInterval.end <= interval.end:
newInterval.end = interval.end
ap = False
if ap:
ret.append(interval)
ret.append(newInterval)
ret.sort(key=lambda x: x.start)
return ret
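# A minimal self-contained check (this Interval class just mirrors the
# commented-out LeetCode definition above):
if __name__ == '__main__':
    class Interval(object):
        def __init__(self, s=0, e=0):
            self.start = s
            self.end = e

    merged = Solution().insert([Interval(1, 3), Interval(6, 9)], Interval(2, 5))
    print([(i.start, i.end) for i in merged])  # -> [(1, 5), (6, 9)]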
|
import argparse
import logging
import subprocess
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import Tuple
from src.config import Config
from src.format import DiskFormatter
from src.manager import DiskManager
from src.mount import DiskMounter
def parse_arguments() -> Tuple[ArgumentParser, Namespace]:
parser = argparse.ArgumentParser(
description="Chiadisk: Disk formatter and health checker."
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--config', type=str, help="path to config.yaml")
group.add_argument('--version', action='store_true')
return parser, parser.parse_args()
def init(config: Config):
logging.basicConfig(
format="[%(asctime)s] [%(levelname)8s] --- %(message)s (%(filename)s:%(lineno)s)",
level=config.get_log_level(),
datefmt="%Y-%m-%d %H:%M:%S",
)
logging.info(f"Starting Chiadisk ({version()})")
# Disk Formatter - formats disks
formatter = DiskFormatter(config)
# Disk Mounter - creates mount points and mounts disks
mounter = DiskMounter(config)
# Disk Checker - checks disk health
# Disk Manager - ties it all together
chiadisk = DiskManager(config, formatter, mounter)
def version():
try:
command_args = ["git", "describe", "--tags"]
f = subprocess.Popen(command_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = f.communicate()
return stdout.decode(encoding="utf-8").rstrip()
except:
return "unknown"
if __name__ == "__main__":
# Parse config and configure logger
parser, args = parse_arguments()
if args.config:
conf = Config(Path(args.config))
init(conf)
elif args.version:
print(version())
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2015 Roberto Longobardi
#
# This file is part of the Test Manager plugin for Trac.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at:
# https://trac-hacks.org/wiki/TestManagerForTracPluginLicense
#
# Author: Roberto Longobardi <otrebor.dev@gmail.com>
#
from trac.util import get_reporter_id
from testmanager.api import TestManagerSystem
from testmanager.model import TestCatalog, TestCase, TestCaseInPlan, TestPlan
from tracgenericclass.util import formatExceptionInfo
from trac.core import Component, implements
try:
# Check that tracrpc plugin is available. Otherwise, an ImportError exception will be raised.
from tracrpc.api import IXMLRPCHandler
__all__ = ['TestManagerRPC']
class TestManagerRPC(Component):
implements(IXMLRPCHandler)
def __init__(self):
self.testmanagersys = TestManagerSystem(self.env)
def xmlrpc_namespace(self):
return 'testmanager'
def xmlrpc_methods(self):
yield ('TEST_MODIFY', ((str, str, str, str),(str, str, str, str, dict)), self.createTestCatalog)
yield ('TEST_MODIFY', ((str, str, str, str),(str, str, str, str, dict)), self.createTestCase)
yield ('TEST_PLAN_ADMIN', ((str, str, str),(str, str, str, dict)), self.createTestPlan)
yield (None, ((bool, str, str),), self.deleteTestObject)
yield (None, ((bool, str, str),(bool, str, str, dict)), self.modifyTestObject)
yield (None, ((bool, str, str, str),), self.setTestCaseStatus)
yield ('TEST_VIEW', ((list, str),(list, str, str)), self.listTestCases)
yield ('TEST_VIEW', ((list, str),), self.getTestCatalog)
yield ('TEST_VIEW', ((list, str),(list, str, str)), self.getTestCase)
yield ('TEST_VIEW', ((list, str, str),), self.getTestPlan)
yield ('TEST_VIEW', ((list,),), self.listRootCatalogs)
yield ('TEST_VIEW', ((list, str),), self.listSubCatalogs)
yield ('TEST_VIEW', ((list, str),), self.listTestPlans)
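# A typical remote call, as a sketch (the URL and credentials are hypothetical and
# require the Trac XML-RPC plugin to be enabled):
#   import xmlrpclib
#   server = xmlrpclib.ServerProxy('http://user:password@localhost/trac/login/rpc')
#   new_id = server.testmanager.createTestCatalog('', 'My catalog', 'A root catalog')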
def createTestCatalog(self, req, parent_catalog_id, title, description, customfields = {}):
""" Creates a new test catalog, in the parent catalog specified,
with the specified title and description.
To create a root catalog, specify '' as the parent catalog.
Returns the generated object ID, or '-1' if an error occurs. """
result = '-1'
try:
id_ = self.testmanagersys.get_next_id('catalog')
pagename = None
if parent_catalog_id is not None and parent_catalog_id != '':
# Check parent catalog really exists, and get its page_name
tcat = TestCatalog(self.env, parent_catalog_id)
if not tcat.exists:
self.env.log.error("Input parent test catalog with ID %s not found." % parent_catalog_id)
return result
pagename = tcat['page_name'] + '_TT' + id_
else:
pagename = 'TC_TT' + id_
author = get_reporter_id(req, 'author')
new_tc = TestCatalog(self.env, id_, pagename, title, description)
new_tc.author = author
new_tc.remote_addr = req.remote_addr
for custom_field_name in customfields:
new_tc[custom_field_name] = customfields[custom_field_name]
# This also creates the Wiki page
new_tc.insert()
result = id_
except:
self.env.log.error("Error adding test catalog with title '%s' in catalog with ID %s!" % (title, parent_catalog_id))
self.env.log.error(formatExceptionInfo())
return result
def createTestCase(self, req, catalog_id, title, description, customfields = {}):
""" Creates a new test case, in the catalog specified, with the
specified title and description.
Returns the generated object ID, or '-1' if an error occurs. """
result = '-1'
try:
if catalog_id is None or catalog_id == '':
self.env.log.error("Cannot create a test plan on the root catalog container.")
return result
# Check catalog really exists, and get its page_name
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
return result
author = get_reporter_id(req, 'author')
id_ = self.testmanagersys.get_next_id('testcase')
pagename = tcat['page_name'] + '_TC' + id_
new_tc = TestCase(self.env, id_, pagename, title, description)
new_tc.author = author
new_tc.remote_addr = req.remote_addr
for custom_field_name in customfields:
new_tc[custom_field_name] = customfields[custom_field_name]
# This also creates the Wiki page
new_tc.insert()
result = id_
except:
self.env.log.error("Error adding test case with title '%s' in catalog with ID %s!" % (title, catalog_id))
self.env.log.error(formatExceptionInfo())
return result
def createTestPlan(self, req, catalog_id, name, customfields = {}):
""" Creates a new test plan, on the catalog specified, with the
specified name.
Returns the generated object ID, or '-1' if an error occurs. """
result = '-1'
try:
# Check catalog really exists, and get its page_name
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
return result
author = get_reporter_id(req, 'author')
id_ = self.testmanagersys.get_next_id('testplan')
pagename = tcat['page_name']
new_tp = TestPlan(self.env, id_, catalog_id, pagename, name, author)
for custom_field_name in customfields:
new_tp[custom_field_name] = customfields[custom_field_name]
new_tp.insert()
result = id_
except:
self.env.log.error("Error adding test plan with name '%s' for catalog with ID %s!" % (name, catalog_id))
self.env.log.error(formatExceptionInfo())
return result
def deleteTestObject(self, req, objtype, id_):
""" Deletes the test object of the specified type identified
by the given id.
Returns True if successful, False otherwise. """
try:
# Check the object exists
obj = None
if objtype == 'testcatalog':
req.perm.require('TEST_MODIFY')
obj = TestCatalog(self.env, id_)
elif objtype == 'testcase':
req.perm.require('TEST_MODIFY')
obj = TestCase(self.env, id_)
elif objtype == 'testplan':
req.perm.require('TEST_PLAN_ADMIN')
obj = TestPlan(self.env, id_)
if not obj.exists:
self.env.log.error("Input test object of type %s with ID %s not found." % (objtype, id_))
return False
obj.delete()
except:
self.env.log.error("Error deleting test object of type %s with ID %s." % (objtype, id_))
self.env.log.error(formatExceptionInfo())
return False
return True
def modifyTestObject(self, req, objtype, id_, attributes={}):
""" Modifies the test object of the specified type identified
by the given id.
For testcaseinplan objects, the 'planid' attribute is also required
to be specified in the attributes dictionary.
Returns True if successful, False otherwise. """
try:
# Check the object exists
obj = None
if objtype == 'testcatalog':
req.perm.require('TEST_MODIFY')
obj = TestCatalog(self.env, id_)
elif objtype == 'testcase':
req.perm.require('TEST_MODIFY')
obj = TestCase(self.env, id_)
elif objtype == 'testplan':
req.perm.require('TEST_PLAN_ADMIN')
obj = TestPlan(self.env, id_)
elif objtype == 'testcaseinplan':
req.perm.require('TEST_EXECUTE')
if 'planid' not in attributes:
    self.env.log.error("'planid' attribute must be provided.")
    return False
# Note: Test case in plan objects may not exist if their status has not been set yet
obj = TestCaseInPlan(self.env, id_, attributes['planid'])
if not obj.exists:
self.env.log.error("Input test object of type %s with ID %s not found." % (objtype, id_))
return False
author = get_reporter_id(req, 'author')
for k, v in attributes.iteritems():
if k == 'title':
obj.title = v
elif k == 'description':
obj.description = v
else:
obj[k] = v
obj.author = author
obj.remote_addr = req.remote_addr
obj.save_changes(author, "Changed through RPC.")
except:
self.env.log.error("Error modifying test object of type %s with ID %s." % (objtype, id_))
self.env.log.error(formatExceptionInfo())
return False
return True
def setTestCaseStatus(self, req, testcase_id, plan_id, status):
""" Sets the test case status.
Returns True if successful, False otherwise. """
try:
author = get_reporter_id(req, 'author')
tcip = TestCaseInPlan(self.env, testcase_id, plan_id)
if tcip.exists:
tcip.set_status(status, author)
tcip.save_changes(author, "Status changed")
else:
tc = TestCase(self.env, testcase_id)
tcip['page_name'] = tc['page_name']
tcip.set_status(status, author)
tcip.insert()
except:
self.env.log.error("Error setting the test case status with ID %s on plan %s to %s!" % (testcase_id, plan_id, status))
self.env.log.error(formatExceptionInfo())
return False
return True
def getTestCatalog(self, req, catalog_id):
""" Returns the catalog properties.
The result is in the form, all strings:
(wiki_page_name, title, description, customfields) """
try:
# Check catalog really exists
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
else:
customfields = []
self._append_custom_fields(tcat, customfields)
return (tcat['page_name'], tcat.title, tcat.description, customfields)
except:
self.env.log.error("Error getting the test catalog with ID %s!" % catalog_id)
self.env.log.error(formatExceptionInfo())
def getTestCase(self, req, testcase_id, plan_id=''):
""" Returns the test case properties.
If plan_id is provided, also the status of the test case in the
plan will be returned.
Each result is in the form, all strings:
If plan_id is NOT provided:
(wiki_page_name, title, description, customfields)
If plan_id is provided:
(wiki_page_name, title, description, status, customfields) """
try:
# Check test case really exists
tc = TestCase(self.env, testcase_id)
if not tc.exists:
self.env.log.error("Input test case with ID %s not found." % testcase_id)
else:
customfields = []
self._append_custom_fields(tc, customfields)
if plan_id is None or plan_id == '':
return (tc['page_name'], tc.title, tc.description, customfields)
else:
tcip = TestCaseInPlan(self.env, testcase_id, plan_id)
self._append_custom_fields(tcip, customfields)
return (tc['page_name'], tc.title, tc.description, tcip['status'], customfields)
except:
self.env.log.error("Error getting the test case with ID %s!" % testcase_id)
self.env.log.error(formatExceptionInfo())
def getTestPlan(self, req, plan_id, catalog_id):
""" Returns the test plan properties.
The result is in the form, all strings:
(wiki_page_name, name, customfields) """
try:
# Check test plan really exists
tp = TestPlan(self.env, plan_id, catalog_id)
if not tp.exists:
self.env.log.error("Input test plan with ID %s on catalog %s not found." % (plan_id, catalog_id))
else:
customfields = []
self._append_custom_fields(tp, customfields)
return (tp['page_name'], tp['name'], customfields)
except:
self.env.log.error("Error getting the test plan with ID %s on catalog %s." % (plan_id, catalog_id))
self.env.log.error(formatExceptionInfo())
def listRootCatalogs(self, req):
""" Returns a iterator over the root-level test catalogs.
Each result is in the form, all strings:
(test_catalog_id, wiki_page_name, title, description, customfields) """
try:
for tc in TestCatalog.list_root_catalogs(self.env):
customfields = []
self._append_custom_fields(tc, customfields)
yield (tc['id'], tc['page_name'], tc.title, tc.description, customfields)
except:
self.env.log.error("Error listing the root-level test catalogs!")
self.env.log.error(formatExceptionInfo())
def listSubCatalogs(self, req, catalog_id):
""" Returns a iterator over the direct sub-catalogs of the specified
catalog.
Each result is in the form, all strings:
(test_catalog_id, wiki_page_name, title, description, customfields) """
try:
# Check catalog really exists
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
else:
for tc in tcat.list_subcatalogs():
customfields = []
self._append_custom_fields(tc, customfields)
yield (tc['id'], tc['page_name'], tc.title, tc.description, customfields)
except:
self.env.log.error("Error listing the test catalogs!")
self.env.log.error(formatExceptionInfo())
def listTestPlans(self, req, catalog_id):
""" Returns a iterator over the test plans associated
to the specified catalog.
Each result is in the form, all strings:
(testplan_id, name, customfields) """
try:
# Check catalog really exists
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
else:
for tp in tcat.list_testplans():
customfields = []
self._append_custom_fields(tp, customfields)
yield (tp['id'], tp['name'], customfields)
except:
self.env.log.error("Error listing the test plans!")
self.env.log.error(formatExceptionInfo())
def listTestCases(self, req, catalog_id, plan_id=''):
""" Returns a iterator over the test cases directly in the
specified catalog (no sub-catalogs).
If plan_id is provided, also the status of the test case in the
plan will be returned.
Each result is in the form, all strings:
If plan_id is NOT provided:
(testcase_id, wiki_page_name, title, description, customfields)
If plan_id is provided:
(testcase_id, wiki_page_name, status, customfields) """
try:
# Check catalog really exists
tcat = TestCatalog(self.env, catalog_id)
if not tcat.exists:
self.env.log.error("Input test catalog with ID %s not found." % catalog_id)
else:
tc_list = {}
if plan_id is None or plan_id == '':
for tc in tcat.list_testcases():
# Returned object is a TestCase
customfields = []
self._append_custom_fields(tc, customfields)
tc_list[tc['exec_order']] = (tc['id'], tc['page_name'], tc.title, tc.description, customfields)
else:
for tcip in tcat.list_testcases(plan_id):
# Returned object is a TestCaseInPlan
customfields = []
self._append_custom_fields(tcip, customfields)
tc_list[tcip['exec_order']] = (tcip['id'], tcip['page_name'], tcip['status'], customfields)
for key in sorted(tc_list.keys()):
yield tc_list[key]
except:
self.env.log.error("Error listing the test cases in the catalog with ID %s!" % catalog_id)
self.env.log.error(formatExceptionInfo())
def _append_custom_fields(self, obj, customfields):
for field in obj.fields:
if field.get("custom"):
customtuple = (field.get('name'), obj[field.get('name')], field.get('label'))
customfields.append(customtuple)
except ImportError:
print "\n\nError importing optional Trac XML-RPC Plugin. No XML-RPC remote interface will be available."
print "If you require XML-RPC access to the Test Manager, download and install it from http://trac-hacks.org/wiki/XmlRpcPlugin.\n\n"
|
"""
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
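# The first attribute access resolves and caches the real implementation, so e.g.
#   _trans.gettext    # __getattr__ runs once, picking trans_real or trans_null
#   _trans.gettext    # later lookups hit the cached attribute directly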
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, six.integer_types):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
class NumberAwareString(resultclass):
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError('Your dictionary lacks key \'%s\'. '
'Please provide it, because it is required to '
'determine whether string is singular or plural.'
% number)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
return proxy
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
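# Sketch of the intended use of the string 'number' argument (with no translation
# catalog active, the English singular/plural form is returned):
#   msg = ungettext_lazy('%(num)d apple', '%(num)d apples', 'num')
#   msg % {'num': 1}   # -> '1 apple'
#   msg % {'num': 3}   # -> '3 apples'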
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(object):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
self.old_language = get_language()
def __enter__(self):
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
return LANG_INFO[lang_code]
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
return LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wizard_builder import __version__ as version
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert_file('README.md', 'rst') + \
pypandoc.convert_file('HISTORY.md', 'rst')
except BaseException:
long_description = ''
setup(
name='django-wizard-builder',
version=version,
description='Create multi-page forms from the Django admin',
long_description=long_description,
license="BSD",
author='Project Callisto',
author_email='tech@projectcallisto.org',
url='https://github.com/SexualHealthInnovations/django-wizard-builder',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
python_requires='>=3',
install_requires=[
'django-widget-tweaks==1.4.1',
'django-nested-admin==3.0.21',
],
)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from neutronclient import shell as neutronshell
from neutronclient.tests.unit import test_cli20 as neutron_test_cli20
from neutronclient.v2_0 import client as ovsvappclient
TOKEN = neutron_test_cli20.TOKEN
end_url = neutron_test_cli20.end_url
class MyResp(neutron_test_cli20.MyResp):
pass
class MyApp(neutron_test_cli20.MyApp):
pass
class MyComparator(neutron_test_cli20.MyComparator):
pass
class MyUrlComparator(neutron_test_cli20.MyUrlComparator):
pass
class ContainsKeyValue(neutron_test_cli20.ContainsKeyValue):
pass
class CLITestV20Base(neutron_test_cli20.CLITestV20Base):
def setUp(self, plurals=None):
super(CLITestV20Base, self).setUp()
self.client = ovsvappclient.Client(token=TOKEN,
endpoint_url=self.endurl)
def assert_mock_multiple_calls_with_same_arguments(
self, mocked_method, expected_call, count):
if count is None:
self.assertLessEqual(1, mocked_method.call_count)
else:
self.assertEqual(count, mocked_method.call_count)
mocked_method.assert_has_calls(
[expected_call] * mocked_method.call_count)
def _test_create_resource(self, resource, cmd, name, myid, args,
position_names, position_values,
tenant_id=None, tags=None, admin_state_up=True,
extra_body=None, cmd_resource=None,
parent_id=None, **kwargs):
if not cmd_resource:
cmd_resource = resource
body = {resource: {}, }
body[resource].update(kwargs)
for i in range(len(position_names)):
body[resource].update({position_names[i]: position_values[i]})
ress = {resource:
{self.id_field: myid}, }
if name:
ress[resource].update({'name': name})
resstr = self.client.serialize(ress)
# url method body
resource_plural = self.client.get_resource_plural(cmd_resource)
path = getattr(self.client, resource_plural + "_path")
mock_body = MyComparator(body, self.client)
cmd_parser = cmd.get_parser('create_' + resource)
resp = (MyResp(200), resstr)
with mock.patch.object(cmd, "get_client",
return_value=self.client) as mock_get_client, \
mock.patch.object(self.client.httpclient, "request",
return_value=resp) as mock_request:
neutronshell.run_command(cmd, cmd_parser, args)
_str = self.fake_stdout.make_string()
self.assertIn(myid, _str)
if name:
self.assertIn(name, _str)
self.assert_mock_multiple_calls_with_same_arguments(
mock_get_client, mock.call(), None)
mock_request.assert_called_once_with(
MyUrlComparator(end_url(path), self.client),
'POST',
body=mock_body,
headers=ContainsKeyValue({'X-Auth-Token': TOKEN}))
def _test_update_resource(self, resource, cmd, myid, args, extrafields,
cmd_resource=None, parent_id=None):
if not cmd_resource:
cmd_resource = resource
body = {resource: extrafields}
path = getattr(self.client, cmd_resource + "_path")
if parent_id:
path = path % (parent_id, myid)
else:
path = path % extrafields['vcenter_id']
mock_body = MyComparator(body, self.client)
cmd_parser = cmd.get_parser("update_" + cmd_resource)
resp = (MyResp(204), None)
with mock.patch.object(cmd, "get_client",
return_value=self.client) as mock_get_client, \
mock.patch.object(self.client.httpclient, "request",
return_value=resp) as mock_request:
neutronshell.run_command(cmd, cmd_parser, args)
_str = self.fake_stdout.make_string()
self.assertEqual(_str, '')
self.assert_mock_multiple_calls_with_same_arguments(
mock_get_client, mock.call(), None)
mock_request.assert_called_once_with(
MyUrlComparator(end_url(path), self.client),
'PUT',
body=mock_body,
headers=ContainsKeyValue({'X-Auth-Token': TOKEN}))
|
"""
This file defines some constants that
are used across the selfdiff project.
"""
# Third-party dependencies
import numpy as np
# Logic
dtype = "float64" # Default data type used by Tensors.
fuzz = 1e-7 # Small number added to values to prevent
# division by zero, or zero in log.
large = 1e9 # Used to mimic +/- infinity. For example masking
# inside softmax.
avg = np.mean # Method to average in backward pass.
sum = np.sum # Method to sum in backward pass.
# Python
devmode = True # Developer mode, enables logging and warnings.
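# Typical uses (sketch): np.log(p + fuzz) to avoid log(0), and masked scores set
# to -large before a softmax so the masked entries end up with ~zero probability.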
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the importmulti RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_greater_than, assert_raises_rpc_error
class ImportMultiTest (BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-addresstype=legacy"], ["-addresstype=legacy"]]
self.setup_clean_chain = True
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
node0_address1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
#Check only one address
assert_equal(node0_address1['ismine'], True)
#Node 1 sync test
assert_equal(self.nodes[1].getblockcount(),1)
#Address Test - before import
address_info = self.nodes[1].getaddressinfo(node0_address1['address'])
assert_equal(address_info['iswatchonly'], False)
assert_equal(address_info['ismine'], False)
# RPC importmulti -----------------------------------------------
# Bitcoin Address
self.log.info("Should import an address")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_address = address['address']
watchonly_timestamp = timestamp
self.log.info("Should not import an invalid address")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": "not valid address",
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Invalid address')
# ScriptPubKey + internal
self.log.info("Should import a scriptPubKey with internal flag")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + !internal
self.log.info("Should not import a scriptPubKey without internal flag")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Public key + !Internal
self.log.info("Should import an address with public key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + internal
self.log.info("Should import a scriptPubKey with internal and with public key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Public key + !internal
self.log.info("Should not import a scriptPubKey without internal and with public key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address['pubkey'] ]
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly
self.log.info("Should import an address with private key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
self.log.info("Should not import an address with private key if is already imported")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -4)
assert_equal(result[0]['error']['message'], 'The wallet already contains the private key for this address or script')
# Address + Private key + watchonly
self.log.info("Should not import an address with private key and with watchonly")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal
self.log.info("Should import a scriptPubKey with internal and with private key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], True)
assert_equal(address_assert['timestamp'], timestamp)
# ScriptPubKey + Private key + !internal
self.log.info("Should not import a scriptPubKey without internal and with private key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Internal must be set for hex scriptPubKey')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# P2SH address
sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address'])
assert_equal(address_assert['isscript'], True)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], False)
# P2SH + Redeem script
sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript']
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + !Watchonly
sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])]
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(multi_sig_script['address'])
assert_equal(address_assert['timestamp'], timestamp)
p2shunspent = self.nodes[1].listunspent(0,999999, [multi_sig_script['address']])[0]
assert_equal(p2shunspent['spendable'], False)
assert_equal(p2shunspent['solvable'], True)
# P2SH + Redeem script + Private Keys + Watchonly
sig_address_1 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
sig_address_3 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
multi_sig_script = self.nodes[0].createmultisig(2, [sig_address_1['pubkey'], sig_address_2['pubkey'], sig_address_3['pubkey']])
self.nodes[1].generate(100)
self.nodes[1].sendtoaddress(multi_sig_script['address'], 10.00)
self.nodes[1].generate(1)
timestamp = self.nodes[1].getblock(self.nodes[1].getbestblockhash())['mediantime']
self.log.info("Should import a p2sh with respective redeem script and private keys")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": multi_sig_script['address']
},
"timestamp": "now",
"redeemscript": multi_sig_script['redeemScript'],
"keys": [ self.nodes[0].dumpprivkey(sig_address_1['address']), self.nodes[0].dumpprivkey(sig_address_2['address'])],
"watchonly": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -8)
assert_equal(result[0]['error']['message'], 'Incompatibility found between watchonly and keys')
# Address + Public key + !Internal + Wrong pubkey
self.log.info("Should not import an address with a wrong public key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Public key + internal + Wrong pubkey
self.log.info("Should not import a scriptPubKey with internal and with a wrong public key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
request = [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"pubkeys": [ address2['pubkey'] ],
"internal": True
}]
result = self.nodes[1].importmulti(request)
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Address + Private key + !watchonly + Wrong private key
self.log.info("Should not import an address with a wrong private key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": address['address']
},
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ]
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# ScriptPubKey + Private key + internal + Wrong private key
self.log.info("Should not import a scriptPubKey with internal and with a wrong private key")
address = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
address2 = self.nodes[0].getaddressinfo(self.nodes[0].getnewaddress())
result = self.nodes[1].importmulti([{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "now",
"keys": [ self.nodes[0].dumpprivkey(address2['address']) ],
"internal": True
}])
assert_equal(result[0]['success'], False)
assert_equal(result[0]['error']['code'], -5)
assert_equal(result[0]['error']['message'], 'Consistency check failed')
address_assert = self.nodes[1].getaddressinfo(address['address'])
assert_equal(address_assert['iswatchonly'], False)
assert_equal(address_assert['ismine'], False)
assert_equal('timestamp' in address_assert, False)
# Importing existing watch only address with new timestamp should replace saved timestamp.
assert_greater_than(timestamp, watchonly_timestamp)
self.log.info("Should replace previously saved watch only timestamp.")
result = self.nodes[1].importmulti([{
"scriptPubKey": {
"address": watchonly_address,
},
"timestamp": "now",
}])
assert_equal(result[0]['success'], True)
address_assert = self.nodes[1].getaddressinfo(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], timestamp)
watchonly_timestamp = timestamp
# restart nodes to check for proper serialization/deserialization of watch only address
self.stop_nodes()
self.start_nodes()
address_assert = self.nodes[1].getaddressinfo(watchonly_address)
assert_equal(address_assert['iswatchonly'], True)
assert_equal(address_assert['ismine'], False)
assert_equal(address_assert['timestamp'], watchonly_timestamp)
# Bad or missing timestamps
self.log.info("Should throw on invalid or missing timestamp values")
assert_raises_rpc_error(-3, 'Missing required timestamp field for key',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
}])
assert_raises_rpc_error(-3, 'Expected number or "now" timestamp value for key. got type string',
self.nodes[1].importmulti, [{
"scriptPubKey": address['scriptPubKey'],
"timestamp": "",
}])
if __name__ == '__main__':
    ImportMultiTest().main()
|
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Normalization modules for Flax."""
from typing import (Any, Callable, Iterable, Optional, Tuple, Union)
from flax.linen.dtypes import canonicalize_dtype
from flax.linen.module import Module, compact, merge_param # pylint: disable=g-multiple-import
from jax import lax
from jax.nn import initializers
import jax.numpy as jnp
PRNGKey = Any
Array = Any
Shape = Tuple[int, ...]
Dtype = Any # this could be a real type?
Axes = Union[int, Iterable[int]]
def _canonicalize_axes(rank: int, axes: Axes) -> Tuple[int, ...]:
"""Returns a tuple of deduplicated, sorted, and positive axes."""
if not isinstance(axes, Iterable):
axes = (axes,)
return tuple(set([rank + axis if axis < 0 else axis for axis in axes]))
def _abs_sq(x):
"""Computes the elementwise square of the absolute value |x|^2."""
if jnp.iscomplexobj(x):
return lax.square(lax.real(x)) + lax.square(lax.imag(x))
else:
return lax.square(x)
def _compute_stats(x: Array, axes: Axes,
dtype: Optional[Dtype],
axis_name: Optional[str] = None,
axis_index_groups: Any = None):
"""Computes mean and variance statistics.
This implementation takes care of a few important details:
- Computes in float32 precision for stability in half precision training.
- mean and variance are computable in a single XLA fusion,
    by using Var = E[|x|^2] - |E[x]|^2 instead of Var = E[|x - E[x]|^2].
- Clips negative variances to zero which can happen due to
roundoff errors. This avoids downstream NaNs.
- Supports averaging across a parallel axis and subgroups of a parallel axis
with a single `lax.pmean` call to avoid latency.
Arguments:
x: Input array.
axes: The axes in ``x`` to compute mean and variance statistics for.
dtype: Optional dtype specifying the minimal precision. Statistics
are always at least float32 for stability (default: dtype of x).
axis_name: Optional name for the pmapped axis to compute mean over.
axis_index_groups: Optional axis indices.
Returns:
A pair ``(mean, var)``.
"""
if dtype is None:
dtype = jnp.result_type(x)
# promote x to at least float32, this avoids half precision computation
# but preserves double or complex floating points
dtype = jnp.promote_types(dtype, jnp.float32)
x = jnp.asarray(x, dtype)
mean = jnp.mean(x, axes)
mean2 = jnp.mean(_abs_sq(x), axes)
if axis_name is not None:
concatenated_mean = jnp.concatenate([mean, mean2])
mean, mean2 = jnp.split(
lax.pmean(
concatenated_mean,
axis_name=axis_name,
axis_index_groups=axis_index_groups), 2)
# mean2 - _abs_sq(mean) is not guaranteed to be non-negative due
# to floating point round-off errors.
var = jnp.maximum(0., mean2 - _abs_sq(mean))
return mean, var
def _normalize(mdl: Module, x: Array, mean: Array, var: Array,
reduction_axes: Axes, feature_axes: Axes,
dtype: Dtype, param_dtype: Dtype,
epsilon: float,
use_bias: bool, use_scale: bool,
bias_init: Callable[[PRNGKey, Shape, Dtype], Array],
scale_init: Callable[[PRNGKey, Shape, Dtype], Array]):
""""Normalizes the input of a normalization layer and optionally applies a learned scale and bias.
Arguments:
mdl: Module to apply the normalization in (normalization params will reside
in this module).
x: The input.
mean: Mean to use for normalization.
var: Variance to use for normalization.
reduction_axes: The axes in ``x`` to reduce.
feature_axes: Axes containing features. A separate bias and scale is learned
for each specified feature.
dtype: The dtype of the result (default: infer from input and params).
param_dtype: The dtype of the parameters.
epsilon: Normalization epsilon.
use_bias: If true, add a bias term to the output.
use_scale: If true, scale the output.
bias_init: Initialization function for the bias term.
scale_init: Initialization function for the scaling function.
Returns:
The normalized input.
"""
reduction_axes = _canonicalize_axes(x.ndim, reduction_axes)
feature_axes = _canonicalize_axes(x.ndim, feature_axes)
stats_shape = list(x.shape)
for axis in reduction_axes:
stats_shape[axis] = 1
mean = mean.reshape(stats_shape)
var = var.reshape(stats_shape)
feature_shape = [1] * x.ndim
reduced_feature_shape = []
for ax in feature_axes:
feature_shape[ax] = x.shape[ax]
reduced_feature_shape.append(x.shape[ax])
y = x - mean
mul = lax.rsqrt(var + epsilon)
args = [x]
if use_scale:
scale = mdl.param('scale', scale_init, reduced_feature_shape,
param_dtype).reshape(feature_shape)
mul *= scale
args.append(scale)
y *= mul
if use_bias:
bias = mdl.param('bias', bias_init, reduced_feature_shape,
param_dtype).reshape(feature_shape)
y += bias
args.append(bias)
dtype = canonicalize_dtype(*args, dtype=dtype)
return jnp.asarray(y, dtype)
class BatchNorm(Module):
"""BatchNorm Module.
Usage Note:
If we define a model with BatchNorm, for example::
BN = nn.BatchNorm(use_running_average=False, momentum=0.9, epsilon=1e-5,
dtype=jnp.float32)
The initialized variables dict will contain in addition to a 'params'
collection a separate 'batch_stats' collection that will contain all the
running statistics for all the BatchNorm layers in a model::
vars_initialized = BN.init(key, x) # {'params': ..., 'batch_stats': ...}
We then update the batch_stats during training by specifying that the
`batch_stats` collection is mutable in the `apply` method for our module.::
vars_in = {'params': params, 'batch_stats': old_batch_stats}
y, mutated_vars = BN.apply(vars_in, x, mutable=['batch_stats'])
new_batch_stats = mutated_vars['batch_stats']
During eval we would define BN with `use_running_average=True` and use the
batch_stats collection from training to set the statistics. In this case
we are not mutating the batch statistics collection, and needn't mark it
mutable::
vars_in = {'params': params, 'batch_stats': training_batch_stats}
y = BN.apply(vars_in, x)
Attributes:
use_running_average: if True, the statistics stored in batch_stats
will be used instead of computing the batch statistics on the input.
axis: the feature or non-batch axis of the input.
momentum: decay rate for the exponential moving average of
the batch statistics.
epsilon: a small float added to variance to avoid dividing by zero.
dtype: the dtype of the result (default: infer from input and params).
param_dtype: the dtype passed to parameter initializers (default: float32).
use_bias: if True, bias (beta) is added.
use_scale: if True, multiply by scale (gamma).
When the next layer is linear (also e.g. nn.relu), this can be disabled
since the scaling will be done by the next layer.
bias_init: initializer for bias, by default, zero.
scale_init: initializer for scale, by default, one.
axis_name: the axis name used to combine batch statistics from multiple
devices. See `jax.pmap` for a description of axis names (default: None).
axis_index_groups: groups of axis indices within that named axis
representing subsets of devices to reduce over (default: None). For
example, `[[0, 1], [2, 3]]` would independently batch-normalize over
the examples on the first two and last two devices. See `jax.lax.psum`
for more details.
"""
use_running_average: Optional[bool] = None
axis: int = -1
momentum: float = 0.99
epsilon: float = 1e-5
dtype: Optional[Dtype] = None
param_dtype: Dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
axis_name: Optional[str] = None
axis_index_groups: Any = None
@compact
def __call__(self, x, use_running_average: Optional[bool] = None):
"""Normalizes the input using batch statistics.
NOTE:
During initialization (when parameters are mutable) the running average
of the batch statistics will not be updated. Therefore, the inputs
fed during initialization don't need to match that of the actual input
distribution and the reduction axis (set with `axis_name`) does not have
to exist.
Args:
x: the input to be normalized.
use_running_average: if true, the statistics stored in batch_stats
will be used instead of computing the batch statistics on the input.
Returns:
Normalized inputs (the same shape as inputs).
"""
use_running_average = merge_param(
'use_running_average', self.use_running_average, use_running_average)
feature_axes = _canonicalize_axes(x.ndim, self.axis)
reduction_axes = tuple(i for i in range(x.ndim) if i not in feature_axes)
feature_shape = [x.shape[ax] for ax in feature_axes]
# see NOTE above on initialization behavior
initializing = self.is_mutable_collection('params')
ra_mean = self.variable('batch_stats', 'mean',
lambda s: jnp.zeros(s, jnp.float32),
feature_shape)
ra_var = self.variable('batch_stats', 'var',
lambda s: jnp.ones(s, jnp.float32),
feature_shape)
if use_running_average:
mean, var = ra_mean.value, ra_var.value
else:
mean, var = _compute_stats(
x, reduction_axes,
dtype=self.dtype,
axis_name=self.axis_name if not initializing else None,
axis_index_groups=self.axis_index_groups)
if not initializing:
ra_mean.value = self.momentum * ra_mean.value + (1 -
self.momentum) * mean
ra_var.value = self.momentum * ra_var.value + (1 - self.momentum) * var
return _normalize(
self, x, mean, var, reduction_axes, feature_axes,
self.dtype, self.param_dtype, self.epsilon,
self.use_bias, self.use_scale,
self.bias_init, self.scale_init)
class LayerNorm(Module):
"""Layer normalization (https://arxiv.org/abs/1607.06450).
Operates on the last axis of the input data.
It normalizes the activations of the layer for each given example in a
batch independently, rather than across a batch like Batch Normalization.
i.e. applies a transformation that maintains the mean activation within
each example close to 0 and the activation standard deviation close to 1.
Attributes:
epsilon: A small float added to variance to avoid dividing by zero.
dtype: the dtype of the result (default: infer from input and params).
param_dtype: the dtype passed to parameter initializers (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma). When the next layer is linear
(also e.g. nn.relu), this can be disabled since the scaling will be done
by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
"""
epsilon: float = 1e-6
dtype: Optional[Dtype] = None
param_dtype: Dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
@compact
def __call__(self, x):
"""Applies layer normalization on the input.
Args:
x: the inputs
Returns:
Normalized inputs (the same shape as inputs).
"""
reduction_axes = (-1,)
feature_axes = (-1,)
    # TODO(jheek) support axis_name for model parallelism?
mean, var = _compute_stats(x, reduction_axes, self.dtype, None, None)
return _normalize(
self, x, mean, var, reduction_axes, feature_axes,
self.dtype, self.param_dtype, self.epsilon,
self.use_bias, self.use_scale,
self.bias_init, self.scale_init)
class GroupNorm(Module):
"""Group normalization (arxiv.org/abs/1803.08494).
This op is similar to batch normalization, but statistics are shared across
equally-sized groups of channels and not shared across batch dimension.
Thus, group normalization does not depend on the batch composition and does
not require maintaining internal state for storing statistics.
The user should either specify the total number of channel groups or the
number of channels per group.
Attributes:
num_groups: the total number of channel groups. The default value of 32 is
proposed by the original group normalization paper.
group_size: the number of channels in a group.
epsilon: A small float added to variance to avoid dividing by zero.
dtype: the dtype of the result (default: infer from input and params).
param_dtype: the dtype passed to parameter initializers (default: float32).
use_bias: If True, bias (beta) is added.
use_scale: If True, multiply by scale (gamma). When the next layer is
linear (also e.g. nn.relu), this can be disabled since the scaling will
be done by the next layer.
bias_init: Initializer for bias, by default, zero.
scale_init: Initializer for scale, by default, one.
"""
num_groups: Optional[int] = 32
group_size: Optional[int] = None
epsilon: float = 1e-6
dtype: Optional[Dtype] = None
param_dtype: Dtype = jnp.float32
use_bias: bool = True
use_scale: bool = True
bias_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.zeros
scale_init: Callable[[PRNGKey, Shape, Dtype], Array] = initializers.ones
@compact
def __call__(self, x):
"""Applies group normalization to the input (arxiv.org/abs/1803.08494).
Args:
x: the input of shape N...C, where N is a batch dimension and C is a
channels dimensions. `...` represents an arbitrary number of extra
dimensions that are used to accumulate statistics over.
Returns:
Normalized inputs (the same shape as inputs).
"""
reduction_axes = list(range(1, x.ndim - 1)) + [-1]
feature_axes = (-1,)
if ((self.num_groups is None and self.group_size is None) or
(self.num_groups is not None and self.group_size is not None)):
raise ValueError('Either `num_groups` or `group_size` should be '
'specified. If `group_size` is to be specified, '
'pass `num_groups=None` as argument to override '
'the default `num_groups` value of 32.')
channels = x.shape[-1]
if self.group_size is not None:
if channels % self.group_size != 0:
raise ValueError('Number of channels ({}) is not multiple of the '
'group size ({}).'.format(channels, self.group_size))
num_groups = channels // self.group_size
else:
num_groups = self.num_groups
assert isinstance(num_groups, int)
if num_groups <= 0 or channels % num_groups != 0:
raise ValueError('Number of groups ({}) does not divide the number'
' of channels ({}).'.format(num_groups, channels))
group_size = x.shape[-1] // num_groups
group_shape = x.shape[:-1] + (num_groups, group_size)
def broadcast_stat(stat):
stat = jnp.broadcast_to(stat[..., None],
(x.shape[0], num_groups, group_size))
return stat.reshape((x.shape[0], num_groups * group_size))
    # TODO(jheek): support axis_name for model parallelism?
mean, var = _compute_stats(
x.reshape(group_shape), reduction_axes, self.dtype, None, None)
mean = broadcast_stat(mean)
var = broadcast_stat(var)
return _normalize(
self, x, mean, var, reduction_axes[:-1], feature_axes,
self.dtype, self.param_dtype, self.epsilon,
self.use_bias, self.use_scale,
self.bias_init, self.scale_init)
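# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original Flax module: it shows the
# init/apply pattern described in the BatchNorm docstring above and checks the
# E[|x|^2] - |E[x]|^2 identity used by `_compute_stats`. It assumes jax and
# flax are installed; names such as `key` and `x` are arbitrary.
if __name__ == '__main__':
  import jax

  key = jax.random.PRNGKey(0)
  x = jax.random.normal(key, (4, 8))

  # BatchNorm keeps running statistics in a separate 'batch_stats' collection,
  # which must be marked mutable while training.
  bn = BatchNorm(use_running_average=False, momentum=0.9)
  variables = bn.init(key, x)  # {'params': ..., 'batch_stats': ...}
  y, mutated = bn.apply(variables, x, mutable=['batch_stats'])
  new_batch_stats = mutated['batch_stats']

  # LayerNorm is stateless and normalizes each example over its last axis.
  ln = LayerNorm()
  ln_out = ln.apply(ln.init(key, x), x)

  # _compute_stats uses Var = E[|x|^2] - |E[x]|^2, which should agree with the
  # direct definition computed by jnp.var.
  mean, var = _compute_stats(x, axes=(0, 1), dtype=None)
  assert jnp.allclose(mean, jnp.mean(x), atol=1e-5)
  assert jnp.allclose(var, jnp.var(x), atol=1e-5)
  print(y.shape, ln_out.shape, new_batch_stats['mean'].shape)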
|
import threading
import psutil
import logging as logger
from time import sleep
from osbot_utils.utils.Files import path_combine, folder_create, file_create
from osbot_utils.utils.Json import json_save_file_pretty, json_load_file, file_exists
from cdr_plugin_folder_to_folder.storage.Storage import Storage
from cdr_plugin_folder_to_folder.utils.Log_Duration import log_duration
logger.basicConfig(level=logger.INFO)
class FileStatus: # todo move to separate file (either per enum or with all enums)
INITIAL = "Initial"
NOT_COPIED = "Will not be copied"
IN_PROGRESS = "In Progress"
COMPLETED = "Completed Successfully"
FAILED = "Completed with errors"
TO_PROCESS = "To Process"
NONE = "None"
class Processing_Status:
STOPPED = "Stopped"
STARTED = "Started"
PHASE_1 = "PHASE 1 - Copying Files"
PHASE_2 = "PHASE 2 - Rebuilding Files"
class Status:
STATUS_FILE_NAME = "status.json"
VAR_COMPLETED = "completed"
VAR_CURRENT_STATUS = "current_status"
VAR_FAILED = "failed"
VAR_FILES_TO_PROCESS = "files_to_process"
VAR_FILES_LEFT_TO_PROCESS = "files_left_to_process"
VAR_FILES_COUNT = "files_in_hd1_folder"
VAR_FILES_COPIED = "files_copied"
VAR_FILES_TO_BE_COPIED = "files_left_to_be_copied"
VAR_IN_PROGRESS = "in_progress"
VAR_NUMBER_OF_CPUS = "number_of_cpus"
VAR_CPU_UTILIZATION = "cpu_utilization"
VAR_RAM_UTILIZATION = "memory_utilization"
VAR_NUM_OF_PROCESSES = "number_of_processes"
VAR_NUM_OF_THREADS = "number_of_threads"
VAR_NETWORK_CONNECTIONS = "network_connections"
VAR_DISK_PARTITIONS = "disk_partitions"
lock = threading.Lock()
_instance = None
def __new__(cls): # singleton pattern
if cls._instance is None:
cls._instance = super(Status, cls).__new__(cls)
return cls._instance
def __init__(self):
        if not hasattr(self, 'instantiated'):  # only set these values first time around
self.instantiated = True
self.storage = Storage()
#self._on_save = [] # todo: add support for firing up events when data is saved
self._status_data = self.default_data()
self.status_thread_on = False
self.status_thread = threading.Thread()
@classmethod
def clear_instance(cls):
        cls._instance = None
def StatusThread(self, update_interval):
while self.status_thread_on:
self.get_server_status()
sleep(update_interval)
def StartStatusThread(self):
if self.status_thread_on:
return
self.status_thread_on = True
self.status_thread = threading.Thread(target=self.StatusThread, args=(1,))
self.status_thread.start()
    def StopStatusThread(self):
        if not self.status_thread_on:
            return
        self.status_thread_on = False
        self.status_thread.join()
def data(self):
return self._status_data
def default_data(self):
return { Status.VAR_CURRENT_STATUS : FileStatus.NONE ,
Status.VAR_FILES_COUNT : 0 ,
Status.VAR_FILES_COPIED : 0 ,
Status.VAR_FILES_TO_BE_COPIED : 0 ,
Status.VAR_FILES_TO_PROCESS : 0 ,
Status.VAR_FILES_LEFT_TO_PROCESS : 0 ,
Status.VAR_COMPLETED : 0 ,
Status.VAR_FAILED : 0 ,
Status.VAR_IN_PROGRESS : 0 ,
Status.VAR_NUMBER_OF_CPUS : psutil.cpu_count() ,
Status.VAR_CPU_UTILIZATION : None ,
Status.VAR_RAM_UTILIZATION : None ,
Status.VAR_NUM_OF_PROCESSES : None ,
Status.VAR_NUM_OF_THREADS : None ,
Status.VAR_NETWORK_CONNECTIONS : None ,
Status.VAR_DISK_PARTITIONS : len(psutil.disk_partitions()) ,
}
def load_data(self):
self._status_data = json_load_file(self.status_file_path())
if self.data() == {}:
self.reset()
return self
def reset(self):
self._status_data = self.default_data()
self.save()
return self
def save(self):
if not file_exists(self.status_file_path()):
folder_create( self.storage.hd2_status() )
file_create ( self.status_file_path() )
json_save_file_pretty(self.data(), self.status_file_path())
return self
def status_file_path(self):
return path_combine(self.storage.hd2_status(), Status.STATUS_FILE_NAME)
def get_server_data(self):
self._status_data[Status.VAR_NUMBER_OF_CPUS] = psutil.cpu_count()
self._status_data[Status.VAR_CPU_UTILIZATION] = psutil.cpu_percent(interval=1, percpu=True)
self._status_data[Status.VAR_RAM_UTILIZATION] = psutil.virtual_memory().percent
pids = psutil.pids()
self._status_data[Status.VAR_NUM_OF_PROCESSES] = len(pids)
thread_count = 0
        for pid in pids:
            try:
                p = psutil.Process(int(pid))
                process_threads = p.num_threads()
                thread_count += process_threads
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # the process may have exited or be inaccessible; skip it
                pass
self._status_data[Status.VAR_NUM_OF_THREADS] = thread_count
self._status_data[Status.VAR_NETWORK_CONNECTIONS] = len(psutil.net_connections(kind='tcp'))
self._status_data[Status.VAR_DISK_PARTITIONS] = len(psutil.disk_partitions())
def get_server_status(self):
Status.lock.acquire()
try:
self.get_server_data()
finally:
Status.lock.release()
self.save()
return self
def set_processing_status(self, processing_status):
Status.lock.acquire()
try:
data = self.data()
data[Status.VAR_CURRENT_STATUS] = processing_status
finally:
Status.lock.release()
self.save()
return self
def set_started (self ): return self.set_processing_status(Processing_Status.STARTED )
def set_stopped (self ): return self.set_processing_status(Processing_Status.STOPPED )
def set_phase_1 (self ): return self.set_processing_status(Processing_Status.PHASE_1 )
def set_phase_2 (self ): return self.set_processing_status(Processing_Status.PHASE_2 )
def update_counters(self, updated_status, count=0):
Status.lock.acquire()
try:
data = self.data()
if updated_status == FileStatus.NONE:
data[Status.VAR_FILES_COUNT] = count
data[Status.VAR_FILES_TO_BE_COPIED] = count
elif updated_status == FileStatus.INITIAL:
data[Status.VAR_FILES_COPIED] += 1
if data[Status.VAR_FILES_TO_BE_COPIED] > 0:
data[Status.VAR_FILES_TO_BE_COPIED] -= 1
elif updated_status == FileStatus.NOT_COPIED:
if data[Status.VAR_FILES_TO_BE_COPIED] > 0:
data[Status.VAR_FILES_TO_BE_COPIED] -= 1
elif updated_status == FileStatus.IN_PROGRESS:
data[Status.VAR_IN_PROGRESS] += 1
elif updated_status == FileStatus.COMPLETED:
data[Status.VAR_COMPLETED] += 1
if data[Status.VAR_IN_PROGRESS] > 0:
data[Status.VAR_IN_PROGRESS] -= 1
if data[Status.VAR_FILES_LEFT_TO_PROCESS] > 0:
data[Status.VAR_FILES_LEFT_TO_PROCESS] -= 1
elif updated_status == FileStatus.FAILED:
data[Status.VAR_FAILED] += 1
if data[Status.VAR_IN_PROGRESS] > 0:
data[Status.VAR_IN_PROGRESS] -= 1
if data[Status.VAR_FILES_LEFT_TO_PROCESS] > 0:
data[Status.VAR_FILES_LEFT_TO_PROCESS] -= 1
elif updated_status == FileStatus.TO_PROCESS:
data[Status.VAR_FILES_TO_PROCESS] += 1
data[Status.VAR_FILES_LEFT_TO_PROCESS] += 1
finally:
Status.lock.release()
self.save()
return self
def set_processing_counters(self, count):
Status.lock.acquire()
try:
data = self.data()
data[Status.VAR_IN_PROGRESS] = 0
data[Status.VAR_FAILED] = 0
data[Status.VAR_COMPLETED] = 0
data[Status.VAR_FILES_TO_PROCESS] = count
data[Status.VAR_FILES_LEFT_TO_PROCESS] = count
finally:
Status.lock.release()
self.save()
return self
def add_completed (self ): return self.update_counters(FileStatus.COMPLETED )
def add_failed (self ): return self.update_counters(FileStatus.FAILED )
def add_file (self ): return self.update_counters(FileStatus.INITIAL )
def set_files_count (self, count): return self.update_counters(FileStatus.NONE , count)
def set_not_copied (self ): return self.update_counters(FileStatus.NOT_COPIED )
def add_in_progress (self ): return self.update_counters(FileStatus.IN_PROGRESS )
def add_to_be_processed (self ): return self.update_counters(FileStatus.TO_PROCESS )
def get_completed (self): return self.data().get(Status.VAR_COMPLETED)
def get_current_status (self): return self.data().get(Status.VAR_CURRENT_STATUS)
def get_failed (self): return self.data().get(Status.VAR_FAILED)
def get_files_count (self): return self.data().get(Status.VAR_FILES_COUNT)
def get_files_copied (self): return self.data().get(Status.VAR_FILES_COPIED)
def get_files_to_process(self): return self.data().get(Status.VAR_FILES_TO_PROCESS)
def get_in_progress (self): return self.data().get(Status.VAR_IN_PROGRESS)
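# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module: it exercises the
# singleton, the counter helpers and the background status thread. It assumes
# the Storage() hd2_status() folder is writable in the current environment.
if __name__ == '__main__':
    status = Status()
    assert status is Status()              # singleton: same instance every time
    status.load_data()
    status.set_files_count(10)             # files_in_hd1_folder / files_left_to_be_copied = 10
    status.add_to_be_processed()
    status.add_in_progress()
    status.add_completed()
    status.set_phase_1()
    print(status.get_current_status())     # "PHASE 1 - Copying Files"
    print(status.get_completed())          # 1
    status.StartStatusThread()             # refreshes cpu/ram/process stats every second
    sleep(2)
    status.StopStatusThread()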
|
import pytest
from django.contrib import auth
from django.urls import reverse
# pylint: disable=unused-argument
@pytest.mark.django_db
@pytest.mark.parametrize(
"username", ["root", "root@root.root", "management", "management@example.com"]
)
def test_login_success(load_test_data, client, settings, username):
"""
Test whether login via username & email works as expected
:param load_test_data: The fixture providing the test data (see :meth:`~tests.conftest.load_test_data`)
:type load_test_data: tuple
    :param client: The fixture providing an unauthenticated user client
:type client: :fixture:`client`
:param settings: The Django settings
:type settings: :fixture:`settings`
:param username: The username/email to use for login
:type username: str
"""
# Test login via username/password
response = client.post(
settings.LOGIN_URL, data={"username": username, "password": "root1234"}
)
print(response.headers)
assert response.status_code == 302
assert response.headers.get("location") == settings.LOGIN_REDIRECT_URL
response = client.get(settings.LOGIN_REDIRECT_URL)
print(response.headers)
assert response.status_code == 302
user = auth.get_user(client)
assert user.is_authenticated
if user.is_superuser or user.is_staff:
# Root user should get redirected to the admin dashboard
assert response.headers.get("location") == reverse("admin_dashboard")
else:
# Region user should get redirected to the region dashboard
assert response.headers.get("location") == reverse(
"dashboard", kwargs={"region_slug": "augsburg"}
)
# pylint: disable=unused-argument
@pytest.mark.django_db
@pytest.mark.parametrize(
"username",
[
"root",
"root@root.root",
"management",
"management@example.com",
"non-existing-user",
"non-existing-email@example.com",
"",
],
)
def test_login_failure(load_test_data, client, settings, username):
"""
Test whether login with incorrect credentials does not work
:param load_test_data: The fixture providing the test data (see :meth:`~tests.conftest.load_test_data`)
:type load_test_data: tuple
    :param client: The fixture providing an unauthenticated user client
:type client: :fixture:`client`
:param settings: The Django settings
:type settings: :fixture:`settings`
:param username: The username/email to use for login
:type username: str
"""
# Test for english messages
settings.LANGUAGE_CODE = "en"
# Test login via username/password
response = client.post(
settings.LOGIN_URL, data={"username": username, "password": "incorrect"}
)
print(response.headers)
assert response.status_code == 200
assert "The username or the password is incorrect." in response.content.decode()
|
"""
sentry.cache.django
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.core.cache import cache
from .base import BaseCache
class DjangoCache(BaseCache):
def set(self, key, value, timeout, version=None, raw=False):
cache.set(key, value, timeout, version=version or self.version)
def delete(self, key, version=None):
cache.delete(key, version=version or self.version)
def get(self, key, version=None, raw=False):
return cache.get(key, version=version or self.version)
|
import sys
from database import db_session, init_db, init_engine
from Student import Student
init_engine("sqlite:///schooldb.sqlite")
init_db()
def add_user():
    first_name = input("Enter the first name:")
    last_name = input("Enter the last name:")
    age = input("Enter the age:")
    email = input("Enter the email:")
    new_student = Student(first_name, last_name, age, email)
    db_session.add(new_student)
    db_session.commit()
# db_session.delete(student)
def remove_user():
    students = Student.query.filter().all()
    for student in students:
        student.print_self()
    email_remove = input("Enter the email of the student to remove:")
    for student in students:
        if student.email == email_remove:
            db_session.delete(student)
    db_session.commit()
def print_users():
students = Student.query.filter(Student.is_expelled == False).all()
for student in students:
student.print_self()
def expell_student():
    students = Student.query.filter().all()
    for student in students:
        student.print_self()
    email_expell = input("Enter the email of the student to expel:")
    student_to_expell = Student.query.filter(Student.email == email_expell).first()
    print(student_to_expell)
    if student_to_expell is not None:
        student_to_expell.expell()
        db_session.add(student_to_expell)
        db_session.commit()
print()
user_input = ""
print("Welcome to the lambda school management system!")
while user_input != 'exit':
    print('a - add a student.')
    print('b - remove a student.')
    print('c - list the students.')
    print('d - expel a student.')
    user_input = input('>')
    if user_input == 'a':
        add_user()
    if user_input == 'b':
        remove_user()
    if user_input == 'c':
        print_users()
    if user_input == 'd':
        expell_student()
exit()
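# ---------------------------------------------------------------------------
# Hypothetical sketch, not part of this script, of what the imported Student
# model and database module might look like; shown only so the calls above
# (print_self, expell, is_expelled, db_session) have a concrete shape. The
# real Student.py / database.py in the project may differ.
#
#   # Student.py (assumed SQLAlchemy declarative model)
#   from sqlalchemy import Column, Integer, String, Boolean
#   from database import Base
#
#   class Student(Base):
#       __tablename__ = 'students'
#       id = Column(Integer, primary_key=True)
#       first_name = Column(String)
#       last_name = Column(String)
#       age = Column(Integer)
#       email = Column(String, unique=True)
#       is_expelled = Column(Boolean, default=False)
#
#       def __init__(self, first_name, last_name, age, email):
#           self.first_name = first_name
#           self.last_name = last_name
#           self.age = age
#           self.email = email
#
#       def print_self(self):
#           print(self.first_name, self.last_name, self.age, self.email)
#
#       def expell(self):
#           self.is_expelled = True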
|
#!/usr/bin/env python
import validators
import random
import uuid
import datetime
import lorem
from random import randint
themesSample = {
"anti_corruption": {
"color": "#3a8789",
"label": "Anti Corruption",
"order": 0,
"value": "anti_corruption"
},
"disease": {
"color": "#ed9e2b",
"label": "Disease",
"order": 1,
"value": "disease"
},
"good_governance": {
"color": "#f7c889",
"label": "Governance",
"order": 2,
"value": "good_governance"
},
"rule_of_law": {
"color": "#3a8789",
"label": "Rule of law",
"order": 3,
"value": "rule_of_law"
},
"sanitation": {
"color": "#ed9e2b",
"label": "Sanitation",
"order": 4,
"value": "sanitation"
},
"strengthen_police": {
"color": "#f7c889",
"label": "Strengthen police",
"order": 5,
"value": "strengthen_police"
},
}
def validateThemes(themes):
for attr, value in themes.items():
validators.validate_hexcolor(value["color"])
validators.validate_string(value["label"])
validators.validate_int(value["order"])
validators.validate_string(value["value"])
assert attr == value["value"]
def themes():
validateThemes(themesSample)
return themesSample
filtersSample = {
"age_category": {
"value": "age_category",
"label": "Age",
"order": 0,
"options": [
{"value": "18_35", "label": "18 to 35 years"},
{"value": "35_50", "label": "35 to 50 years"},
{"value": "50_65", "label": "50 to 65 years"}
]
},
"gender": {
"value": "gender",
"label": "Gender",
"order": 1,
"options": [
{"value": "male", "label": "Male"},
{"value": "female", "label": "Female"},
{"value": "unknown", "label": "Unknown"}
]
},
"idp_status": {
"value": "idp_status",
"label": "IDP Status",
"order": 2,
"options": [
{"value": "status_a", "label": "Status A"},
{"value": "status_b", "label": "Status B"},
{"value": "status_c", "label": "Status C"}
]
}
}
def validateFilters(filters):
for attr, value in filters.items():
validators.validate_string(value["value"])
assert (attr == value["value"])
validators.validate_string(value["label"])
validators.validate_int(value["order"])
validators.validate_list(value["options"])
for option in value["options"]:
validators.validate_string(option["value"])
validators.validate_string(option["label"])
def filters():
validateFilters(filtersSample)
return filtersSample
def getAgeRange(age):
if (age >= 18 and age <= 35):
return "18_35"
elif (age > 35 and age <= 50):
return "35_50"
elif (age > 50 and age <= 65):
return "50_65"
else:
print("Age not within range")
return 0
def randGender():
genders = ["male", "female", "unknown"]
return random.choice(genders)
def randIDPStatus():
status = ["status_a", "status_b", "status_c"]
return random.choice(status)
def randLocation():
location = ["Mogadishu", "Hargeysa",
"Merca", "Berbera", "Kismaayo", "Borama"]
return random.choice(location)
def randThemes():
themes = list(themesSample.keys())
return random.sample(themes, randint(1, 3))
def samplePeople(i):
age = randint(18, 65)
return {
"id": str(i),
"age": age,
"age_category": getAgeRange(age),
"gender": randGender(),
"idp_status": randIDPStatus(),
"location": randLocation(),
"themes": randThemes(),
"message_count": randint(3, 50)
}
def validatePeople(people):
for person in people:
validators.validate_string(person["id"])
validators.validate_int(person["age"])
validators.validate_string(person["age_category"])
validators.validate_string(person["gender"])
validators.validate_string(person["idp_status"])
validators.validate_string(person["location"])
validators.validate_list(person["themes"])
for theme in person["themes"]:
validators.validate_string(theme)
validators.validate_int(person["message_count"])
def people(count):
peopleList = list(map(samplePeople, list(range(count))))
validatePeople(peopleList)
return peopleList
def sampleMessages(i):
messages = list()
for i in range(randint(5, 20)):
isResponse = random.choice([True, False])
themes = list(themesSample.keys())
theme = None if not isResponse else random.choice(themes)
theme = theme if randint(0, 1) % 2 == 0 else None
message = {
"id": str(i),
"text": lorem.sentence(),
"theme": theme,
"time": datetime.datetime(2020, 1, 1, i+1, 0, 0),
"is_response": isResponse
}
messages.append(message)
return messages
def validateMessages(messages):
for message in messages:
validators.validate_string(message["id"])
validators.validate_string(message["text"])
if message["theme"] is not None:
validators.validate_string(message["theme"])
validators.validate_datetime(message["time"])
validators.validate_bool(message["is_response"])
def messages(i):
messagesList = list()
for i in range(i):
messages = sampleMessages(i)
validateMessages(messages)
messagesList.append(messages)
return messagesList
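# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original script: it assumes the
# local `validators` module and the `lorem` package imported above are
# available, and simply exercises the public generators.
if __name__ == "__main__":
    print(len(themes()))             # 6 validated theme definitions
    print(len(filters()))            # 3 validated filter definitions
    sample_people = people(5)        # 5 randomly generated, validated people
    print(sample_people[0]["age_category"], sample_people[0]["themes"])
    sample_threads = messages(5)     # 5 message lists of 5-20 messages each
    print(len(sample_threads), len(sample_threads[0]))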
|
# Copyright 2020 Jigsaw Operations LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the beam pipeline."""
import datetime
from typing import Dict, List
import unittest
import json
import apache_beam as beam
from apache_beam.io.gcp.internal.clients import bigquery as beam_bigquery
from apache_beam.testing.test_pipeline import TestPipeline
import apache_beam.testing.util as beam_test_util
from pipeline import beam_tables
from pipeline.metadata.fake_caida_ip_metadata import FakeCaidaIpMetadata
from pipeline.metadata.maxmind import FakeMaxmindIpMetadata
from pipeline.metadata.dbip import FakeDbipMetadata
from pipeline.metadata import flatten
class PipelineMainTest(unittest.TestCase):
"""Unit tests for beam pipeline steps."""
# pylint: disable=protected-access
def test_get_bigquery_schema(self) -> None:
"""Test getting the right bigquery schema for data types."""
echo_schema = beam_tables._get_bigquery_schema('echo')
self.assertEqual(echo_schema, beam_tables.SCAN_BIGQUERY_SCHEMA)
satellite_schema = beam_tables._get_bigquery_schema('satellite')
all_satellite_top_level_columns = (
list(beam_tables.SCAN_BIGQUERY_SCHEMA.keys()) +
list(beam_tables.SATELLITE_BIGQUERY_SCHEMA.keys()))
self.assertListEqual(
list(satellite_schema.keys()), all_satellite_top_level_columns)
def test_get_beam_bigquery_schema(self) -> None:
"""Test making a bigquery schema for beam's table writing."""
test_field = {
'string_field': ('string', 'nullable'),
'int_field': ('integer', 'repeated'),
}
table_schema = beam_tables._get_beam_bigquery_schema(test_field)
expected_field_schema_1 = beam_bigquery.TableFieldSchema()
expected_field_schema_1.name = 'string_field'
expected_field_schema_1.type = 'string'
expected_field_schema_1.mode = 'nullable'
expected_field_schema_2 = beam_bigquery.TableFieldSchema()
expected_field_schema_2.name = 'int_field'
expected_field_schema_2.type = 'integer'
expected_field_schema_2.mode = 'repeated'
expected_table_schema = beam_bigquery.TableSchema()
expected_table_schema.fields.append(expected_field_schema_1)
expected_table_schema.fields.append(expected_field_schema_2)
self.assertEqual(table_schema, expected_table_schema)
def test_get_table_name(self) -> None:
"""Test creating a table name given params."""
base_table_name = 'scan'
prod_dataset = 'base'
user_dataset = 'laplante'
self.assertEqual(
beam_tables.get_table_name(prod_dataset, 'echo', base_table_name),
'base.echo_scan')
self.assertEqual(
beam_tables.get_table_name(user_dataset, 'discard', base_table_name),
'laplante.discard_scan')
self.assertEqual(
beam_tables.get_table_name(prod_dataset, 'http', base_table_name),
'base.http_scan')
self.assertEqual(
beam_tables.get_table_name(user_dataset, 'https', base_table_name),
'laplante.https_scan')
def test_get_job_name(self) -> None:
"""Test getting the name for the beam job"""
self.assertEqual(
beam_tables.get_job_name('base.scan_echo', False),
'write-base-scan-echo')
self.assertEqual(
beam_tables.get_job_name('base.scan_discard', True),
'append-base-scan-discard')
self.assertEqual(
beam_tables.get_job_name('laplante.scan_http', False),
'write-laplante-scan-http')
self.assertEqual(
beam_tables.get_job_name('laplante.scan_https', True),
'append-laplante-scan-https')
def test_get_full_table_name(self) -> None:
project = 'firehook-censoredplanet'
runner = beam_tables.ScanDataBeamPipelineRunner(project, '', '', '',
FakeCaidaIpMetadata, '',
FakeMaxmindIpMetadata, '',
FakeDbipMetadata, '')
full_name = runner._get_full_table_name('prod.echo_scan')
self.assertEqual(full_name, 'firehook-censoredplanet:prod.echo_scan')
def test_read_scan_text(self) -> None: # pylint: disable=no-self-use
"""Test reading lines from compressed and uncompressed files"""
p = TestPipeline()
pipeline = beam_tables._read_scan_text(
p, ['pipeline/test_results_1.json', 'pipeline/test_results_2.json.gz'])
beam_test_util.assert_that(
pipeline,
beam_test_util.equal_to([
'test line 1.1', 'test line 1.2', 'test line 2.1', 'test line 2.2'
]))
def test_between_dates(self) -> None:
"""Test logic to include filenames based on their creation dates."""
filename = 'gs://firehook-scans/http/CP_Quack-http-2020-05-11-01-02-08/results.json'
self.assertTrue(
beam_tables._between_dates(filename, datetime.date(2020, 5, 10),
datetime.date(2020, 5, 12)))
self.assertTrue(
beam_tables._between_dates(filename, datetime.date(2020, 5, 11),
datetime.date(2020, 5, 11)))
self.assertTrue(
beam_tables._between_dates(filename, None, datetime.date(2020, 5, 12)))
self.assertTrue(
beam_tables._between_dates(filename, datetime.date(2020, 5, 10), None))
self.assertTrue(beam_tables._between_dates(filename, None, None))
def test_not_between_dates(self) -> None:
"""Test logic to filter filenames based on their creation dates."""
filename = 'gs://firehook-scans/http/CP_Quack-http-2020-05-11-01-02-08/results.json'
self.assertFalse(
beam_tables._between_dates(filename, datetime.date(2020, 5, 12),
datetime.date(2020, 5, 10)))
self.assertFalse(
beam_tables._between_dates(filename, None, datetime.date(2020, 5, 10)))
self.assertFalse(
beam_tables._between_dates(filename, datetime.date(2020, 5, 12), None))
def test_add_metadata(self) -> None: # pylint: disable=no-self-use
"""Test adding IP metadata to mesurements."""
rows: List[beam_tables.Row] = [{
'domain': 'www.example.com',
'ip': '8.8.8.8',
'date': '2020-01-01',
'success': True,
}, {
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
'success': False,
}, {
'domain': 'www.example.com',
'ip': '8.8.8.8',
'date': '2020-01-02',
'success': False,
}, {
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-02',
'success': True,
}]
p = TestPipeline()
rows = (p | beam.Create(rows))
runner = beam_tables.ScanDataBeamPipelineRunner('', '', '', '',
FakeCaidaIpMetadata, '',
FakeMaxmindIpMetadata, '',
FakeDbipMetadata, '')
rows_with_metadata = runner._add_metadata(rows)
beam_test_util.assert_that(
rows_with_metadata,
beam_test_util.equal_to([{
'domain': 'www.example.com',
'ip': '8.8.8.8',
'date': '2020-01-01',
'success': True,
'netblock': '8.8.8.0/24',
'asn': 15169,
'as_name': 'GOOGLE',
'as_full_name': 'Google LLC',
'as_class': 'Content',
'country': 'US',
}, {
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
'success': False,
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
}, {
'domain': 'www.example.com',
'ip': '8.8.8.8',
'date': '2020-01-02',
'success': False,
'netblock': '8.8.8.0/24',
'asn': 15169,
'as_name': 'GOOGLE',
'as_full_name': 'Google LLC',
'as_class': 'Content',
'country': 'US',
}, {
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-02',
'success': True,
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
}]))
def test_make_date_ip_key(self) -> None:
row = {'date': '2020-01-01', 'ip': '1.2.3.4', 'other_field': None}
self.assertEqual(
beam_tables._make_date_ip_key(row), ('2020-01-01', '1.2.3.4'))
def test_add_ip_metadata_caida(self) -> None:
"""Test merging given IP metadata with given measurements."""
runner = beam_tables.ScanDataBeamPipelineRunner('', '', '', '',
FakeCaidaIpMetadata, '',
FakeMaxmindIpMetadata, '',
FakeDbipMetadata, '')
metadatas = list(
runner._add_ip_metadata('2020-01-01', ['1.1.1.1', '8.8.8.8']))
expected_key_1: beam_tables.DateIpKey = ('2020-01-01', '1.1.1.1')
expected_value_1: beam_tables.Row = {
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
'organization': 'Fake Cloudflare Sub-Org',
}
expected_key_2: beam_tables.DateIpKey = ('2020-01-01', '8.8.8.8')
expected_value_2: beam_tables.Row = {
'netblock': '8.8.8.0/24',
'asn': 15169,
'as_name': 'GOOGLE',
'as_full_name': 'Google LLC',
'as_class': 'Content',
'country': 'US',
# No organization data is added since the ASN doesn't match dbip
}
self.assertListEqual(metadatas, [(expected_key_1, expected_value_1),
(expected_key_2, expected_value_2)])
def disabled_test_add_ip_metadata_maxmind(self) -> None:
"""Test merging given IP metadata with given measurements."""
# TODO turn back on once maxmind is reenabled.
runner = beam_tables.ScanDataBeamPipelineRunner('', '', '', '',
FakeCaidaIpMetadata, '',
FakeMaxmindIpMetadata, '',
FakeDbipMetadata, '')
metadatas = list(runner._add_ip_metadata('2020-01-01', ['1.1.1.3']))
# Test Maxmind lookup when country data is missing
# Cloudflare IPs return Australia
expected_key_1 = ('2020-01-01', '1.1.1.3')
expected_value_1 = {
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': None,
'organization': 'Fake Cloudflare Sub-Org',
}
expected_value_1['country'] = 'AU'
self.assertListEqual(metadatas, [(expected_key_1, expected_value_1)])
def test_merge_metadata_with_rows(self) -> None:
"""Test merging IP metadata pcollection with rows pcollection."""
key: beam_tables.DateIpKey = ('2020-01-01', '1.1.1.1')
ip_metadata: beam_tables.Row = {
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
}
rows: List[beam_tables.Row] = [{
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
}, {
'domain': 'www.example2.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
}]
value: Dict[str, List[beam_tables.Row]] = {
beam_tables.IP_METADATA_PCOLLECTION_NAME: [ip_metadata],
beam_tables.ROWS_PCOLLECION_NAME: rows
}
expected_rows = [{
'domain': 'www.example.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
}, {
'domain': 'www.example2.com',
'ip': '1.1.1.1',
'date': '2020-01-01',
'netblock': '1.0.0.1/24',
'asn': 13335,
'as_name': 'CLOUDFLARENET',
'as_full_name': 'Cloudflare Inc.',
'as_class': 'Content',
'country': 'US',
}]
rows_with_metadata = list(beam_tables._merge_metadata_with_rows(key, value))
self.assertListEqual(rows_with_metadata, expected_rows)
def test_read_satellite_tags(self) -> None:
"""Test reading rows from Satellite tag files."""
tagged_resolver1 = {'resolver': '1.1.1.1', 'country': 'United States'}
tagged_resolver2 = {'resolver': '1.1.1.3', 'country': 'Australia'}
# yapf: disable
tagged_answer1 = {
'ip': '60.210.17.137',
'asname': 'CHINA169-BACKBONE CHINA UNICOM China169 Backbone',
'asnum': 4837,
'cert': 'a2fed117238c94a04ba787cfe69e93de36cc8571bab44d5481df9becb9beec75',
'http': 'e3c1d34ca489928190b45f0535624b872717d1edd881c8ab4b2c62f898fcd4a5'
}
row1 = {'ip': '1.1.1.1', 'date': '2020-12-17', 'country': 'US'}
row2 = {'ip': '1.1.1.3', 'date': '2020-12-17', 'country': 'AU'}
row3 = {
'ip': '60.210.17.137',
'date': '2020-12-17',
'asname': 'CHINA169-BACKBONE CHINA UNICOM China169 Backbone',
'asnum': 4837,
'cert': 'a2fed117238c94a04ba787cfe69e93de36cc8571bab44d5481df9becb9beec75',
'http': 'e3c1d34ca489928190b45f0535624b872717d1edd881c8ab4b2c62f898fcd4a5'
}
# yapf: enable
data = [
json.dumps(tagged_resolver1),
json.dumps(tagged_resolver2),
json.dumps(tagged_answer1)
]
expected = [row1, row2, row3]
result = [
next(beam_tables._read_satellite_tags('2020-12-17', d)) for d in data
]
self.assertListEqual(result, expected)
def test_process_satellite_v1(self) -> None: # pylint: disable=no-self-use
"""Test processing of Satellite v1 interference and tag files."""
# yapf: disable
_data = [
("CP_Satellite-2020-09-02-12-00-01/interference.json", {'resolver': '1.1.1.3','query': 'signal.org', 'answers': {'13.249.134.38': ['ip', 'http', 'asnum', 'asname'], '13.249.134.44': ['ip', 'http', 'asnum', 'asname'],'13.249.134.74': ['ip', 'http', 'asnum', 'asname'], '13.249.134.89': ['ip', 'http', 'asnum', 'asname']}, 'passed': True}),
("CP_Satellite-2020-09-02-12-00-01/interference.json", {'resolver': '1.1.1.3','query': 'adl.org', 'answers': {'192.124.249.107': ['ip', 'no_tags']}, 'passed': True}),
]
data = [(filename, json.dumps(d)) for filename, d in _data]
_tags = [
("CP_Satellite-2020-09-02-12-00-01/resolvers.json", {'name': 'special','resolver': '1.1.1.3'}),
("CP_Satellite-2020-09-02-12-00-01/tagged_resolvers.json", {'resolver': '1.1.1.3', 'country': 'United States'}),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", {'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': 'c5ba7f2da503045170f1d66c3e9f84576d8f3a606bb246db589a8f62c65921af','ip': '13.249.134.38'}),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", {'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '256e35b8bace0e9fe95f308deb35f82117cd7317f90a08f181516c31abe95b71','ip': '13.249.134.44'}),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", {'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '2054d0fd3887e0ded023879770d6cde57633b7881f609f1042d90fedf41685fe','ip': '13.249.134.74'}),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", {'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '0509322329cdae79475531a019a3628aa52598caa0135c5534905f0c4b4f1bac','ip': '13.249.134.89'})
]
tags = [(filename, json.dumps(t)) for filename, t in _tags]
expected = [
{
'ip': '1.1.1.3',
'country': 'US',
'name': 'special',
'domain': 'signal.org',
'category': 'Communication Tools',
'error': None,
'anomaly': False,
'success': True,
'received': [
{'ip': '13.249.134.38', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': 'c5ba7f2da503045170f1d66c3e9f84576d8f3a606bb246db589a8f62c65921af', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.44', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '256e35b8bace0e9fe95f308deb35f82117cd7317f90a08f181516c31abe95b71', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.74', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '2054d0fd3887e0ded023879770d6cde57633b7881f609f1042d90fedf41685fe', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.89', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '0509322329cdae79475531a019a3628aa52598caa0135c5534905f0c4b4f1bac', 'matches_control': 'ip http asnum asname'}
],
'date': '2020-09-02'
},
{
'ip': '1.1.1.3',
'country': 'US',
'name': 'special',
'domain': 'adl.org',
'category': 'Religion',
'error': None,
'anomaly': False,
'success': True,
'received': [
{'ip': '192.124.249.107', 'matches_control': 'ip'}
],
'date': '2020-09-02'
}
]
# yapf: enable
with TestPipeline() as p:
lines = p | 'create data' >> beam.Create(data)
lines2 = p | 'create tags' >> beam.Create(tags)
final = beam_tables._process_satellite_with_tags(lines, lines2)
beam_test_util.assert_that(final, beam_test_util.equal_to(expected))
def test_process_satellite_v2(self) -> None: # pylint: disable=no-self-use
"""Test processing of Satellite v2 interference and tag files."""
# yapf: disable
data = [
("CP_Satellite-2021-03-01-12-00-01/results.json", """{"vp":"185.228.169.37","location":{"country_code":"IE","country_name":"Ireland"},"test_url":"ar.m.wikipedia.org","response":{"198.35.26.96":["cert","asnum","asname"],"rcode":["0","0","0"]},"passed_control":true,"connect_error":false,"in_control_group":true,"anomaly":false,"confidence":{"average":60,"matches":[60],"untagged_controls":false,"untagged_response":false},"start_time":"2021-03-01 12:43:25.3438285 -0500 EST m=+0.421998701","end_time":"2021-03-01 12:43:25.3696119 -0500 EST m=+0.447782001"}"""),
("CP_Satellite-2021-03-01-12-00-01/results.json", """{"vp":"156.154.71.37","location":{"country_code":"US","country_name":"United States"},"test_url":"www.usacasino.com","response":{"15.126.193.233":["no_tags"],"rcode":["0","0","0"]},"passed_control":true,"connect_error":false,"in_control_group":true,"anomaly":true,"confidence":{"average":0,"matches":[0],"untagged_controls":false,"untagged_response":true},"start_time":"2021-03-01 12:43:25.3438285 -0500 EST m=+0.421998701","end_time":"2021-03-01 12:43:25.3696119 -0500 EST m=+0.447782001"}"""),
("CP_Satellite-2021-04-18-12-00-01/results.json", """{"vp":"87.119.233.243","location":{"country_name":"Russia","country_code":"RU"},"test_url":"feedly.com","response":{},"passed_control":false,"connect_error":true,"in_control_group":true,"anomaly":false,"confidence":{"average":0,"matches":null,"untagged_controls":false,"untagged_response":false},"start_time":"2021-04-18 14:49:01.62448452 -0400 EDT m=+10140.555964129","end_time":"2021-04-18 14:49:03.624563629 -0400 EDT m=+10142.556043238"}"""),
("CP_Satellite-2021-04-18-12-00-01/results.json", """{"vp":"12.5.76.236","location":{"country_name":"United States","country_code":"US"},"test_url":"ultimate-guitar.com","response":{"rcode":["2"]},"passed_control":true,"connect_error":false,"in_control_group":true,"anomaly":true,"confidence":{"average":0,"matches":null,"untagged_controls":false,"untagged_response":false},"start_time":"2021-04-18 14:49:07.712972288 -0400 EDT m=+10146.644451890","end_time":"2021-04-18 14:49:07.749265765 -0400 EDT m=+10146.680745375"}"""),
("CP_Satellite-2021-04-18-12-00-01/responses_control.json", """{"vp":"64.6.65.6","test_url":"ultimate-guitar.com","response":[{"url":"a.root-servers.net","has_type_a":true,"response":["198.41.0.4"],"error":"null","rcode":0,"start_time":"2021-04-18 14:51:57.561175746 -0400 EDT m=+10316.492655353","end_time":"2021-04-18 14:51:57.587097567 -0400 EDT m=+10316.518577181"},{"url":"ultimate-guitar.com","has_type_a":true,"response":["178.18.22.152"],"error":"null","rcode":0,"start_time":"2021-04-18 14:51:57.587109091 -0400 EDT m=+10316.518588694","end_time":"2021-04-18 14:51:57.61294601 -0400 EDT m=+10316.544425613"}],"passed_control":true,"connect_error":false}"""),
("CP_Satellite-2021-04-18-12-00-01/responses_control.json", """{"vp":"64.6.65.6","test_url":"www.awid.org","response":[{"url":"a.root-servers.net","has_type_a":true,"response":["198.41.0.4"],"error":"null","rcode":0,"start_time":"2021-04-18 14:51:45.836310062 -0400 EDT m=+10304.767789664","end_time":"2021-04-18 14:51:45.862080031 -0400 EDT m=+10304.793559633"},{"url":"www.awid.org","has_type_a":false,"response":null,"error":"read udp 141.212.123.185:39868->64.6.65.6:53: i/o timeout","rcode":-1,"start_time":"2021-04-18 14:51:45.862091022 -0400 EDT m=+10304.793570624","end_time":"2021-04-18 14:51:47.862170832 -0400 EDT m=+10306.793650435"},{"url":"www.awid.org","has_type_a":true,"response":["204.187.13.189"],"error":"null","rcode":0,"start_time":"2021-04-18 14:51:47.862183185 -0400 EDT m=+10306.793662792","end_time":"2021-04-18 14:51:48.162724942 -0400 EDT m=+10307.094204544"}],"passed_control":true,"connect_error":false}""")
]
tags = [
("CP_Satellite-2021-03-01-12-00-01/tagged_resolvers.json", """{"location":{"country_code":"IE","country_name":"Ireland"},"vp":"185.228.169.37"}"""),
("CP_Satellite-2021-03-01-12-00-01/tagged_resolvers.json", """{"location":{"country_code":"US","country_name":"United States"},"vp":"156.154.71.37"}"""),
("CP_Satellite-2021-03-01-12-00-01/resolvers.json", """{"name":"rdns37b.ultradns.net.","vp":"156.154.71.37"}"""),
("CP_Satellite-2021-03-01-12-00-01/resolvers.json", """{"name":"customfilter37-dns2.cleanbrowsing.org.","vp":"185.228.169.37"}"""),
("CP_Satellite-2021-03-01-12-00-01/tagged_responses.json", """{"asname":"WIKIMEDIA","asnum":14907,"cert":"9eb21a74a3cf1ecaaf6b19253025b4ca38f182e9f1f3e7355ba3c3004d4b7a10","http":"7b4b4d1bfb0a645c990f55557202f88be48e1eee0c10bdcc621c7b682bf7d2ca","ip":"198.35.26.96"}"""),
("CP_Satellite-2021-04-18-12-00-01/resolvers.json", """{"name":"87-119-233-243.saransk.ru.","vp":"87.119.233.243"}"""),
("CP_Satellite-2021-04-18-12-00-01/resolvers.json", """{"name":"ns1327.ztomy.com.","vp":"12.5.76.236"}"""),
("CP_Satellite-2021-04-18-12-00-01/resolvers.json", """{"name": "rec1pubns2.ultradns.net.", "vp": "64.6.65.6"}"""),
]
expected = [
{
'ip': '185.228.169.37',
'country': 'IE',
'name': 'customfilter37-dns2.cleanbrowsing.org.',
'domain': 'ar.m.wikipedia.org',
'category': 'Culture',
'error': None,
'anomaly': False,
'success': True,
'controls_failed': False,
'received': [
{'ip': '198.35.26.96', 'asname': 'WIKIMEDIA','asnum': 14907,'cert': '9eb21a74a3cf1ecaaf6b19253025b4ca38f182e9f1f3e7355ba3c3004d4b7a10','http': '7b4b4d1bfb0a645c990f55557202f88be48e1eee0c10bdcc621c7b682bf7d2ca', 'matches_control': 'cert asnum asname'},
],
'rcode': ['0', '0', '0'],
'date': '2021-03-01',
'start_time': '2021-03-01T12:43:25.3438285-05:00',
'end_time': '2021-03-01T12:43:25.3696119-05:00'
},
{
'ip': '156.154.71.37',
'country': 'US',
'name': 'rdns37b.ultradns.net.',
'domain': 'www.usacasino.com',
'category': 'Gambling',
'error': None,
'anomaly': True,
'success': True,
'controls_failed': False,
'received': [
{'ip': '15.126.193.233', 'matches_control': ''},
],
'rcode': ['0', '0', '0'],
'date': '2021-03-01',
'start_time': '2021-03-01T12:43:25.3438285-05:00',
'end_time': '2021-03-01T12:43:25.3696119-05:00'
},
{
'ip': '87.119.233.243',
'country': 'RU',
'name': '87-119-233-243.saransk.ru.',
'domain': 'feedly.com',
'category': 'E-commerce',
'error': None,
'anomaly': False,
'success': False,
'controls_failed': True,
'received': [],
'rcode': [],
'date': '2021-04-18',
'start_time': '2021-04-18T14:49:01.62448452-04:00',
'end_time': '2021-04-18T14:49:03.624563629-04:00'
},
{
'ip': '12.5.76.236',
'country': 'US',
'name': 'ns1327.ztomy.com.',
'domain': 'ultimate-guitar.com',
'category': 'History arts and literature',
'error': None,
'anomaly': True,
'success': True,
'controls_failed': False,
'received': [],
'rcode': ['2'],
'date': '2021-04-18',
'start_time': '2021-04-18T14:49:07.712972288-04:00',
'end_time': '2021-04-18T14:49:07.749265765-04:00'
},
{
'ip': '64.6.65.6',
'name': 'rec1pubns2.ultradns.net.',
'domain': 'ultimate-guitar.com',
'category': 'History arts and literature',
'error': None,
'anomaly': None,
'success': True,
'controls_failed': False,
'has_type_a': True,
'received': [
{'ip': '178.18.22.152'}
],
'rcode': ['0', '0'],
'date': '2021-04-18',
'start_time': '2021-04-18T14:51:57.561175746-04:00',
'end_time': '2021-04-18T14:51:57.61294601-04:00'
},
{
'ip': '64.6.65.6',
'name': 'rec1pubns2.ultradns.net.',
'domain': 'www.awid.org',
'category': 'Human Rights Issues',
'error': 'read udp 141.212.123.185:39868->64.6.65.6:53: i/o timeout',
'anomaly': None,
'success': True,
'controls_failed': False,
'has_type_a': True,
'received': [
{'ip': '204.187.13.189'}
],
'rcode': ['0', '-1', '0'],
'date': '2021-04-18',
'start_time': '2021-04-18T14:51:45.836310062-04:00',
'end_time': '2021-04-18T14:51:48.162724942-04:00'
}
]
# yapf: enable
with TestPipeline() as p:
lines = p | 'create data' >> beam.Create(data)
lines2 = p | 'create tags' >> beam.Create(tags)
final = beam_tables._process_satellite_with_tags(lines, lines2)
beam_test_util.assert_that(final, beam_test_util.equal_to(expected))
def test_partition_satellite_input(self) -> None: # pylint: disable=no-self-use
"""Test partitioning of Satellite tag and answer input files."""
data = [("CP_Satellite-2020-09-02-12-00-01/resolvers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/resolvers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/tagged_resolvers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/tagged_resolvers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/tagged_answers.json", "tag"),
("CP_Satellite-2020-09-02-12-00-01/interference.json", "row"),
("CP_Satellite-2020-09-02-12-00-01/interference.json", "row")]
expected_tags = data[0:6]
expected_rows = data[6:]
with TestPipeline() as p:
lines = p | 'create data' >> beam.Create(data)
tags, rows = lines | beam.Partition(
beam_tables._partition_satellite_input, 2)
beam_test_util.assert_that(
tags,
beam_test_util.equal_to(expected_tags),
label='assert_that/tags')
beam_test_util.assert_that(
rows,
beam_test_util.equal_to(expected_rows),
label='assert_that/rows')
def test_calculate_confidence(self) -> None:
"""Test calculating the confidence metrics for Satellite v1 data."""
# yapf: disable
scans: List[beam_tables.Row] = [
{
'ip': '114.114.114.110',
'country': 'CN',
'name': 'name',
'domain': 'abs-cbn.com',
'category': 'Culture',
'error': None,
'anomaly': True,
'success': True,
'received': [{'ip': '104.20.161.134', 'matches_control': ''}],
'date': '2020-09-02'
},
{
'ip': '1.1.1.3',
'country': 'US',
'name': 'special',
'domain': 'signal.org',
'category': 'Communication Tools',
'error': None,
'anomaly': False,
'success': True,
'received': [
{'ip': '13.249.134.38', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': 'c5ba7f2da503045170f1d66c3e9f84576d8f3a606bb246db589a8f62c65921af', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.44', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '256e35b8bace0e9fe95f308deb35f82117cd7317f90a08f181516c31abe95b71', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.74', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '2054d0fd3887e0ded023879770d6cde57633b7881f609f1042d90fedf41685fe', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.89', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '0509322329cdae79475531a019a3628aa52598caa0135c5534905f0c4b4f1bac', 'matches_control': 'ip http asnum asname'}
],
'date': '2020-09-02'
},
{
'ip': '1.1.1.3',
'country': 'US',
'name': 'special',
'domain': 'signal.org',
'category': 'Communication Tools',
'error': None,
'anomaly': False,
'success': True,
'received': [
{'ip': '13.249.134.38', 'asname': 'AS1','asnum': 11111,'cert': None,'http': 'c5ba7f2da503045170f1d66c3e9f84576d8f3a606bb246db589a8f62c65921af', 'matches_control': ''},
{'ip': '13.249.134.44', 'asname': 'AS2','asnum': 22222,'cert': 'cert','http': '256e35b8bace0e9fe95f308deb35f82117cd7317f90a08f181516c31abe95b71', 'matches_control': 'asnum asname'},
{'ip': '13.249.134.74', 'asname': 'AS2','asnum': 22222,'cert': None,'http': '2054d0fd3887e0ded023879770d6cde57633b7881f609f1042d90fedf41685fe', 'matches_control': 'ip http asnum asname'},
{'ip': '13.249.134.89', 'asname': 'AS2','asnum': 22222,'cert': None,'http': '0509322329cdae79475531a019a3628aa52598caa0135c5534905f0c4b4f1bac', 'matches_control': 'ip http asnum asname'}
],
'date': '2020-09-02'
}
]
expected = [
{
'average': 0,
'matches': [0],
'untagged_controls': False,
'untagged_response': True
},
{
'average': 100,
'matches': [100, 100, 100, 100],
'untagged_controls': False,
'untagged_response': False
},
{
'average': 62.5,
'matches': [0, 50, 100, 100],
'untagged_controls': False,
'untagged_response': False
}
]
# yapf: enable
result = [
beam_tables._calculate_confidence(scan, 1)['confidence']
for scan in scans
]
self.assertListEqual(result, expected)
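  # Sanity check on the third expected entry above: the per-answer match
  # percentages [0, 50, 100, 100] average to (0 + 50 + 100 + 100) / 4 = 62.5.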
def test_verify(self) -> None:
"""Test verification of Satellite v1 data."""
# yapf: disable
scans: List[beam_tables.Row] = [
{
'ip': '114.114.114.110',
'country': 'CN',
'name': 'name',
'domain': 'abs-cbn.com',
'category': 'Culture',
'error': None,
'anomaly': True,
'success': True,
'received': [{'ip': '104.20.161.134', 'matches_control': ''}],
'date': '2020-09-02'
},
{
'ip': '114.114.114.110',
'country': 'CN',
'name': 'name',
'domain': 'ar.m.wikipedia.org',
'category': 'E-commerce',
'error': None,
'anomaly': True,
'success': True,
'received': [{'ip': '198.35.26.96', 'matches_control': ''}],
'date': '2020-09-02'
},
{
'ip': '1.1.1.3',
'country': 'US',
'name': 'special',
'domain': 'signal.org',
'category': 'Communication Tools',
'error': None,
'anomaly': True,
'success': True,
'received': [
{'ip': '13.249.134.38', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': 'c5ba7f2da503045170f1d66c3e9f84576d8f3a606bb246db589a8f62c65921af', 'matches_control': ''},
{'ip': '13.249.134.44', 'asname': 'AMAZON-02','asnum': 16509,'cert': None,'http': '256e35b8bace0e9fe95f308deb35f82117cd7317f90a08f181516c31abe95b71', 'matches_control': ''},
],
'date': '2020-09-02'
},
]
# yapf: enable
# mock data for the global interference IP - DOMAIN mapping
flatten.INTERFERENCE_IPDOMAIN = {
'104.20.161.134': {'abs-cbn.com', 'xyz.com', 'blah.com'},
'198.35.26.96': {'ar.m.wikipedia.org'},
}
expected = [
# answer IP is returned for multiple domains: likely to be interference
(False, ''),
# answer IP is returned for one domain: false positive
(True, 'domain_below_threshold'),
# answer IPs are CDN: false positive
(True, 'is_CDN is_CDN'),
]
result = []
for scan in scans:
scan = beam_tables._verify(scan)
result.append(
(scan['verify']['excluded'], scan['verify']['exclude_reason']))
self.assertListEqual(result, expected)
# pylint: enable=protected-access
if __name__ == '__main__':
unittest.main()
|
"""
day2-part1.py
Created on 2020-12-02
Updated on 2020-12-20
Copyright © Ryan Kan
"""
# INPUT
with open("input.txt", "r") as f:
lines = [line.strip() for line in f.readlines()]
# COMPUTATION
noValid = 0
for line in lines:
# Parse each line
positions, character, password = line.split(" ")
character = list(character)[0] # Remove the trailing ":"
firstPosition, secondPosition = [int(x) for x in positions.split("-")]
# Check the positions
firstPosHasChar = password[firstPosition - 1] == character
secondPosHasChar = password[secondPosition - 1] == character
# Check if only one of the conditions is true
if firstPosHasChar ^ secondPosHasChar: # XOR the conditions
noValid += 1
# OUTPUT
print(noValid)
|
from .kheapsort import kheapsort
def test_main():
assert list(kheapsort([3, 2, 1, 5, 4], 2)) == [1, 2, 3, 4, 5]
assert list(kheapsort([5, 4, 3, 2, 1], 4)) == [1, 2, 3, 4, 5]
assert list(kheapsort([1, 2, 3, 4, 5], 0)) == [1, 2, 3, 4, 5]
if __name__ == "__main__":
test_main()
|
import argparse
from argostranslate import package
from argostranslate import settings
"""
Example usage:
argospm update
argospm install translate-en_es
argospm list
argospm remove translate-en_es
"""
def update_index(args):
"""Update the package index."""
package.update_package_index()
def get_available_packages():
"""Get available packages and update packages list if it is not done"""
try:
available_packages = package.get_available_packages()
except:
update_index()
available_packages = package.get_available_packages()
return available_packages
def install_package_print_path(available_package):
download_path = available_package.download()
print(f"Downloaded package {download_path}")
package.install_from_path(download_path)
print(f"Installed package to {settings.package_data_dir}")
def install_all_packages():
"""Install all packages."""
available_packages = get_available_packages()
for available_package in available_packages:
install_package_print_path(available_package)
def install_package(args):
"""Install package."""
available_packages = get_available_packages()
package_name = args.name
if package_name == "translate":
install_all_packages()
else:
for available_package in available_packages:
name = package.argospm_package_name(available_package)
if name == package_name:
install_package_print_path(available_package)
break
else:
print("Package not found")
exit(1)
def search_packages(args):
"""Display packages from remote index."""
available_packages = get_available_packages()
for pkg in available_packages:
if args.from_lang and args.from_lang != pkg.from_code:
continue
if args.to_lang and args.to_lang != pkg.to_code:
continue
print(
f"{package.argospm_package_name(pkg)}: "
+ f"{pkg.from_code} -> {pkg.to_code}"
)
def list_packages(args):
"""List packages."""
installed_packages = package.get_installed_packages()
for installed_package in installed_packages:
print(package.argospm_package_name(installed_package))
def remove_package(args):
"""Remove installed package."""
installed_packages = package.get_installed_packages()
package_name = args.name
for installed_package in installed_packages:
name = package.argospm_package_name(installed_package)
if name == package_name:
package.uninstall(installed_package)
print(f"Removed package {name}")
break
else:
print("Package not found")
exit(1)
def main():
parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(help="Available commands.")
update_parser = subparser.add_parser(
"update", help="Downloads remote package index."
)
update_parser.set_defaults(callback=update_index)
search_parser = subparser.add_parser(
"search", help="Search package from remote index."
)
search_parser.add_argument(
"--from-lang",
"-f",
help="The code for the language to translate from (ISO 639-1)",
)
search_parser.add_argument(
"--to-lang", "-t", help="The code for the language to translate to (ISO 639-1)"
)
search_parser.set_defaults(callback=search_packages)
install_parser = subparser.add_parser("install", help="Install package.")
install_parser.add_argument(
"name", help='Package name, use "translate" to install all packages'
)
install_parser.set_defaults(callback=install_package)
list_parser = subparser.add_parser("list", help="List installed packages.")
list_parser.set_defaults(callback=list_packages)
remove_parser = subparser.add_parser("remove", help="Remove installed package.")
remove_parser.set_defaults(callback=remove_package)
remove_parser.add_argument("name", help="Package name")
args = parser.parse_args()
args.callback(args)
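# Assumed convenience entry point for running this module directly
# (e.g. "python argospm.py list"); the package presumably exposes main()
# through a console-script entry point instead.
if __name__ == "__main__":
    main()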
|
import sst
# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")
sst.setStatisticOutput("sst.statOutputTXT", {"filepath" : "./L1.SIMPLEDRAM.tc.txt"})
# Define the simulation components
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
"verbose" : 0,
"generator" : "miranda.Stake",
"generatorParams.verbose" : 4,
"generatorParams.cores" : 1,
"generatorParams.proxy_kernel" : "pk",
"generatorParams.bin" : "/Users/jleidel/dev/working/gapbs/tc",
"generatorParams.args" : "-g 10 -n 1",
"generatorParams.mem_size" : "2048",
"printStats" : 1,
})
# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)
# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
"access_latency_cycles" : "2",
"cache_frequency" : "2 Ghz",
"replacement_policy" : "lru",
"coherence_protocol" : "MESI",
"associativity" : "4",
"cache_line_size" : "64",
"prefetcher" : "cassini.StridePrefetcher",
"debug" : "1",
"L1" : "1",
"cache_size" : "2KB"
})
# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
"coherence_protocol" : "MESI",
"backend.mem_size" : "2048MiB",
"clock" : "1GHz",
"max_requests_per_cycle" : 1,
"backing" : "none",
"backend.tCAS" : 3, # 11@800MHz roughly coverted to 200MHz
"backend.tRCD" : 3,
"backend.tRP" : 3,
"backend.cycle_time" : "5ns",
"backend.row_size" : "8KiB",
"backend.row_policy" : "open"
})
sst.enableAllStatisticsForComponentType("memHierarchy.Cache")
sst.enableAllStatisticsForComponentType("memHierarchy.MemController")
# Define the simulation links
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()
link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
|
import logging
from datetime import timedelta
from django.db.models import Count, Prefetch
from django.conf import settings
from django.urls import reverse
from dojo.celery import app
from celery.utils.log import get_task_logger
from dojo.models import Alerts, Product, Engagement, Finding, System_Settings, User
from django.utils import timezone
from dojo.utils import calculate_grade
from dojo.utils import sla_compute_and_notify
from dojo.notifications.helper import create_notification
logger = get_task_logger(__name__)
deduplicationLogger = logging.getLogger("dojo.specific-loggers.deduplication")
# Logs the error to the alerts table, which appears in the notification toolbar
def log_generic_alert(source, title, description):
create_notification(event='other', title=title, description=description,
icon='bullseye', source=source)
@app.task(bind=True)
def add_alerts(self, runinterval):
now = timezone.now()
upcoming_engagements = Engagement.objects.filter(target_start__gt=now + timedelta(days=3), target_start__lt=now + timedelta(days=3) + runinterval).order_by('target_start')
for engagement in upcoming_engagements:
create_notification(event='upcoming_engagement',
title='Upcoming engagement: %s' % engagement.name,
engagement=engagement,
recipients=[engagement.lead],
url=reverse('view_engagement', args=(engagement.id,)))
stale_engagements = Engagement.objects.filter(
target_start__gt=now - runinterval,
target_end__lt=now,
status='In Progress').order_by('-target_end')
for eng in stale_engagements:
create_notification(event='stale_engagement',
title='Stale Engagement: %s' % eng.name,
description='The engagement "%s" is stale. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")),
url=reverse('view_engagement', args=(eng.id,)),
recipients=[eng.lead])
system_settings = System_Settings.objects.get()
if system_settings.engagement_auto_close:
# Close Engagements older than user defined days
close_days = system_settings.engagement_auto_close_days
unclosed_engagements = Engagement.objects.filter(target_end__lte=now - timedelta(days=close_days),
status='In Progress').order_by('target_end')
for eng in unclosed_engagements:
create_notification(event='auto_close_engagement',
title=eng.name,
description='The engagement "%s" has auto-closed. Target end was %s.' % (eng.name, eng.target_end.strftime("%b. %d, %Y")),
url=reverse('view_engagement', args=(eng.id,)),
recipients=[eng.lead])
unclosed_engagements.update(status="Completed", active=False, updated=timezone.now())
# Calculate grade
if system_settings.enable_product_grade:
products = Product.objects.all()
for product in products:
calculate_grade(product)
@app.task(bind=True)
def cleanup_alerts(*args, **kwargs):
try:
max_alerts_per_user = settings.MAX_ALERTS_PER_USER
    except AttributeError:
max_alerts_per_user = -1
if max_alerts_per_user > -1:
total_deleted_count = 0
logger.info('start deleting oldest alerts if a user has more than %s alerts', max_alerts_per_user)
users = User.objects.all()
for user in users:
alerts_to_delete = Alerts.objects.filter(user_id=user.id).order_by('-created')[max_alerts_per_user:].values_list("id", flat=True)
total_deleted_count += len(alerts_to_delete)
Alerts.objects.filter(pk__in=list(alerts_to_delete)).delete()
logger.info('total number of alerts deleted: %s', total_deleted_count)
@app.task(bind=True)
def async_dupe_delete(*args, **kwargs):
try:
system_settings = System_Settings.objects.get()
enabled = system_settings.delete_duplicates
dupe_max = system_settings.max_dupes
total_duplicate_delete_count_max_per_run = settings.DUPE_DELETE_MAX_PER_RUN
except System_Settings.DoesNotExist:
enabled = False
if enabled and dupe_max is None:
logger.info('skipping deletion of excess duplicates: max_dupes not configured')
return
if enabled:
logger.info("delete excess duplicates (max_dupes per finding: %s, max deletes per run: %s)", dupe_max, total_duplicate_delete_count_max_per_run)
deduplicationLogger.info("delete excess duplicates (max_dupes per finding: %s, max deletes per run: %s)", dupe_max, total_duplicate_delete_count_max_per_run)
        # cap deletions per run at total_duplicate_delete_count_max_per_run to prevent overlapping jobs
results = Finding.objects \
.filter(duplicate=True) \
.order_by() \
.values('duplicate_finding') \
.annotate(num_dupes=Count('id')) \
.filter(num_dupes__gt=dupe_max)[:total_duplicate_delete_count_max_per_run]
originals_with_too_many_duplicates_ids = [result['duplicate_finding'] for result in results]
originals_with_too_many_duplicates = Finding.objects.filter(id__in=originals_with_too_many_duplicates_ids).order_by('id')
# prefetch to make it faster
originals_with_too_many_duplicates = originals_with_too_many_duplicates.prefetch_related((Prefetch("original_finding",
queryset=Finding.objects.filter(duplicate=True).order_by('date'))))
total_deleted_count = 0
for original in originals_with_too_many_duplicates:
duplicate_list = original.original_finding.all()
dupe_count = len(duplicate_list) - dupe_max
for finding in duplicate_list:
deduplicationLogger.debug('deleting finding {}:{} ({}))'.format(finding.id, finding.title, finding.hash_code))
finding.delete()
total_deleted_count += 1
dupe_count -= 1
if dupe_count <= 0:
break
if total_deleted_count >= total_duplicate_delete_count_max_per_run:
break
if total_deleted_count >= total_duplicate_delete_count_max_per_run:
break
logger.info('total number of excess duplicates deleted: %s', total_deleted_count)
@app.task(ignore_result=False)
def celery_status():
return True
@app.task
def async_sla_compute_and_notify_task(*args, **kwargs):
logger.debug("Computing SLAs and notifying as needed")
try:
system_settings = System_Settings.objects.get()
if system_settings.enable_finding_sla:
sla_compute_and_notify(*args, **kwargs)
except Exception as e:
logger.error("An unexpected error was thrown calling the SLA code: {}".format(e))
@app.task
def jira_status_reconciliation_task(*args, **kwargs):
from dojo.management.commands.jira_status_reconciliation import jira_status_reconciliation
return jira_status_reconciliation(*args, **kwargs)
@app.task
def fix_loop_duplicates_task(*args, **kwargs):
from dojo.finding.helper import fix_loop_duplicates
return fix_loop_duplicates()
|
from datetime import date
ano = date.today().year
count1 = 0
count2 = 0
for c in range (1,8):
nasc = int(input('Digite o ano de nascimento da {}ª pessoa: '.format(c)))
if ano - nasc < 18:
count1 = count1 + 1
else:
count2 = count2 + 1
print('Ao todo tivemos {} pessoa(s) maior(es) de idade e {} pessoa(s) menor(es) de idade.'.format(count2,count1))
|
# Copyright 2013 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
import json
import os
import shutil
from pathlib import Path
import yaml
from click import exceptions
from ploomber.io._commander import Commander
from ploomber.telemetry import telemetry
import datetime
_SETUP_PY = 'setup.py'
_REQS_LOCK_TXT = 'requirements.lock.txt'
_REQS_TXT = 'requirements.txt'
_ENV_YML = 'environment.yml'
_ENV_LOCK_YML = 'environment.lock.yml'
def main(use_lock):
"""
Install project, automatically detecting if it's a conda-based or pip-based
project.
Parameters
    ----------
    use_lock : bool
        If True, uses requirements.lock.txt/environment.lock.yml and
        requirements.dev.lock.txt/environment.dev.lock.yml files. Otherwise
        it uses the regular files and creates the lock ones after installing
dependencies
"""
start_time = datetime.datetime.now()
telemetry.log_api("install-started")
HAS_CONDA = shutil.which('conda')
HAS_ENV_YML = Path(_ENV_YML).exists()
HAS_ENV_LOCK_YML = Path(_ENV_LOCK_YML).exists()
HAS_REQS_TXT = Path(_REQS_TXT).exists()
HAS_REQS_LOCK_TXT = Path(_REQS_LOCK_TXT).exists()
if use_lock and not HAS_ENV_LOCK_YML and not HAS_REQS_LOCK_TXT:
err = ("Expected and environment.lock.yaml "
"(conda) or requirements.lock.txt (pip) in the current "
"directory. Add one of them and try again.")
telemetry.log_api("install-error",
metadata={
'type': 'no_lock',
'exception': err
})
raise exceptions.ClickException(err)
elif not use_lock and not HAS_ENV_YML and not HAS_REQS_TXT:
err = ("Expected an environment.yaml (conda)"
" or requirements.txt (pip) in the current directory."
" Add one of them and try again.")
telemetry.log_api("install-error",
metadata={
'type': 'no_env_requirements',
'exception': err
})
raise exceptions.ClickException(err)
elif (not HAS_CONDA and use_lock and HAS_ENV_LOCK_YML
and not HAS_REQS_LOCK_TXT):
err = ("Found env environment.lock.yaml "
"but conda is not installed. Install conda or add a "
"requirements.lock.txt to use pip instead")
telemetry.log_api("install-error",
metadata={
'type': 'no_conda',
'exception': err
})
raise exceptions.ClickException(err)
elif not HAS_CONDA and not use_lock and HAS_ENV_YML and not HAS_REQS_TXT:
err = ("Found environment.yaml but conda is not installed."
" Install conda or add a requirements.txt to use pip instead")
telemetry.log_api("install-error",
metadata={
'type': 'no_conda2',
'exception': err
})
raise exceptions.ClickException(err)
elif HAS_CONDA and use_lock and HAS_ENV_LOCK_YML:
main_conda(start_time, use_lock=True)
elif HAS_CONDA and not use_lock and HAS_ENV_YML:
main_conda(start_time, use_lock=False)
else:
main_pip(start_time, use_lock=use_lock)
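# Summary of the dispatch above:
#   conda available + use_lock + environment.lock.yml present -> main_conda(use_lock=True)
#   conda available + no lock  + environment.yml present      -> main_conda(use_lock=False)
#   otherwise (a requirements file is present)                 -> main_pip(use_lock=use_lock)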
def main_pip(start_time, use_lock):
"""
Install pip-based project (uses venv), looks for requirements.txt files
Parameters
----------
start_time : datetime
The initial runtime of the function.
use_lock : bool
        If True, uses requirements.lock.txt and requirements.dev.lock.txt files
"""
reqs_txt = _REQS_LOCK_TXT if use_lock else _REQS_TXT
reqs_dev_txt = ('requirements.dev.lock.txt'
if use_lock else 'requirements.dev.txt')
cmdr = Commander()
# TODO: modify readme to add how to activate env? probably also in conda
name = Path('.').resolve().name
venv_dir = f'venv-{name}'
cmdr.run('python', '-m', 'venv', venv_dir, description='Creating venv')
# add venv_dir to .gitignore if it doesn't exist
if Path('.gitignore').exists():
with open('.gitignore') as f:
if venv_dir not in f.read():
cmdr.append_inline(venv_dir, '.gitignore')
else:
cmdr.append_inline(venv_dir, '.gitignore')
folder, bin_name = _get_pip_folder_and_bin_name()
pip = str(Path(venv_dir, folder, bin_name))
if Path(_SETUP_PY).exists():
_pip_install_setup_py_pip(cmdr, pip)
_pip_install(cmdr, pip, lock=not use_lock, requirements=reqs_txt)
if Path(reqs_dev_txt).exists():
_pip_install(cmdr, pip, lock=not use_lock, requirements=reqs_dev_txt)
if os.name == 'nt':
cmd_activate = (
f'\nIf using cmd.exe: {venv_dir}\\Scripts\\activate.bat'
f'\nIf using PowerShell: {venv_dir}\\Scripts\\Activate.ps1')
else:
cmd_activate = f'source {venv_dir}/bin/activate'
_next_steps(cmdr, cmd_activate, start_time)
def main_conda(start_time, use_lock):
"""
Install conda-based project, looks for environment.yml files
Parameters
----------
start_time : datetime
The initial runtime of the function.
use_lock : bool
        If True, uses environment.lock.yml and environment.dev.lock.yml files
"""
env_yml = _ENV_LOCK_YML if use_lock else _ENV_YML
    # TODO: ensure ploomber-scaffold includes dependency files (including
    # lock files) in MANIFEST.in
cmdr = Commander()
# TODO: provide helpful error messages on each command
with open(env_yml) as f:
env_name = yaml.safe_load(f)['name']
current_env = Path(shutil.which('python')).parents[1].name
if env_name == current_env:
err = (f'{env_yml} will create an environment '
f'named {env_name!r}, which is the current active '
'environment. Move to a different one and try '
'again (e.g., "conda activate base")')
telemetry.log_api("install-error",
metadata={
'type': 'env_running_conflict',
'exception': err
})
raise RuntimeError(err)
# get current installed envs
conda = shutil.which('conda')
mamba = shutil.which('mamba')
# if already installed and running on windows, ask to delete first,
# otherwise it might lead to an intermittent error (permission denied
# on vcruntime140.dll)
if os.name == 'nt':
envs = cmdr.run(conda, 'env', 'list', '--json', capture_output=True)
already_installed = any([
env for env in json.loads(envs)['envs']
# only check in the envs folder, ignore envs in other locations
if 'envs' in env and env_name in env
])
if already_installed:
err = (f'Environment {env_name!r} already exists, '
f'delete it and try again '
f'(conda env remove --name {env_name})')
telemetry.log_api("install-error",
metadata={
'type': 'duplicate_env',
'exception': err
})
raise ValueError(err)
pkg_manager = mamba if mamba else conda
cmdr.run(pkg_manager,
'env',
'create',
'--file',
env_yml,
'--force',
description='Creating env')
if Path(_SETUP_PY).exists():
_pip_install_setup_py_conda(cmdr, env_name)
if not use_lock:
env_lock = cmdr.run(conda,
'env',
'export',
'--no-build',
'--name',
env_name,
description='Locking dependencies',
capture_output=True)
Path(_ENV_LOCK_YML).write_text(env_lock)
_try_conda_install_and_lock_dev(cmdr,
pkg_manager,
env_name,
use_lock=use_lock)
cmd_activate = f'conda activate {env_name}'
_next_steps(cmdr, cmd_activate, start_time)
def _get_pip_folder_and_bin_name():
folder = 'Scripts' if os.name == 'nt' else 'bin'
bin_name = 'pip.exe' if os.name == 'nt' else 'pip'
return folder, bin_name
def _find_conda_root(conda_bin):
conda_bin = Path(conda_bin)
for parent in conda_bin.parents:
        # I've seen variations of this. On Windows: Miniconda3 and miniconda3;
        # on Linux: miniconda3, anaconda and miniconda
if parent.name.lower() in {'miniconda3', 'miniconda', 'anaconda3'}:
return parent
err = ('Failed to locate conda root from '
f'directory: {str(conda_bin)!r}. Please submit an issue: '
'https://github.com/ploomber/ploomber/issues/new')
telemetry.log_api("install-error",
metadata={
'type': 'no_conda_root',
'exception': err
})
raise RuntimeError(err)
def _path_to_pip_in_env_with_name(conda_bin, env_name):
conda_root = _find_conda_root(conda_bin)
folder, bin_name = _get_pip_folder_and_bin_name()
return str(conda_root / 'envs' / env_name / folder / bin_name)
def _locate_pip_inside_conda(env_name):
"""
Locates pip inside the conda env with a given name
"""
pip = _path_to_pip_in_env_with_name(shutil.which('conda'), env_name)
# this might happen if the environment does not contain python/pip
if not Path(pip).exists():
err = (f'Could not locate pip in environment {env_name!r}, make sure '
'it is included in your environment.yml and try again')
telemetry.log_api("install-error",
metadata={
'type': 'no_pip_env',
'exception': err
})
raise FileNotFoundError(err)
return pip
def _pip_install_setup_py_conda(cmdr, env_name):
"""
Call "pip install --editable ." if setup.py exists. Automatically locates
the appropriate pip binary inside the conda env given the env name
"""
pip = _locate_pip_inside_conda(env_name)
_pip_install_setup_py_pip(cmdr, pip)
def _pip_install_setup_py_pip(cmdr, pip):
cmdr.run(pip,
'install',
'--editable',
'.',
description='Installing project')
def _try_conda_install_and_lock_dev(cmdr, pkg_manager, env_name, use_lock):
env_yml = 'environment.dev.lock.yml' if use_lock else 'environment.dev.yml'
if Path(env_yml).exists():
cmdr.run(pkg_manager,
'env',
'update',
'--file',
env_yml,
description='Installing dev dependencies')
if not use_lock:
env_lock = cmdr.run(shutil.which('conda'),
'env',
'export',
'--no-build',
'--name',
env_name,
description='Locking dev dependencies',
capture_output=True)
Path('environment.dev.lock.yml').write_text(env_lock)
def _next_steps(cmdr, cmd_activate, start_time):
end_time = datetime.datetime.now()
telemetry.log_api("install-success",
total_runtime=str(end_time - start_time))
cmdr.success('Done')
cmdr.print((f'Next steps:\n1. Activate environment: {cmd_activate}\n'
'2. Run pipeline: ploomber build'))
cmdr.success()
def _pip_install(cmdr, pip, lock, requirements=_REQS_TXT):
cmdr.run(pip,
'install',
'--requirement',
requirements,
description='Installing dependencies')
if lock:
pip_lock = cmdr.run(pip,
'freeze',
'--exclude-editable',
description='Locking dependencies',
capture_output=True)
name = Path(requirements).stem
Path(f'{name}.lock.txt').write_text(pip_lock)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from src.envs.gym import GymEnv
from src.envs.atari import AtariEnv
from src.models.dqn_fc import DQNFCModel
from src.models.dqn_cnn import DQNCNNModel
from src.models.drqn_fc import DRQNFCModel
from src.models.drqn_cnn import DRQNCNNModel
from src.replay.episodic import EpisodicMemory
from src.replay.random import RandomMemory
from src.agents.dqn import DQNAgent
from src.agents.drqn import DRQNAgent
EnvDict = {"gym": GymEnv,
"atari": AtariEnv}
ModelDict = {"dqn_cnn": DQNCNNModel,
"dqn_fc": DQNFCModel,
"drqn_fc": DRQNFCModel,
"drqn_cnn": DRQNCNNModel}
MemoryDict = {"episodic": EpisodicMemory,
"random": RandomMemory}
AgentDict = {"dqn": DQNAgent,
"drqn": DRQNAgent}
|
# MIT License
#
# Copyright (c) 2018 Evgeny Medvedev, evge.medvedev@gmail.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from builtins import map
from ethereumetl.domain.receipt_log import EthReceiptLog
from ethereumetl.domain.token_transfer import EthTokenTransfer
from ethereumetl.utils import chunk_string, hex_to_dec, to_normalized_address
# https://ethereum.stackexchange.com/questions/12553/understanding-logs-and-log-blooms
TRANSFER_EVENT_TOPIC = '0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef'
logger = logging.getLogger(__name__)
class EthTokenTransferExtractor(object):
def extract_transfer_from_log(self, receipt_log: EthReceiptLog) -> EthTokenTransfer:
topics = receipt_log.topics
if topics is None or len(topics) < 1:
# This is normal, topics can be empty for anonymous events
return None
if topics[0] == TRANSFER_EVENT_TOPIC:
# Handle unindexed event fields
topics_with_data = topics + split_to_words(receipt_log.data)
# if the number of topics and fields in data part != 4, then it's a weird event
if len(topics_with_data) != 4:
logger.warning("The number of topics and data parts is not equal to 4 in log {} of transaction {}"
.format(receipt_log.log_index, receipt_log.transaction_hash))
return None
token_transfer = EthTokenTransfer()
token_transfer.token_address = to_normalized_address(receipt_log.address)
token_transfer.from_address = word_to_address(topics_with_data[1])
token_transfer.to_address = word_to_address(topics_with_data[2])
token_transfer.value = hex_to_dec(topics_with_data[3])
token_transfer.transaction_hash = receipt_log.transaction_hash
token_transfer.log_index = receipt_log.log_index
token_transfer.block_timestamp = receipt_log.block_timestamp
token_transfer.block_number = receipt_log.block_number
token_transfer.block_hash = receipt_log.block_hash
return token_transfer
return None
def split_to_words(data):
if data and len(data) > 2:
data_without_0x = data[2:]
words = list(chunk_string(data_without_0x, 64))
words_with_0x = list(map(lambda word: '0x' + word, words))
return words_with_0x
return []
def word_to_address(param):
if param is None:
return None
elif len(param) >= 40:
return to_normalized_address('0x' + param[-40:])
else:
return to_normalized_address(param)
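# Illustrative sketch of how the helpers above decode a Transfer log's
# unindexed data section (the hex payloads are made-up placeholders, not real
# chain data):
if __name__ == '__main__':
    example_data = '0x' + '0' * 24 + 'ab' * 20   # one 32-byte word holding a left-padded address
    words = split_to_words(example_data)          # -> ['0x0000...abab...ab']
    print(word_to_address(words[0]))              # -> '0x' + 'ab' * 20, normalized
    print(hex_to_dec('0x' + '0' * 62 + '64'))     # a value word: 0x...64 -> 100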
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'college_comission_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
'''
Week-2:Exercise-Fourth Power
Write a Python function, fourthPower, that takes in one number and returns that value raised to the fourth power.
You should use the square procedure that you defined in an earlier exercise (you don't need to redefine square in this box; when you call square, the grader will use our definition).
This function takes in one number and returns one number.
'''
#code
def fourthPower(x):
'''
x: int or float.
'''
# Your code here
return square(x) * square(x)
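# A quick sanity check, assuming the grader's square(x) behaves like x * x
# (square is intentionally not redefined here, per the exercise statement):
#   fourthPower(3) -> square(3) * square(3) -> 9 * 9 -> 81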
|
from gui import gui as GUI
# Code to actually run the GitUp app
if __name__ == "__main__":
app = GUI.GitUpApp()
app.mainloop()
|
#! /usr/bin/env python
"""
@file ion/core/unit_test.py
@author Bill French
@brief Base test class for all MI tests. Provides two base classes:
one for pyon tests and one for standalone MI tests.
The standalone test case exists for tests that don't require, or can't
integrate with, the common ION test case.
"""
from mi.core.log import get_logger
log = get_logger()
import unittest
import json
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import DataParticleValue
from mi.idk.exceptions import IDKException
class MiUnitTest(unittest.TestCase):
"""
Base class for non-ion tests. Use only if needed to avoid ion
test common code.
"""
def shortDescription(self):
return None
class MiIntTestCase(unittest.TestCase):
"""
Base class for most tests in MI.
"""
def shortDescription(self):
return None
class ParticleTestMixin(object):
"""
A class with some methods to test data particles. Intended to be mixed
into test classes so that particles can be tested in different areas of
the MI code base.
"""
def convert_data_particle_to_dict(self, data_particle):
"""
        Convert a data particle object to a dict. This works whether the data
        particle is a DataParticle object, a dictionary, or a JSON string.
@param data_particle data particle
@return dictionary representation of a data particle
"""
if (isinstance(data_particle, DataParticle)):
sample_dict = data_particle.generate_dict()
elif (isinstance(data_particle, str)):
sample_dict = json.loads(data_particle)
elif (isinstance(data_particle, dict)):
sample_dict = data_particle
else:
raise IDKException("invalid data particle type: %s", type(data_particle))
return sample_dict
def get_data_particle_values_as_dict(self, data_particle):
"""
Return all of the data particle values as a dictionary with the value
id as the key and the value as the value. This method will decimate
the data, in the any characteristics other than value id and value.
i.e. binary.
@param data_particle data particle to inspect
@return return a dictionary with keys and values { value-id: value }
@throws IDKException when missing values dictionary
"""
sample_dict = self.convert_data_particle_to_dict(data_particle)
values = sample_dict.get('values')
if(not values):
raise IDKException("Data particle missing values")
if(not isinstance(values, list)):
raise IDKException("Data particle values not a list")
result = {}
for param in values:
if(not isinstance(param, dict)):
raise IDKException("must be a dict")
key = param.get('value_id')
if(key == None):
raise IDKException("value_id not defined")
if(key in result.keys()):
raise IDKException("duplicate value detected for %s" % key)
result[key] = param.get('value')
return result
def assert_data_particle_keys(self, data_particle_key, test_config):
"""
Ensure that the keys defined in the data particle key enum match
the keys defined in the test configuration.
@param data_particle_key object that defines all data particle keys.
@param test_config dictionary containing parameter verification values
"""
driver_keys = sorted(data_particle_key.list())
test_config_keys = sorted(test_config.keys())
self.assertEqual(driver_keys, test_config_keys)
def assert_data_particle_header(self, data_particle, stream_name, require_instrument_timestamp=False):
"""
Verify a data particle header is formatted properly
@param data_particle version 1 data particle
@param stream_name version 1 data particle
@param require_instrument_timestamp should we verify the instrument timestamp exists
"""
sample_dict = self.convert_data_particle_to_dict(data_particle)
log.debug("SAMPLEDICT: %s", sample_dict)
self.assertTrue(sample_dict[DataParticleKey.STREAM_NAME], stream_name)
self.assertTrue(sample_dict[DataParticleKey.PKT_FORMAT_ID], DataParticleValue.JSON_DATA)
self.assertTrue(sample_dict[DataParticleKey.PKT_VERSION], 1)
self.assertIsInstance(sample_dict[DataParticleKey.VALUES], list)
self.assertTrue(sample_dict.get(DataParticleKey.PREFERRED_TIMESTAMP))
self.assertIsNotNone(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.DRIVER_TIMESTAMP), float)
# It is highly unlikely that we should have a particle without a port agent timestamp,
# at least that's the current assumption.
self.assertIsNotNone(sample_dict.get(DataParticleKey.PORT_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.PORT_TIMESTAMP), float)
if(require_instrument_timestamp):
self.assertIsNotNone(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP))
self.assertIsInstance(sample_dict.get(DataParticleKey.INTERNAL_TIMESTAMP), float)
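# Hypothetical usage sketch (names below are illustrative, not from this file):
# a driver test case mixes ParticleTestMixin into a MiUnitTest subclass and
# uses the helpers to validate particles emitted by the driver under test.
#
# class MyDriverParticleTest(MiUnitTest, ParticleTestMixin):
#     def test_sample_particle(self):
#         values = self.get_data_particle_values_as_dict(sample_particle)
#         self.assertEqual(values['temperature'], 21.5)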
|
_base_ = [
'../_base_/models/slowfast_r50.py', '../_base_/schedules/sgd_100e.py',
'../_base_/default_runtime.py'
]
model = dict(
cls_head=dict(
num_classes=7,
multi_class=True))
# dataset settings
dataset_type = 'RawframeDataset'
data_root = '/home/deepo/sanofius/TRAINING/dataset/rawframes_rgb'
ann_file_train = '/home/deepo/sanofius/TRAINING/train_file_NOWALK.txt'
ann_file_val = '/home/deepo/sanofius/TRAINING/val_file_NOWALK.txt'
ann_file_test = '/home/deepo/sanofius/TRAINING/test_file_NOWALK.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='SampleFrames', clip_len=16, frame_interval=2, num_clips=1),
#clip_len (int): Frames of each sampled output clip.
#frame_interval (int): Temporal interval of adjacent sampled frames.
#num_clips (int): Number of clips to be sampled. Default: 1.
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(
type='SampleFrames',
clip_len=16,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(
type='SampleFrames',
clip_len=16,
frame_interval=2,
num_clips=1,
test_mode=True),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=8,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline,
multi_class=True,
num_classes=7,
modality='RGB'),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root,
pipeline=val_pipeline,
multi_class=True,
num_classes=7,
modality='RGB'),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root,
pipeline=test_pipeline,
multi_class=True,
num_classes=7,
modality='RGB'))
evaluation = dict(
interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy','per_class_accuracy'],
metric_options=dict(top_k_accuracy=dict(topk=(1, 2))),
save_best='mean_class_accuracy')
# runtime settings
checkpoint_config = dict(interval=5)
work_dir = '/home/deepo/sanofius/TRAINING/workdir/'
log_level = 'DEBUG'
total_epochs = 100 # Total epochs to train the model
# # learning policy
# lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook
# policy='step', # Policy of scheduler, also support CosineAnnealing, Cyclic, etc. Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9
# step=[40, 80]) # Steps to decay the learning rate
# total_epochs = 100 # Total epochs to train the model
# checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation
# interval=5) # Interval to save checkpoint
# evaluation = dict( # Config of evaluation during training
# interval=5, # Interval to perform evaluation
# metrics=['top_k_accuracy', 'mean_class_accuracy'], # Metrics to be performed
# metric_options=dict(top_k_accuracy=dict(topk=(1, 3))), # Set top-k accuracy to 1 and 3 during validation
# save_best='top_k_accuracy') # set `top_k_accuracy` as key indicator to save best checkpoint
|
class PipelineConstructionError(Exception):
pass
class InvalidConfigError(Exception):
pass
class InvalidModuleError(Exception):
pass
|
'''Update handlers for boes_bot.'''
import os
import datetime, calendar
import locale
import json
import pymongo
import pysftp
from pymongo import MongoClient
from telegram import messages
from telegram import types
from telegram import methods
from handlers.section_handler import SectionHandler
locale.setlocale(locale.LC_ALL,"es_ES.UTF-8")
basedir = os.path.realpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
)
def create_menu_options(day, month, year, entry_count_per_section):
options = [
[{
'text': f'{section.capitalize()} ({count})',
'callback_data': f'{SectionHandler.__name__}:{year}:{month}:{day}:{section}'
}] for section, count in entry_count_per_section.items()
]
return json.dumps({'inline_keyboard': options})
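# Illustration with hypothetical values: create_menu_options(1, 2, 2021,
# {'disposiciones generales': 3}) returns JSON along the lines of
# {"inline_keyboard": [[{"text": "Disposiciones generales (3)",
#   "callback_data": "SectionHandler:2021:2:1:disposiciones generales"}]]}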
class DayHandler:
collection = 'diary_summary'
def handles(self, update):
if update.type != types.CallbackQuery:
return False
if update.content['data'].startswith(self.__class__.__name__):
return True
return False
def __call__(self, update, token, dbname, dburi, sftphost, sftpuser, sftppass, sftp_cnopts=None):
year, month, day = update.content['data'].split(':')[1:]
year, month, day = int(year), int(month), int(day)
date = datetime.datetime(year, month, day)
formatted_date = '{:%Y-%m-%d}'.format(date)
client = MongoClient(dburi)
db = client[dbname]
summary = db[self.collection].find_one({'date': formatted_date})
if summary == None:
client.close()
return
formatted_link = summary["link"]\
.replace(".", "\\.")\
.replace('=', '\\=')\
.replace('-', '\\-')\
.replace('_', '\\_')
formatted_entry_types = '\n'.join(
f'◇ {c} entradas son {t}'
for t, c
in summary["per_type_def_count"].items()
if t != ''
)
caption = (
            f'Boletín del día *{day} de {calendar.month_name[month].capitalize()}, {year}*\\. '
f'Accesible en {formatted_link}\\.\n\n'
f'Se registraron un total de {summary["entry_count"]} entradas, de las cuales:\n'
f'{formatted_entry_types}'
)
if summary['summary_graphic']['telegram_id'] != '':
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup=create_menu_options(day, month, year, summary['sections']),
media={
'content': summary['summary_graphic']['telegram_id'],
'caption': caption,
'parse_mode': 'MarkdownV2',
})
msg.apply(token, update.content.cid, verbose=True)
else:
local_path = os.path.basename(summary['summary_graphic']['sftp_file'])
if not os.path.exists(local_path):
with pysftp.Connection(
sftphost,
username=sftpuser,
password=sftppass,
cnopts=sftp_cnopts) as sftp:
sftp.get(
summary['summary_graphic']['sftp_file'],
local_path)
with open(local_path, 'rb') as f:
msg = messages.PhotoReplacementContent(
message_id=update.content['message']['message_id'],
reply_markup='{}',
media={
'content': f,
'caption': caption,
'parse_mode': 'MarkdownV2',
'reply_markup': create_menu_options(day, month, year, summary['sections'])
})
status, res = msg.apply(token, update.content.cid, verbose=True)
if status == 200 and res['ok'] == True:
photo, thumbnail = res['result']['photo'][-2:]
photo_id = photo['file_id']
result = db[self.collection].update_one(
{'date': formatted_date},
{'$set': {'summary_graphic.telegram_id': photo_id}}
)
client.close()
if result.modified_count == 1:
os.remove(local_path)
|
#!/usr/bin/env python3
import sys
import struct
import pandas as pd
import matplotlib
# Must be before importing matplotlib.pyplot or pylab!
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
###############################################
dsize = 16
###############################################
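# Binary layout assumed throughout this script (inferred from the struct
# unpack calls below): a sequence of records, each consisting of a 16-byte
# header (uint64 iter, uint64 count) followed by `count` 16-byte pairs of
# (uint64 page_address, uint64 dirty_cache_line_bitmask).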
def getFrame(data, iter = None):
if iter is None:
return data
else:
return data[data.iter==iter]
def dirtyCls(data, iter = None):
df = getFrame(data, iter)
return sum(df.bits.apply(lambda x: sum(x)))
def dirtyPages(data, iter = None):
df = getFrame(data, iter)
return len(df.page)
def dirtyClsB(data, iter = None):
return dirtyCls(data, iter) * 64
def dirtyPagesB(data, iter = None):
return dirtyPages(data, iter) * 4096
def avgDirtyCls(data):
numIter = len(data.iter.unique())
return dirtyCls(data) / float(numIter)
def avgDirtyPages(data):
numIter = len(data.iter.unique())
return dirtyPages(data) / float(numIter)
def avgDirtyClsPerPage(data, iter = None):
df = getFrame(data, iter)
numPages = dirtyPages(df)
return dirtyCls(df) / float(numPages)
def getDirtyCLsPerPage(fileContent, meta, iterFirst = None, iterLast = None):
if iterFirst is None:
### skip iteration 0 because we set all cache lines to dirty in that iteration
iterFirst = meta.iter.iloc[1]
if iterLast is None:
iterLast = len(meta.iter)
dfF = pd.DataFrame({'cnt':[0]*64}, index=range(1,65))
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
df = pd.DataFrame({'cnt':map((lambda XX: sum(data.bits.apply(lambda x: sum(x)) == XX)), range(1, 65))}, index=range(1,65))
dfF = dfF+df
return dfF
def getDiffPagesClsB(fileContent, meta, iterFirst = None, iterLast = None):
if iterFirst is None:
iterFirst = meta.iter.iloc[0]
if iterLast is None:
iterLast = len(meta.iter)
df = pd.DataFrame()
for i in range(iterFirst, iterLast):
data = getDataframeIter(fileContent, meta, i)
dcl = dirtyClsB(data)
dp = dirtyPagesB(data)
df1 = pd.DataFrame({'iter':[i], 'dirtyCl':[dcl], 'dirtyP':[dp], 'amplif':[dp*1.0/dcl], 'pcnt':[dcl*100.0/dp]})
df = df.append(df1)
return df
def readBinFile(filename):
with open(filename, mode='rb') as file:
fileContent = file.read()
return fileContent
def getMetadata(fileContent):
first = 0
totalSize = len(fileContent)
meta=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
df1 = pd.DataFrame({'iter':[iter], 'count':[count], 'pos':[first]})
meta = meta.append(df1)
first = count * dsize + (first + dsize)
return meta
def getDataframeWBitlist(fileContent):
first = 0
totalSize = len(fileContent)
data=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
output = struct.unpack(count*'QQ', fileContent[(first+dsize):count*dsize+(first+dsize)])
dfbits = pd.DataFrame({'bits':output[1::2]})
df1 = pd.DataFrame({'iter':[iter]*count,
'page':output[::2],
'bits':dfbits.bits.apply(lambda x: [int(i) for i in list('{0:0b}'.format(x))])})
data = data.append(df1)
first = count * dsize + (first + dsize)
return data
def getDataframeIter(fileContent, meta, iter):
first = meta[meta.iter == iter].pos[0]
totalSize = len(fileContent)
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
output = struct.unpack(count*'QQ', fileContent[(first+dsize):count*dsize+(first+dsize)])
dfbits = pd.DataFrame({'bits':output[1::2]})
data = pd.DataFrame({'iter':[iter]*count,
'page':output[::2],
'bits':dfbits.bits.apply(lambda x: [int(i) for i in list('{0:0b}'.format(x))])})
return data
def getDataframeWStrings(fileContent):
first = 0
totalSize = len(fileContent)
data=pd.DataFrame()
while (first < totalSize):
(iter, count) = struct.unpack("QQ", fileContent[first:(first+dsize)])
print(str(iter) + ' ' + str(count))
output = struct.unpack(count*'QQ', fileContent[(first+dsize):count*dsize+(first+dsize)])
df1 = pd.DataFrame({'iter':[iter]*count, 'page':output[::2], 'bits':output[1::2]})
df = pd.DataFrame({'iter': [iter]*count,
'page': df1.page.apply(lambda x: format((x), '#x')),
'bits': df1.bits.apply(lambda x: format((x), '064b'))})
data = data.append(df)
first = count * dsize + (first + dsize)
return data
def savePlotAmplif(df, filename, iterFirst, iterLast):
ax = plt.gca()
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
df.plot('iter','amplif', kind='line', color='orange', ax=ax)
plt.xlabel("Iteration")
plt.ylabel("Dirty data amplification")
plt.legend().remove()
plt.grid(linestyle='dotted')
#removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
doSavePlot(filename, "amplif", iterFirst, iterLast)
def savePlotPcnt(df, filename, iterFirst, iterLast):
ax = plt.gca()
plt.ylim(0, 100)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
df.plot('iter','pcnt', kind='line', color='orange', ax=ax)
plt.xlabel("Iteration")
plt.ylabel("Dirty data (%)")
#plt.title("Dirty data amplification")
plt.legend().remove()
#plt.grid(True)
plt.grid(linestyle='dotted')
#removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
doSavePlot(filename, "pcnt", iterFirst, iterLast)
def doSavePlot(filename, unique, iterFirst, iterLast):
name = filename + "_" + unique + "_" + str(iterFirst) + "to" + str(iterLast) + ".pdf"
print("Saving to {}".format(name))
plt.savefig(name)
plt.clf()
plt.cla()
plt.close()
def savePlotData(df, filename, iterFirst, iterLast):
ax = plt.gca()
ax.set_yscale('log')
#ax.set_yscale('linear')
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
df.plot(x='iter', y='dirtyCl', label='Cache-lines', kind='line', color='blue', ax=ax)
df.plot(x='iter', y='dirtyP', label='Pages', kind='line', color='orange', ax=ax)
#removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#plt.grid(True)
plt.grid(linestyle='dotted')
plt.xlabel("Iteration")
plt.ylabel("Dirty data (bytes)")
#plt.title("Dirty data amplification")
#plt.legend().remove()
doSavePlot(filename, "data", iterFirst, iterLast)
def savePlotBar(df, filename, iterFirst, iterLast):
ax = df.plot.bar(rot=0)
### print every n labels on X axis
n = 4
ticks = ax.xaxis.get_ticklocs()
ticklabels = [l.get_text() for l in ax.xaxis.get_ticklabels()]
ax.xaxis.set_ticks(ticks[::n])
ax.xaxis.set_ticklabels(ticklabels[::n])
#removing top and right borders
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.xlabel("Number of dirty cache lines per page")
plt.ylabel("Number of dirty cache lines")
plt.legend().remove()
doSaveCSV(df, filename, "bar", iterFirst, iterLast)
doSavePlot(filename, "bar", iterFirst, iterLast)
def doSaveCSV(data, filename, unique, iterFirst, iterLast):
name = filename
print("Saving data to file: {}".format(name))
data.to_csv(name, sep=' ', mode='w')
def savePlot(filename, fileContent, meta, iterFirstA=None, iterLastA=None):
    iterFirst = meta.iter.iloc[0] if iterFirstA is None else iterFirstA
    iterLast = len(meta.iter) if iterLastA is None else iterLastA
    df = getDiffPagesClsB(fileContent, meta, iterFirst, iterLast)
doSaveCSV(df, filename, "all", iterFirst, iterLast)
#################################################################3
argc=len(sys.argv)
if argc != 3:
    print("Usage: python3 {} <input_file> <output_data>".format(__file__))
    sys.exit(1)
filename=sys.argv[1]
graphname=sys.argv[2]
print(filename)
print(graphname)
fileContent = readBinFile(filename)
meta = getMetadata(fileContent)
savePlot(graphname, fileContent, meta)
|
def print_my_info():
print("안녕하세요.")
print("홍길동입니다.")
print("만나서반갑습니다.")
print_my_info()
|
"""Config flow for Control4 integration."""
from asyncio import TimeoutError as asyncioTimeoutError
import logging
from aiohttp.client_exceptions import ClientError
from pyControl4.account import C4Account
from pyControl4.director import C4Director
from pyControl4.error_handling import NotFound, Unauthorized
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.device_registry import format_mac
from .const import CONF_CONTROLLER_UNIQUE_ID, DEFAULT_SCAN_INTERVAL, MIN_SCAN_INTERVAL
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
class Control4Validator:
"""Validates that config details can be used to authenticate and communicate with Control4."""
def __init__(self, host, username, password, hass):
"""Initialize."""
self.host = host
self.username = username
self.password = password
self.controller_unique_id = None
self.director_bearer_token = None
self.hass = hass
async def authenticate(self) -> bool:
"""Test if we can authenticate with the Control4 account API."""
try:
account_session = aiohttp_client.async_get_clientsession(self.hass)
account = C4Account(self.username, self.password, account_session)
# Authenticate with Control4 account
await account.getAccountBearerToken()
# Get controller name
account_controllers = await account.getAccountControllers()
self.controller_unique_id = account_controllers["controllerCommonName"]
# Get bearer token to communicate with controller locally
self.director_bearer_token = (
await account.getDirectorBearerToken(self.controller_unique_id)
)["token"]
return True
except (Unauthorized, NotFound):
return False
async def connect_to_director(self) -> bool:
"""Test if we can connect to the local Control4 Director."""
try:
director_session = aiohttp_client.async_get_clientsession(
self.hass, verify_ssl=False
)
director = C4Director(
self.host, self.director_bearer_token, director_session
)
await director.getAllItemInfo()
return True
except (Unauthorized, ClientError, asyncioTimeoutError):
_LOGGER.error("Failed to connect to the Control4 controller")
return False
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Control4."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
hub = Control4Validator(
user_input["host"],
user_input["username"],
user_input["password"],
self.hass,
)
try:
if not await hub.authenticate():
raise InvalidAuth
if not await hub.connect_to_director():
raise CannotConnect
except InvalidAuth:
errors["base"] = "invalid_auth"
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if not errors:
controller_unique_id = hub.controller_unique_id
mac = (controller_unique_id.split("_", 3))[2]
formatted_mac = format_mac(mac)
await self.async_set_unique_id(formatted_mac)
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=controller_unique_id,
data={
CONF_HOST: user_input["host"],
CONF_USERNAME: user_input["username"],
CONF_PASSWORD: user_input["password"],
CONF_CONTROLLER_UNIQUE_ID: controller_unique_id,
},
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Control4."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
),
): vol.All(cv.positive_int, vol.Clamp(min=MIN_SCAN_INTERVAL)),
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
# source https://www.youtube.com/playlist?list=PLEsfXFp6DpzRyxnU-vfs3vk-61Wpt7bOS
import cv2
import numpy as np
import os
# source: https://stackoverflow.com/a/44659589
def image_resize(image, width = None, height = None, inter = cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation = inter)
# return the resized image
return resized
def alpha_blend(frame_1, frame_2, mask):
alpha = mask/255.0
blended = cv2.convertScaleAbs(frame_1*(1-alpha) + frame_2*alpha)
return blended
def apply_circle_focus_blur(frame, intensity=0.2):
frame_h, frame_w, frame_c = frame.shape
y = int(frame_h/2)
x = int(frame_w/2)
mask = np.zeros((frame_h, frame_w, 4), dtype='uint8')
cv2.circle(mask, (x, y), int(y/2), (255,255,255), -1, cv2.LINE_AA)
mask = cv2.GaussianBlur(mask, (21,21),11 )
blured = cv2.GaussianBlur(frame, (21,21), 11)
blended = alpha_blend(frame, blured, 255-mask)
frame = cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)
return frame
def portrait_mode(frame):
# cv2.imshow('frame', frame)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(gray, 120,255,cv2.THRESH_BINARY)
mask = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGRA)
blured = cv2.GaussianBlur(frame, (21,21), 11)
blended = alpha_blend(frame, blured, mask)
frame = cv2.cvtColor(blended, cv2.COLOR_BGRA2BGR)
return frame
class CFEVideoConf(object):
# Standard Video Dimensions Sizes
STD_DIMENSIONS = {
"360p": (480, 360),
"480p": (640, 480),
"720p": (1280, 720),
"1080p": (1920, 1080),
"4k": (3840, 2160),
}
# Video Encoding, might require additional installs
    # Types of Codecs: http://www.fourcc.org/codecs.php
VIDEO_TYPE = {
'avi': cv2.VideoWriter_fourcc(*'XVID'),
#'mp4': cv2.VideoWriter_fourcc(*'H264'),
'mp4': cv2.VideoWriter_fourcc(*'XVID'),
}
width = 640
height = 480
dims = (640, 480)
capture = None
video_type = None
def __init__(self, capture, filepath, res="480p", *args, **kwargs):
self.capture = capture
self.filepath = filepath
self.width, self.height = self.get_dims(res=res)
self.video_type = self.get_video_type()
# Set resolution for the video capture
# Function adapted from https://kirr.co/0l6qmh
def change_res(self, width, height):
self.capture.set(3, width)
self.capture.set(4, height)
def get_dims(self, res='480p'):
width, height = self.STD_DIMENSIONS['480p']
if res in self.STD_DIMENSIONS:
width, height = self.STD_DIMENSIONS[res]
self.change_res(width, height)
self.dims = (width, height)
return width, height
def get_video_type(self):
filename, ext = os.path.splitext(self.filepath)
if ext in self.VIDEO_TYPE:
return self.VIDEO_TYPE[ext]
return self.VIDEO_TYPE['avi']
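# A minimal usage sketch (added; not part of the original tutorial code). It
# shows how CFEVideoConf is meant to configure a cv2.VideoWriter; the camera
# index 0, the output name 'demo.avi', the 25 fps value and the 100-frame cap
# are illustrative assumptions only.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    video_conf = CFEVideoConf(cap, filepath='demo.avi', res='480p')
    out = cv2.VideoWriter('demo.avi', video_conf.video_type, 25, video_conf.dims)
    for _ in range(100):  # grab a fixed number of frames for the demo
        ok, frame = cap.read()
        if not ok:
            break
        out.write(frame)
    cap.release()
    out.release()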
|
#
# Configuration file for gravity inversion for use by planeGravInv.py
#
# Inversion constants:
#
# scale between misfit and regularization
mu = 1.e-14
#
# used to scale computed density. kg/m^3
rho_0 = 1.
#
# IPCG tolerance *|r| <= atol+rtol*|r0|* (energy norm)
# absolute tolerance for IPCG iterations
atol = 0.
#
# relative tolerance for IPCG iterations
rtol = 1.e-2
#
# tolerance for solving PDEs
# make sure this is not more than the square of rtol
pdetol = 1.e-10
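#
# Added sanity-check sketch: encode the constraint stated above
# (pdetol must not exceed rtol**2) so a misconfiguration fails early.
assert pdetol <= rtol**2, "pdetol must not be more than the square of rtol"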
#
# maximum number of IPCG iterations
iter_max = 500
#
# data scale. Program assumes m/s^2,
# converts micrometres/s^2 to m/s^2
data_scale = 1.e-6
#
#
# File names
# mesh file name. This needs to be in msh or fly format
mesh_name = "Gravity_201x338.fly"
#
# data file name in netcdf format. See readme.md for more details.
data_file = "Gravity_201x338.nc"
#
# output file name for .csv output and silo output
output_name = "G_201x338_rho0_{0:1.3e}_mu_{1:1.3e}".format(rho_0,mu)
#
# Level for the verbosity of the output, "low", "medium" or "high".
# low:
# screen outputs:
# data range,
# summaries of gravity data and final gravity
# initial, final and difference misfits
# file output:
# silo of final solution
# medium: low outputs +
# screen outputs:
# residual norm from the IPCG iterations
# high: medium outputs +
# screen outputs:
# misfit and smoothing value at each iteration step
# file outputs:
# csv files for misfit and smoothing at each IPCG iteration
# silos at misfit values of 0.05, 0.01, 0.008 and 0.005. (Initial misfit is 0.5.)
VerboseLevel = "low"
#VerboseLevel = "medium"
#VerboseLevel = "high"
|
"""
Ray queries using the pyembree package with the
API wrapped to match our native raytracer.
"""
import numpy as np
from collections import deque
from copy import deepcopy
from pyembree import __version__ as _ver
from pyembree import rtcore_scene
from pyembree.mesh_construction import TriangleMesh
from pkg_resources import parse_version
from .ray_util import contains_points
from .. import util
from .. import caching
from .. import intersections
# the factor of geometry.scale to offset a ray from a triangle
# to reliably not hit its origin triangle
_ray_offset_factor = 1e-4
# we want to clip our offset to a sane distance
_ray_offset_floor = 1e-8
# see if we're using a newer version of the pyembree wrapper
_embree_new = parse_version(_ver) >= parse_version('0.1.4')
# both old and new versions require exact but different type
_embree_dtype = [np.float64, np.float32][int(_embree_new)]
class RayMeshIntersector(object):
def __init__(self,
geometry,
scale_to_box=True):
"""
Do ray- mesh queries.
Parameters
-------------
geometry : Trimesh object
Mesh to do ray tests on
scale_to_box : bool
If true, will scale mesh to approximate
unit cube to avoid problems with extreme
large or small meshes.
"""
self.mesh = geometry
self._scale_to_box = scale_to_box
self._cache = caching.Cache(id_function=self.mesh.crc)
@property
def _scale(self):
"""
Scaling factor for precision.
"""
if self._scale_to_box:
# scale vertices to approximately a cube to help with
# numerical issues at very large/small scales
scale = 100.0 / self.mesh.scale
else:
scale = 1.0
return scale
@caching.cache_decorator
def _scene(self):
"""
A cached version of the pyembree scene.
"""
return _EmbreeWrap(vertices=self.mesh.vertices,
faces=self.mesh.faces,
scale=self._scale)
def intersects_location(self,
ray_origins,
ray_directions,
multiple_hits=True):
"""
Return the location of where a ray hits a surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
---------
locations: (n) sequence of (m,3) intersection points
index_ray: (n,) int, list of ray index
index_tri: (n,) int, list of triangle (face) indexes
"""
(index_tri,
index_ray,
locations) = self.intersects_id(
ray_origins=ray_origins,
ray_directions=ray_directions,
multiple_hits=multiple_hits,
return_locations=True)
return locations, index_ray, index_tri
def intersects_id(self,
ray_origins,
ray_directions,
multiple_hits=True,
max_hits=20,
return_locations=False):
"""
Find the triangles hit by a list of rays, including
optionally multiple hits along a single ray.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
multiple_hits: bool, if True will return every hit along the ray
if False will only return first hit
return_locations: bool, should we return hit locations or not
Returns
----------
index_tri: (m,) int, index of triangle the ray hit
index_ray: (m,) int, index of ray
locations: (m,3) float, locations in space
"""
# make sure input is _dtype for embree
ray_origins = np.asanyarray(
deepcopy(ray_origins),
dtype=np.float64)
ray_directions = np.asanyarray(ray_directions,
dtype=np.float64)
ray_directions = util.unitize(ray_directions)
# since we are constructing all hits, save them to a deque then
# stack into (depth, len(rays)) at the end
result_triangle = deque()
result_ray_idx = deque()
result_locations = deque()
# the mask for which rays are still active
        current = np.ones(len(ray_origins), dtype=bool)
if multiple_hits or return_locations:
# how much to offset ray to transport to the other side of face
distance = np.clip(_ray_offset_factor * self._scale,
_ray_offset_floor,
np.inf)
ray_offsets = ray_directions * distance
# grab the planes from triangles
plane_origins = self.mesh.triangles[:, 0, :]
plane_normals = self.mesh.face_normals
# use a for loop rather than a while to ensure this exits
# if a ray is offset from a triangle and then is reported
# hitting itself this could get stuck on that one triangle
for query_depth in range(max_hits):
# run the pyembree query
# if you set output=1 it will calculate distance along
            # ray, which is bizarrely slower than our calculation
query = self._scene.run(
ray_origins[current],
ray_directions[current])
# basically we need to reduce the rays to the ones that hit
# something
hit = query != -1
# which triangle indexes were hit
hit_triangle = query[hit]
# eliminate rays that didn't hit anything from future queries
current_index = np.nonzero(current)[0]
current_index_no_hit = current_index[np.logical_not(hit)]
current_index_hit = current_index[hit]
current[current_index_no_hit] = False
# append the triangle and ray index to the results
result_triangle.append(hit_triangle)
result_ray_idx.append(current_index_hit)
# if we don't need all of the hits, return the first one
if ((not multiple_hits and
not return_locations) or
not hit.any()):
break
# find the location of where the ray hit the triangle plane
new_origins, valid = intersections.planes_lines(
plane_origins=plane_origins[hit_triangle],
plane_normals=plane_normals[hit_triangle],
line_origins=ray_origins[current],
line_directions=ray_directions[current])
if not valid.all():
# since a plane intersection was invalid we have to go back and
# fix some stuff, we pop the ray index and triangle index,
# apply the valid mask then append it right back to keep our
# indexes intact
result_ray_idx.append(result_ray_idx.pop()[valid])
result_triangle.append(result_triangle.pop()[valid])
# update the current rays to reflect that we couldn't find a
# new origin
current[current_index_hit[np.logical_not(valid)]] = False
# since we had to find the intersection point anyway we save it
# even if we're not going to return it
result_locations.extend(new_origins)
if multiple_hits:
# move the ray origin to the other side of the triangle
ray_origins[current] = new_origins + ray_offsets[current]
else:
break
# stack the deques into nice 1D numpy arrays
index_tri = np.hstack(result_triangle)
index_ray = np.hstack(result_ray_idx)
if return_locations:
locations = (
np.zeros((0, 3), float) if len(result_locations) == 0
else np.array(result_locations))
return index_tri, index_ray, locations
return index_tri, index_ray
def intersects_first(self,
ray_origins,
ray_directions):
"""
Find the index of the first triangle a ray hits.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
triangle_index: (n,) int, index of triangle ray hit, or -1 if not hit
"""
ray_origins = np.asanyarray(deepcopy(ray_origins))
ray_directions = np.asanyarray(ray_directions)
triangle_index = self._scene.run(ray_origins,
ray_directions)
return triangle_index
def intersects_any(self,
ray_origins,
ray_directions):
"""
Check if a list of rays hits the surface.
Parameters
----------
ray_origins: (n,3) float, origins of rays
ray_directions: (n,3) float, direction (vector) of rays
Returns
----------
hit: (n,) bool, did each ray hit the surface
"""
first = self.intersects_first(ray_origins=ray_origins,
ray_directions=ray_directions)
hit = first != -1
return hit
def contains_points(self, points):
"""
Check if a mesh contains a list of points, using ray tests.
If the point is on the surface of the mesh, behavior is undefined.
Parameters
---------
points: (n,3) points in space
Returns
---------
contains: (n,) bool
Whether point is inside mesh or not
"""
return contains_points(self, points)
class _EmbreeWrap(object):
"""
A light wrapper for PyEmbree scene objects which
allows queries to be scaled to help with precision
issues, as well as selecting the correct dtypes.
"""
def __init__(self, vertices, faces, scale):
scaled = np.asanyarray(vertices,
dtype=np.float64)
self.origin = scaled.min(axis=0)
self.scale = float(scale)
scaled = (scaled - self.origin) * self.scale
self.scene = rtcore_scene.EmbreeScene()
# assign the geometry to the scene
TriangleMesh(
scene=self.scene,
vertices=scaled.astype(_embree_dtype),
indices=faces.astype(np.int32))
def run(self, origins, normals, **kwargs):
scaled = (np.asanyarray(origins,
dtype=np.float64) - self.origin) * self.scale
return self.scene.run(scaled.astype(_embree_dtype),
normals.astype(_embree_dtype),
**kwargs)
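# A minimal usage sketch (added; not part of the trimesh source). It assumes
# pyembree is installed and that the trimesh package is importable (run it
# with `python -m` so the relative imports above resolve); the unit box and
# the single ray are illustrative values only.
if __name__ == "__main__":
    import trimesh
    mesh = trimesh.creation.box()
    intersector = RayMeshIntersector(mesh)
    locations, index_ray, index_tri = intersector.intersects_location(
        ray_origins=[[0.0, 0.0, -5.0]],
        ray_directions=[[0.0, 0.0, 1.0]])
    # the ray should enter and exit the box along +Z, giving two hits
    print(locations, index_ray, index_tri)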
|
import pytest
class TestHooks:
@pytest.fixture(autouse=True)
def create_test_file(self, testdir):
testdir.makepyfile(
"""
import os
def test_a(): pass
def test_b(): pass
def test_c(): pass
"""
)
def test_runtest_logreport(self, testdir):
"""Test that log reports from pytest_runtest_logreport when running
with xdist contain "node", "nodeid", "worker_id", and "testrun_uid" attributes. (#8)
"""
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if hasattr(report, 'node'):
if report.when == "call":
workerid = report.node.workerinput['workerid']
testrunuid = report.node.workerinput['testrunuid']
if workerid != report.worker_id:
print("HOOK: Worker id mismatch: %s %s"
% (workerid, report.worker_id))
elif testrunuid != report.testrun_uid:
print("HOOK: Testrun uid mismatch: %s %s"
% (testrunuid, report.testrun_uid))
else:
print("HOOK: %s %s %s"
% (report.nodeid, report.worker_id, report.testrun_uid))
"""
)
res = testdir.runpytest("-n1", "-s")
res.stdout.fnmatch_lines(
[
"*HOOK: test_runtest_logreport.py::test_a gw0 *",
"*HOOK: test_runtest_logreport.py::test_b gw0 *",
"*HOOK: test_runtest_logreport.py::test_c gw0 *",
"*3 passed*",
]
)
def test_node_collection_finished(self, testdir):
"""Test pytest_xdist_node_collection_finished hook (#8)."""
testdir.makeconftest(
"""
def pytest_xdist_node_collection_finished(node, ids):
workerid = node.workerinput['workerid']
stripped_ids = [x.split('::')[1] for x in ids]
print("HOOK: %s %s" % (workerid, ', '.join(stripped_ids)))
"""
)
res = testdir.runpytest("-n2", "-s")
res.stdout.fnmatch_lines_random(
["*HOOK: gw0 test_a, test_b, test_c", "*HOOK: gw1 test_a, test_b, test_c"]
)
res.stdout.fnmatch_lines(["*3 passed*"])
|
#!/usr/bin/env python
# Copyright 2020-2022 The Defold Foundation
# Copyright 2014-2020 King
# Copyright 2009-2014 Ragnar Svensson, Christian Murray
# Licensed under the Defold License version 1.0 (the "License"); you may not use
# this file except in compliance with the License.
#
# You may obtain a copy of the License, together with FAQs at
# https://www.defold.com/license
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import re
import base64
import mimetypes
JS_DIR = "js/"
CSS_DIR = "css/"
IMAGES_DIR = "images/"
js_concat = ""
css_concat = ""
# gather javascript files
for entry in os.listdir(JS_DIR):
if entry.endswith(".js"):
full_path = os.path.join(JS_DIR, entry)
with open(full_path, 'r') as js_file:
js_concat += "/* source: " + full_path + " */\n"
js_concat += js_file.read() + "\n"
# gather css files
for entry in os.listdir(CSS_DIR):
if entry.endswith(".css"):
full_path = os.path.join(CSS_DIR, entry)
with open(full_path, 'r') as css_file:
css_concat += "/* source: " + full_path + " */\n"
css_concat += css_file.read() + "\n"
# gather images and generate base64 representation
for entry in os.listdir(IMAGES_DIR):
# look for url() usage where the path includes this entry
    url_re = re.compile(r"url\([\"\'][^\"\']+" + re.escape(entry) + r"[\"\']\)")
full_path = os.path.join(IMAGES_DIR, entry)
file_name, file_extension = os.path.splitext(entry)
mime_string = mimetypes.types_map[file_extension]
with open(full_path, "rb") as image_file:
        encoded_string = base64.b64encode(image_file.read()).decode('ascii')
url_data_string = "url(data:" + mime_string + ";base64," + encoded_string + ")"
css_concat = url_re.sub(url_data_string, css_concat)
print('<style type="text/css">')
print(css_concat.strip())
print('</style>')
print('<script type="text/javascript">')
print(js_concat.strip())
print('</script>')
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Template task in which a ball should fall off a slanted obstacle."""
import phyre.creator as creator_lib
__OBSTACLE_LOCS = ['left', 'right']
__OBSTACLE_SCALES = [val * 0.1 for val in range(2, 9)]
__BALL_XS = [val * 0.1 for val in range(1, 10)]
@creator_lib.define_task_template(obstacle_location=__OBSTACLE_LOCS,
obstacle_scale=__OBSTACLE_SCALES,
ball_x=__BALL_XS)
def build_task(C, obstacle_location, obstacle_scale, ball_x):
# Add slanted obstacle.
obstacle = C.add('static bar', scale=obstacle_scale) \
.set_angle(30. if obstacle_location == 'left' else -30.) \
.set_bottom(0.2 * C.scene.height)
if obstacle_location == 'left':
obstacle.set_left(-0.01 * C.scene.width)
else:
obstacle.set_right(1.01 * C.scene.width)
# Add ball that hovers over obstacle.
ball = C.add('dynamic ball', scale=0.1) \
.set_center_x(ball_x * C.scene.width) \
.set_bottom(0.9 * C.scene.height)
if obstacle_location == 'left' and ball.right > obstacle.right:
raise creator_lib.SkipTemplateParams
if obstacle_location == 'right' and ball.left < obstacle.left:
raise creator_lib.SkipTemplateParams
# Create assignment:
C.update_task(body1=ball,
body2=C.bottom_wall,
relationships=[C.SpatialRelationship.TOUCHING])
C.set_meta(C.SolutionTier.SINGLE_OBJECT)
|
def func():
if cond1:
true1
if cond2:
pass
else:
false2
if cond3:
true3
else:
false3
try:
if cond4:
true4()
else:
false4()
finally:
pass
if cond5:
try:
true5()
except:
pass
else:
false5
if cond6:
if cond7:
true7
else:
false7
else:
false6
if cond8:
for i in range(10):
pass
else:
false8
if cond9:
while cond10:
true10
false10
else:
false9
while 1:
if cond12:
try:
true12()
except IOError:
true12 = 0
def func2():
while condw1:
truew2
def func3():
if condi1:
truei1
def func4():
while True:
no_branch
if unreachable:
not reachable
def func5():
while True:
break
if cond11:
true11
def func6():
if cond13 or cond13a:
true13
if cond14 and cond14a:
true14
true15 if cond15 else false15
true16 if cond16 or cond17 else false16
true18 if cond18 and cond19 else false18
def func7():
yield cond20 or cond21 or cond22
yield cond23 and cond24 and cond25
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant.labeled_examples_to_vcf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
from absl import flags
from absl.testing import absltest
from absl.testing import flagsaver
from absl.testing import parameterized
import six
from third_party.nucleus.io import vcf
from third_party.nucleus.testing import test_utils
from deepvariant import testdata
from deepvariant.labeler import labeled_examples_to_vcf
FLAGS = flags.FLAGS
def setUpModule():
testdata.init()
class ExamplesToVCFUnitTest(parameterized.TestCase):
@flagsaver.flagsaver
def test_end2end(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.examples = testdata.GOLDEN_TRAINING_EXAMPLES + '@3' # Sharded.
FLAGS.output_vcf = test_utils.test_tmpfile('examples_to_vcf.vcf')
labeled_examples_to_vcf.main(0)
self.assertEqual(
open(FLAGS.output_vcf).readlines(),
open(testdata.deepvariant_testdata(
'golden.training_examples.vcf')).readlines())
@flagsaver.flagsaver
def test_sample_name_flag(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.examples = testdata.GOLDEN_TRAINING_EXAMPLES
FLAGS.sample_name = 'sample_name'
FLAGS.output_vcf = test_utils.test_tmpfile('no_sample_name.vcf')
labeled_examples_to_vcf.main(0)
with vcf.VcfReader(FLAGS.output_vcf) as vcf_reader:
self.assertEqual(
list(vcf_reader.header.sample_names), [FLAGS.sample_name])
@flagsaver.flagsaver
def test_raises_for_unlabeled_examples(self):
FLAGS.ref = testdata.CHR20_FASTA
FLAGS.examples = testdata.GOLDEN_CALLING_EXAMPLES
FLAGS.output_vcf = test_utils.test_tmpfile('unlabeled.vcf')
with six.assertRaisesRegex(
self, ValueError,
('Variant .* does not have any genotypes. This tool only works with '
'variants that have been labeled')):
labeled_examples_to_vcf.main(0)
if __name__ == '__main__':
absltest.main()
|
from pylab import *
import postgkyl.tools
import scipy.optimize
def lowPass(x, dt, C):
y = 0*x # filtered signal
alpha = dt/(C+dt)
y[0] = x[0]
for i in range(1,x.shape[0]):
y[i] = alpha*x[i] + (1-alpha)*y[i-1]
return y
dat = loadtxt("s3-es-iaw_phiInCell.dat")
T = dat[:,0]
E = dat[:,1]
dt = T[1]-T[0]
wH = 67.75 # computed from Maxima
# FFT filter it, removing high-frequency
fE = real(postgkyl.tools.butterFiltering(E/E[0], dt, 0.4*0.9*wH/(2*pi)))
plot(T, fE)
grid()
figure(2)
# pick range for analysis
tLo = T.searchsorted(3.0)
tHi = T.searchsorted(18.0)
T1 = T[tLo:tHi]
fE1 = fE[tLo:tHi]
fE1l = lowPass(fE1, T1[1]-T1[0], 0.0)
plot(T1, fE1, 'r-')
plot(T1, fE1l, 'k-')
axis('tight')
grid()
def findMax(t1, t2):
fv = fE1l[T1.searchsorted(t1):T1.searchsorted(t2)]
am = fv.argmax()
return T1[T1.searchsorted(t1):T1.searchsorted(t2)][am], fv[am]
t1, v1 = findMax(5.0, 6.0)
t2, v2 = findMax(10.0, 12.0)
t3, v3 = findMax(15.0, 17.5)
# plot these
plot([t1], [v1], 'bo')
plot([t2], [v2], 'bo')
plot([t3], [v3], 'bo')
# compute best fit
def func(an):
a0 = an[0]
rhs0 = exp(a0*(t2-t1)) - v2/v1
rho1 = exp(a0*(t3-t1)) - v3/v1
return rhs0, rho1
aout = scipy.optimize.fsolve(func, [0.0])
print(aout[0])
plot(T1, v1*exp(aout[0]*(T1-t1)), 'm-')
show()
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from io import open
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
with open(path.join(HERE, 'datadog_checks', 'dev', '__about__.py'), 'r', encoding='utf-8') as f:
for line in f:
line = line.strip()
if line.startswith('__version__'):
VERSION = line.split('=')[1].strip(' \'"')
break
else:
VERSION = '0.0.1'
with open(path.join(HERE, 'README.md'), 'r', encoding='utf-8') as f:
README = f.read()
REQUIRES = [
'coverage==4.5.4', # pinned due to https://github.com/nedbat/coveragepy/issues/883
'mock',
'psutil',
'PyYAML>=5.1',
'pytest',
'pytest-benchmark>=3.2.1',
'pytest-cov>=2.6.1',
'pytest-mock',
'requests>=2.22.0',
'six',
"shutilwhich==1.1.0; python_version < '3.0'",
"subprocess32==3.5.4; python_version < '3.0'",
]
setup(
name='datadog_checks_dev',
version=VERSION,
description='The Datadog Checks Developer Tools',
long_description=README,
long_description_content_type='text/markdown',
keywords='datadog agent checks dev tools tests',
url='https://github.com/DataDog/integrations-core',
author='Datadog',
author_email='packages@datadoghq.com',
license='BSD',
# See https://pypi.org/classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
],
packages=['datadog_checks', 'datadog_checks.dev'],
install_requires=REQUIRES,
include_package_data=True,
extras_require={
'cli': [
'appdirs',
'atomicwrites',
'click',
'colorama',
'docker-compose>=1.23.1,<1.24.0',
'in-toto>=0.4.1',
'pip-tools',
'pylint',
'Pillow',
'pyperclip>=1.7.0',
'semver',
'setuptools>=38.6.0',
'toml>=0.9.4, <1.0.0',
'tox>=3.12.1',
'twine>=1.11.0',
'wheel>=0.31.0',
]
},
entry_points={
'pytest11': ['datadog_checks = datadog_checks.dev.plugin.pytest'],
'tox': ['datadog_checks = datadog_checks.dev.plugin.tox'],
'console_scripts': ['ddev = datadog_checks.dev.tooling.cli:ddev'],
},
)
|
import numpy as np
from matplotlib import pyplot as plt
import torch
from torch.utils.data.sampler import Sampler
from torchvision import transforms, datasets
from PIL import Image
# Dummy class to store arguments
class Dummy():
pass
# Function that opens image from disk, normalizes it and converts to tensor
read_tensor = transforms.Compose([
lambda x: Image.open(x),
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
lambda x: torch.unsqueeze(x, 0)
])
# Plots image from tensor
def tensor_imshow(inp, title=None, **kwargs):
"""Imshow for Tensor."""
inp = inp.numpy().transpose((1, 2, 0))
# Mean and std for ImageNet
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
inp = std * inp + mean
inp = np.clip(inp, 0, 1)
plt.imshow(inp, **kwargs)
if title is not None:
plt.title(title)
# Given label number returns class name
def get_class_name(c):
labels = np.loadtxt('synset_words.txt', str, delimiter='\t')
return ' '.join(labels[c].split(',')[0].split()[1:])
# Image preprocessing function
preprocess = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
# Normalization for ImageNet
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
])
# Sampler for pytorch loader. Given range r loader will only
# return dataset[r] instead of whole dataset.
class RangeSampler(Sampler):
def __init__(self, r):
self.r = r
def __iter__(self):
return iter(self.r)
def __len__(self):
return len(self.r)
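# A minimal usage sketch (added; not part of the original utilities). It shows
# how RangeSampler limits a DataLoader to dataset[r]; the folder path
# './imagenet-val' and the range 0..25 are assumptions for illustration only.
if __name__ == "__main__":
    dataset = datasets.ImageFolder('./imagenet-val', transform=preprocess)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=5, sampler=RangeSampler(range(0, 25)))
    for images, labels in loader:
        # only samples 0..24 of the dataset are ever yielded
        print(images.shape, labels.shape)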
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# This line only needed if building with NumPy in Cython file.
from numpy import get_include
from os import system
# compile the fortran modules without linking
ext_modules = [Extension(# module name:
'_new_grey',
# source file:
['_grey_gas.pyx'],
# other compile args for gcc
extra_compile_args=['-fPIC', '-O3', '-lgfortran'],
# other files to link to
)]
setup(name = '_new_grey',
cmdclass = {'build_ext': build_ext},
# Needed if building with NumPy.
# This includes the NumPy headers when compiling.
include_dirs = [get_include()],
ext_modules = ext_modules)
|
"""
Requires some installed modules:
pip3 install "msal>=0,<2"
pip3 install "requests>=2,<3"
The configuration file would look like this:
{
"authority": "https://login.microsoftonline.com/YOUR_TENANT_ID / SITE ID",
"client_id": "CLIENT_ID",
"scope": [ "https://graph.microsoft.com/.default" ],
"secret": "CLIENT_SECRET",
"driveId": "YOUR_DRIVE_ID"
}
You can then run this with a JSON configuration file:
python3 example.py
"""
import sys
import json
import logging
import os
import pprint
import requests
import msal
def getToken(config):
app = msal.ConfidentialClientApplication(
config["client_id"], authority=config["authority"],
client_credential=config["secret"]
)
result = None
result = app.acquire_token_silent(config["scope"], account=None)
if not result:
logging.info("No suitable token exists in cache. Let's get a new one from AAD.")
result = app.acquire_token_for_client(scopes=config["scope"])
if "access_token" in result:
return result['access_token']
else:
print(result.get("error"))
print(result.get("error_description"))
print(result.get("correlation_id"))
# end getToken
def uploadFile(session, filename, driveId, folder=None):
# Upload a file to Sharepoint
fnameOnly = os.path.basename(filename)
# create the Graph endpoint to be used
if folder is not None:
endpoint = f'https://graph.microsoft.com/v1.0/drives/{driveId}/root:/{folder}/{fnameOnly}:/createUploadSession'
else:
endpoint = f'https://graph.microsoft.com/v1.0/drives/{driveId}/root:/{fnameOnly}:/createUploadSession'
jsonResponse = session.put(endpoint).json()
uploadUrl = jsonResponse["uploadUrl"]
# upload in chunks
filesize = os.path.getsize(filename)
with open(filename, 'rb') as fhandle:
startByte = 0
while True:
fileContent = fhandle.read(10*1024*1024)
dataLength = len(fileContent)
if dataLength <= 0:
break
endByte = startByte + dataLength - 1
crange = "bytes "+str(startByte)+"-"+str(endByte)+"/"+str(filesize)
print(crange)
chunkResponse = session.put(uploadUrl, headers={"Content-Length": str(dataLength),"Content-Range": crange}, data=fileContent)
if not chunkResponse.ok:
# something went wrong
print(f'<Response [{chunkResponse.status_code}]>')
pprint.pprint(chunkResponse.json())
break
startByte = endByte + 1
return chunkResponse
# end uploadFile
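# A minimal driver sketch (added; the original snippet stops at the helper
# functions). The configuration file name 'parameters.json', the uploaded
# file 'report.pdf' and the 'Reports' folder are illustrative assumptions.
if __name__ == "__main__":
    with open("parameters.json") as cfg:
        config = json.load(cfg)
    token = getToken(config)
    session = requests.Session()
    session.headers.update({'Authorization': 'Bearer ' + token})
    uploadFile(session, "report.pdf", config["driveId"], folder="Reports")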
|
"""Set module shortcuts and globals"""
import logging
from pydicom.uid import UID
from ._version import __version__
_version = __version__.split(".")[:3]
# UID prefix provided by https://www.medicalconnections.co.uk/Free_UID
# Encoded as UI, maximum 64 characters
PYNETDICOM_UID_PREFIX = "1.2.826.0.1.3680043.9.3811."
"""``1.2.826.0.1.3680043.9.3811.``
The UID root used by *pynetdicom*.
"""
# Encoded as SH, maximum 16 characters
PYNETDICOM_IMPLEMENTATION_VERSION: str = f"PYNETDICOM_{''.join(_version)}"
"""The (0002,0013) *Implementation Version Name* used by *pynetdicom*"""
assert 1 <= len(PYNETDICOM_IMPLEMENTATION_VERSION) <= 16
PYNETDICOM_IMPLEMENTATION_UID: UID = UID(f"{PYNETDICOM_UID_PREFIX}{'.'.join(_version)}")
"""The (0002,0012) *Implementation Class UID* used by *pynetdicom*"""
assert PYNETDICOM_IMPLEMENTATION_UID.is_valid
# Convenience imports
from pynetdicom import events as evt
from pynetdicom.ae import ApplicationEntity as AE
from pynetdicom.association import Association
from pynetdicom._globals import (
ALL_TRANSFER_SYNTAXES,
DEFAULT_TRANSFER_SYNTAXES,
)
from pynetdicom.presentation import (
build_context,
build_role,
AllStoragePresentationContexts,
ApplicationEventLoggingPresentationContexts,
BasicWorklistManagementPresentationContexts,
ColorPalettePresentationContexts,
DefinedProcedureProtocolPresentationContexts,
DisplaySystemPresentationContexts,
HangingProtocolPresentationContexts,
ImplantTemplatePresentationContexts,
InstanceAvailabilityPresentationContexts,
MediaCreationManagementPresentationContexts,
MediaStoragePresentationContexts,
ModalityPerformedPresentationContexts,
NonPatientObjectPresentationContexts,
PrintManagementPresentationContexts,
ProcedureStepPresentationContexts,
ProtocolApprovalPresentationContexts,
QueryRetrievePresentationContexts,
RelevantPatientInformationPresentationContexts,
RTMachineVerificationPresentationContexts,
StoragePresentationContexts,
StorageCommitmentPresentationContexts,
SubstanceAdministrationPresentationContexts,
UnifiedProcedurePresentationContexts,
VerificationPresentationContexts,
)
# Setup default logging
logging.getLogger("pynetdicom").addHandler(logging.NullHandler())
def debug_logger() -> None:
"""Setup the logging for debugging."""
logger = logging.getLogger("pynetdicom")
# Ensure only have one StreamHandler
logger.handlers = []
handler = logging.StreamHandler()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname).1s: %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
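# A minimal usage sketch (added; not part of pynetdicom itself). It sends a
# C-ECHO to a Verification SCP assumed to listen on 127.0.0.1:11112; the host,
# the port and the use of debug_logger() are illustrative choices only.
if __name__ == "__main__":
    debug_logger()
    ae = AE()
    ae.requested_contexts = VerificationPresentationContexts
    assoc = ae.associate("127.0.0.1", 11112)
    if assoc.is_established:
        status = assoc.send_c_echo()
        print(status)
        assoc.release()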
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# bachelor-thesis documentation build configuration file, created by
# sphinx-quickstart on Sun Jun 28 20:18:55 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'bachelor-thesis'
copyright = '2015, Dusty Wind'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'bachelor-thesisdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'bachelor-thesis.tex', 'bachelor-thesis Documentation',
'Dusty Wind', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bachelor-thesis', 'bachelor-thesis Documentation',
['Dusty Wind'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bachelor-thesis', 'bachelor-thesis Documentation',
'Dusty Wind', 'bachelor-thesis', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 15:45:29 2020
@author: Francesco Conforte
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sub.minimization as mymin
plt.close('all')
x=pd.read_csv("data/parkinsons_updrs.csv") # read the dataset; x is a dataframe
x.describe().T # gives the statistical description of the content of each column
x.info()
features=list(x.columns)
print(features)
#features=['subject#', 'age', 'sex', 'test_time', 'motor_UPDRS', 'total_UPDRS',
# 'Jitter(%)', 'Jitter(Abs)', 'Jitter:RAP', 'Jitter:PPQ5', 'Jitter:DDP',
# 'Shimmer', 'Shimmer(dB)', 'Shimmer:APQ3', 'Shimmer:APQ5',
# 'Shimmer:APQ11', 'Shimmer:DDA', 'NHR', 'HNR', 'RPDE', 'DFA', 'PPE']
X=x.drop(['subject#','test_time'],axis=1)# drop unwanted features
Np,Nc=X.shape# Np = number of rows/patients, Nc = number of features+1 (total UPDRS is included)
features=list(X.columns)
#%% correlation
Xnorm=(X-X.mean())/X.std()# normalized data
c=Xnorm.cov()# covariance of the normalized data, i.e. the correlation matrix; x.cov() on the unnormalized data would give the wrong result
plt.figure(figsize=(10,10))
plt.matshow(np.abs(c.values),fignum=0)
plt.xticks(np.arange(len(features)), features, rotation=90)
plt.yticks(np.arange(len(features)), features, rotation=0)
plt.colorbar()
plt.title('Covariance matrix of the features',pad=70)
plt.figure()
c.motor_UPDRS.plot()
plt.grid()
plt.xticks(np.arange(len(features)), features, rotation=90)#, **kwargs)
plt.title('corr coeff among motor UPDRS and the other features')
plt.gcf().subplots_adjust(bottom=0.3)
plt.figure()
c.total_UPDRS.plot()
plt.grid()
plt.xticks(np.arange(len(features)), features, rotation=90)#, **kwargs)
plt.title('corr coeff among total UPDRS and the other features')
plt.gcf().subplots_adjust(bottom=0.3)
#%% Generate the shuffled data
np.random.seed(1) # set the seed for random shuffling
indexsh=np.arange(Np)
np.random.shuffle(indexsh)
Xsh=X.copy(deep=True)
Xsh=Xsh.set_axis(indexsh,axis=0,inplace=False)
Xsh=Xsh.sort_index(axis=0)
#%% Generate training, validation and test matrices
Ntr=int(Np*0.5) # number of training points
Nva=int(Np*0.25) # number of validation points
Nte=Np-Ntr-Nva # number of test points
#%% evaluate mean and st.dev. for the training data only
X_tr=Xsh[0:Ntr]# dataframe that contains only the training data
mm=X_tr.mean()# mean (series)
ss=X_tr.std()# standard deviation (series)
my=mm['total_UPDRS']# mean of total UPDRS
sy=ss['total_UPDRS']# st.dev of total UPDRS
#%% Normalize the three subsets
Xsh_norm=(Xsh-mm)/ss# normalized data
ysh_norm=Xsh_norm['total_UPDRS']# regressand only
Xsh_norm=Xsh_norm.drop('total_UPDRS',axis=1)# regressors only
X_tr_norm=Xsh_norm[0:Ntr] #training regressors
X_va_norm=Xsh_norm[Ntr:Ntr+Nva] #validation regressors
X_te_norm=Xsh_norm[Ntr+Nva:] #test regressors
y_tr_norm=ysh_norm[0:Ntr] #training regressand
y_va_norm=ysh_norm[Ntr:Ntr+Nva] #validation regressand
y_te_norm=ysh_norm[Ntr+Nva:] #test regressand
y_norm=[y_tr_norm,y_va_norm,y_te_norm]
out=np.empty((9,))
#%% Linear Least Squares
lls = mymin.SolveLLS(y_tr_norm.values, X_tr_norm.values)
w_hat = lls.run()
#lls.plot_w_hat(X_tr_norm, title='Optimized weights - Linear Least Squares')
#w_hat=lls.sol
E_tr=(y_tr_norm-X_tr_norm@w_hat)*sy# training
E_va=(y_va_norm-X_va_norm@w_hat)*sy# validation
E_te=(y_te_norm-X_te_norm@w_hat)*sy# test
e=[E_tr,E_va,E_te]
y_hat_te_norm=X_te_norm@w_hat
MSE_norm_lls=np.mean((y_hat_te_norm-y_te_norm)**2)
MSE_lls=sy**2*MSE_norm_lls
y_hat_te=y_hat_te_norm*sy+my
y_te=y_te_norm*sy+my
lls.plot_w_hat(X_tr_norm, title='Optimized weights - Linear Least Squares')
lls.plot_hist('LLS',e)
lls.plot_y_hat_vs_y('Linear Least Squares: test',y_te,y_hat_te)
lls.print_result('Linear Least Squares',e,y_norm,sy,my)
#%% Stochastic Gradient with Adam
swa = mymin.SolveStochWithADAM(y_tr_norm.values, X_tr_norm.values,y_va_norm.values,X_va_norm.values)
w_hat = swa.run(gamma=1e-4,Nit=300000,sy=sy)
#swa.plot_w_hat(X_tr_norm, title='Optimized weights - Stochastic Gradient Algorithm with ADAM')
#w_hat=swa.sol
E_tr=(y_tr_norm-X_tr_norm@w_hat)*sy# training
E_va=(y_va_norm-X_va_norm@w_hat)*sy# validation
E_te=(y_te_norm-X_te_norm@w_hat)*sy# test
e=[E_tr,E_va,E_te]
y_hat_te_norm=X_te_norm@w_hat
MSE_norm_swa=np.mean((y_hat_te_norm-y_te_norm)**2)
MSE_swa=sy**2*MSE_norm_swa
y_te=y_te_norm*sy+my
y_hat_te=y_hat_te_norm*sy+my
swa.plot_w_hat(X_tr_norm, title='Optimized weights - Stochastic Gradient Algorithm with ADAM')
swa.plot_hist('Stochastic Gradient with ADAM',e)
swa.plot_err('Stochastic Gradient with ADAM: Mean Squared Error',1,0)
swa.plot_y_hat_vs_y('Stochastic Gradient with ADAM: test',y_te,y_hat_te)
swa.print_result('Stochastic Gradient with ADAM',e,y_norm,sy,my)
#%% Ridge Regression
rr = mymin.SolveRidge(y_tr_norm.values, X_tr_norm.values,y_va_norm.values,X_va_norm.values)
possible_lambdas = np.arange(101)
errors_tr = np.zeros((len(possible_lambdas),2),dtype=float)
errors_val = np.zeros((len(possible_lambdas),2),dtype=float)
for i in possible_lambdas:
w_lambda = rr.run(i)
#w_lambda = rr.sol
errors_tr[i,0]=i
errors_tr[i,1]=sy**2*np.mean((X_tr_norm.values@w_lambda-y_tr_norm.values)**2)
errors_val[i,0]=i
errors_val[i,1]=sy**2*np.mean((X_va_norm.values@w_lambda-y_va_norm.values)**2)
best = np.argmin(errors_val[:,1])
rr.plot_MSEvsLambda(errors_tr, errors_val, best)
w_hat = rr.run(best)
E_tr=(y_tr_norm-X_tr_norm@w_hat)*sy# training
E_va=(y_va_norm-X_va_norm@w_hat)*sy# validation
E_te=(y_te_norm-X_te_norm@w_hat)*sy# test
e=[E_tr,E_va,E_te]
y_hat_te_norm=X_te_norm@w_hat
MSE_norm_rr=np.mean((y_hat_te_norm-y_te_norm)**2)
MSE_rr=sy**2*MSE_norm_rr
y_hat_te=y_hat_te_norm*sy+my
y_te=y_te_norm*sy+my
rr.plot_w_hat(X_tr_norm, title='Optimized weights - Ridge Regression')
rr.plot_hist('Ridge Regression',e)
rr.plot_y_hat_vs_y('Ridge Regression: test',y_te,y_hat_te)
rr.print_result('Ridge Regression',e,y_norm,sy,my)
|
# %%
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options # for suppressing the browser
from selenium import webdriver
import warnings
from bs4 import BeautifulSoup as bs
import webbrowser
from webdriver_manager.chrome import ChromeDriverManager
options = Options()
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--headless")
with warnings.catch_warnings():
warnings.simplefilter('ignore')
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
# remove the options argument if u wanna see the browser open and perform the automated process
# %%
url = 'https://ucalgary.sona-systems.com'
user_ID = '<insert User ID here>'
password = '<insert Password here>'
driver.get(url)
driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_userid").send_keys(user_ID)
driver.find_element(By.ID, "pw").send_keys(password)
element = driver.find_element(
By.ID, "ctl00_ContentPlaceHolder1_default_auth_button")
driver.execute_script("arguments[0].click();", element)
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.ID, "lnkStudySignupLink"))).click()
# %%
html = driver.page_source
home_page = 'https://ucalgary.sona-systems.com/'
soup = bs(html, 'html.parser')
table_row = soup.find('tr').parent.findNextSibling()
study_links = table_row.findAll('a')
links = set()
for link in study_links:
links.add(f'{home_page}{link.get("href")}')
num_of_links = len(links)
if num_of_links == 0:
driver.close()
exit('\nThere are no studies available currently, see u later!')
print(f'\nthere is {num_of_links} available study') if num_of_links == 1 else print(
f'\nthere are {num_of_links} available studies')
# %%
already_completed = 0
for link in links:
    driver.get(link)
link = driver.page_source
soup2 = bs(link, 'html.parser')
if len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin')) == 1:
description = soup2.find(
'span', {'id': 'ctl00_ContentPlaceHolder1_lblLongDesc'}).get_text(' ')
print(description)
if input("\nIf u wanna participate in this study, press Enter, if not, type any letter then press Enter and you will see the next avalable if there is any other") == '':
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkNonAdmin').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_repTimeSlots_ctl00_Submit_Button').click()
driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_Submit_Button').click()
if driver.find_element(By.ID, 'ctl00_SystemMessageLabel').text == 'Sign-up Successful':
if input('\nYou got signed up!, press Enter if u wanna start the research study, otherwise, type any letter then press Enter') == '':
                    if len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite')) != 0:
study_link = driver.find_element(
By.ID, 'ctl00_ContentPlaceHolder1_lnkWebsite').get_attribute('href')
driver.close()
else:
driver.close()
exit('Seems like the study is not online, if the study is actually online and you got this message please contact me so I can fix this problem')
print('\nEnjoy!')
webbrowser.open(study_link)
else:
print(
                            '\nYou should receive an email anytime now with the research link, have a wonderful day')
else:
print(
"Either there's a problem with the code or the sign up was unsucessful, probably the former lol, plz lmk if u got this error")
elif len(driver.find_elements(By.ID, 'ctl00_ContentPlaceHolder1_lblNonAdmin')) == 1:
already_completed += 1
driver.close()
if already_completed > 0:
print('You have already completed all of the studies available') if already_completed == num_of_links else print(
f'You have already completed {already_completed} of the studies available')
|
from django.db import models
class repairOrder(models.Model):
    begintime = models.DateField(auto_now_add=True, verbose_name="Start time")
    state = models.BooleanField(verbose_name="Completion status", default=False)
    worker = models.ForeignKey("login.User", on_delete=models.CASCADE, related_name='worker', verbose_name="Assigned worker", default=None)
    rentorder = models.ForeignKey("rent.rentOrder", on_delete=models.CASCADE, related_name='rentorder', verbose_name="Associated rental order", default=None)
    repair_ownUser = models.ForeignKey("login.User", on_delete=models.CASCADE, related_name='repair_ownUser', verbose_name="Landlord", default=None)
    repair_paidUser = models.ForeignKey("login.User", on_delete=models.CASCADE, related_name='repair_paidUser', verbose_name="Tenant", default=None)
    house = models.ForeignKey("house.house", on_delete=models.CASCADE, default=None)
    content = models.CharField(verbose_name="Repair request details", max_length=256, default=None)
    response = models.CharField(verbose_name="Response", max_length=256, default=None)
class subscribe(models.Model):
owner = models.ForeignKey("login.User",on_delete=models.CASCADE,related_name='owner',verbose_name="投诉者",default=None)
content = models.CharField(verbose_name="投诉内容", max_length=256,default=None)
response = models.CharField(verbose_name="回复内容", max_length=256,default=None)
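# --- Hedged usage sketch (illustration only; not part of the original app) ---
# A minimal example of how these models might be queried, assuming the
# referenced "login.User" model exists; `some_worker` and `some_user` stand in
# for User instances and the helper names below are made up for this example.
def open_repairs_for(some_worker):
    """Unfinished repair orders assigned to a worker."""
    return repairOrder.objects.filter(worker=some_worker, state=False)
def complaints_by(some_user):
    """All complaints filed by a user."""
    return subscribe.objects.filter(owner=some_user)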
|
import sys
if "" not in sys.path: sys.path.append("")
if "src" not in sys.path: sys.path.append("src")
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Window(QMainWindow):
def __init__(self):
super().__init__()
# setting title
self.setWindowTitle("Python ")
# setting geometry
self.setGeometry(100, 100, 1200, 800)
# calling method
self.UiComponents()
# showing all the widgets
self.showMaximized()
# method for widgets
def UiComponents(self):
# creating label
label = QLabel("Label", self)
# setting geometry to label
label.setGeometry(100, 100, 120, 40)
# adding border to label
label.setStyleSheet("border : 2px solid black")
# opening window in maximized size
self.showMaximized()
def ScanSelection(paths, settings):
raise UserWarning("The scan selection GUI is not implemented yet!")
if __name__ == "__main__":
ScanSelection(..., ...)
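# --- Hedged usage sketch (illustration only; not part of the original module) ---
# The __main__ block above only calls the unimplemented ScanSelection stub, so
# the Window class is never shown. A standard PyQt5 entry point for it would
# look like this (the function name is made up for the example):
def run_window_demo():
    """Launch the Window inside a Qt event loop."""
    app = QApplication(sys.argv)   # every PyQt5 GUI needs exactly one QApplication
    window = Window()              # __init__ already calls showMaximized()
    sys.exit(app.exec_())          # start the event loop and exit with its return code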
|
from abc import ABC, abstractmethod
class AuthTypeBase(ABC):
"""Base type for all authentication types."""
def __init__(self):
super().__init__()
@abstractmethod
def is_valid_authentication_type(self):
"""Return True if the auth type is valid, e.g. it can return userinfo and username.
(AuthTypeNone is the only one type that returns False)"""
pass
def requires_client_login(self):
"""Return True if the user needs to login from the client (e.g. Login button is shown)"""
return False
@abstractmethod
def complete_setup(self, app):
"""complete any setup that may be needed by this auth type. The Flask app is passed in.
This is the last auth function called before the server starts to run."""
pass
@abstractmethod
def is_user_authenticated(self):
"""Return True if the user is authenticated"""
pass
@abstractmethod
def get_user_id(self):
"""Return the id for this user (string)"""
pass
@abstractmethod
def get_user_name(self):
"""Return the name of the user (string)"""
pass
@abstractmethod
def get_user_email(self):
"""Return the name of the user (string)"""
pass
def get_user_picture(self):
"""Return the location to the user's picture"""
return None
class AuthTypeClientBase(AuthTypeBase):
"""Base type for all authentication types that require the client to login"""
def __init__(self):
super().__init__()
def requires_client_login(self):
return True
@abstractmethod
    def add_url_rules(self, app):
"""Add url rules to the app (like /login, /logout, etc)"""
pass
@abstractmethod
def get_login_url(self, data_adaptor):
"""Return the url for the login route"""
pass
@abstractmethod
def get_logout_url(self, data_adaptor):
"""Return the url for the logout route"""
pass
class AuthTypeFactory:
"""Factory class to create an authentication type"""
auth_types = {}
@staticmethod
def register(name, auth_type):
assert issubclass(auth_type, AuthTypeBase)
AuthTypeFactory.auth_types[name] = auth_type
@staticmethod
def create(name, app_config):
auth_type = AuthTypeFactory.auth_types.get(name)
if auth_type is None:
return None
return auth_type(app_config)
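# --- Hedged usage sketch (illustration only; not part of the original module) ---
# A minimal concrete auth type and how it is wired through AuthTypeFactory.
# The class name, the "anonymous" key and the app_config argument are
# assumptions made for this example.
class AuthTypeAnonymous(AuthTypeBase):
    """Example auth type that treats every request as one anonymous user."""
    def __init__(self, app_config):
        super().__init__()
        self.app_config = app_config
    def is_valid_authentication_type(self):
        return True
    def complete_setup(self, app):
        pass  # nothing extra to register on the Flask app
    def is_user_authenticated(self):
        return True
    def get_user_id(self):
        return "anonymous"
    def get_user_name(self):
        return "Anonymous"
    def get_user_email(self):
        return "anonymous@example.com"
# Register the type under a name, then build an instance from configuration:
# AuthTypeFactory.register("anonymous", AuthTypeAnonymous)
# auth = AuthTypeFactory.create("anonymous", app_config={})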
|
import unittest
import os
import threading
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from http.server import BaseHTTPRequestHandler, HTTPServer
from google.cloud import bigquery
from google.auth.exceptions import DefaultCredentialsError
from kaggle_gcp import KaggleKernelCredentials, PublicBigqueryClient
class TestBigQuery(unittest.TestCase):
def _test_proxy(self, client, should_use_proxy):
class HTTPHandler(BaseHTTPRequestHandler):
called = False
header_found = False
def do_HEAD(s):
s.send_response(200)
def do_GET(s):
HTTPHandler.called = True
HTTPHandler.header_found = any(k for k in s.headers if k == "X-KAGGLE-PROXY-DATA" and s.headers[k] == "test-key")
s.send_response(200)
server_address = urlparse(os.getenv('KAGGLE_DATA_PROXY_URL'))
with HTTPServer((server_address.hostname, server_address.port), HTTPHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
try:
for ds in client.list_datasets(): pass
            except Exception:
                # Errors here are expected; the test only checks whether the
                # request was routed through the fake proxy server above.
                pass
httpd.shutdown()
if should_use_proxy:
self.assertTrue(HTTPHandler.called, msg="Fake server did not receive a request from the BQ client.")
self.assertTrue(HTTPHandler.header_found, msg="X-KAGGLE-PROXY-DATA header was missing from the BQ request.")
else:
self.assertFalse(HTTPHandler.called, msg="Fake server was called from the BQ client, but should not have been.")
def test_proxy_using_library(self):
env = EnvironmentVarGuard()
env.unset('KAGGLE_BQ_USER_JWT')
with env:
client = PublicBigqueryClient()
self._test_proxy(client, should_use_proxy=True)
def test_proxy_no_project(self):
env = EnvironmentVarGuard()
env.unset('KAGGLE_BQ_USER_JWT')
with env:
client = bigquery.Client()
self._test_proxy(client, should_use_proxy=True)
def test_project_with_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
client = bigquery.Client(project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_proxy(client, should_use_proxy=False)
def test_simultaneous_clients(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
proxy_client = bigquery.Client()
self._test_proxy(proxy_client, should_use_proxy=True)
bq_client = bigquery.Client(project='ANOTHER_PROJECT', credentials=KaggleKernelCredentials())
self._test_proxy(bq_client, should_use_proxy=False)
# Verify that proxy client is still going to proxy to ensure global Connection
# isn't being modified.
self._test_proxy(proxy_client, should_use_proxy=True)
def test_no_project_with_connected_account(self):
env = EnvironmentVarGuard()
env.set('KAGGLE_BQ_USER_JWT', 'foobar')
with env:
with self.assertRaises(DefaultCredentialsError):
# TODO(vimota): Handle this case, either default to Kaggle Proxy or use some default project
# by the user or throw a custom exception.
client = bigquery.Client(credentials=KaggleKernelCredentials())
self._test_proxy(client, should_use_proxy=False)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._storages_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StoragesOperations:
"""StoragesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2022_03_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
**kwargs: Any
) -> "_models.StorageResource":
"""Get the storage resource.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param storage_name: The name of the storage resource.
:type storage_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_03_01_preview.models.StorageResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
storage_resource: "_models.StorageResource",
**kwargs: Any
) -> "_models.StorageResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(storage_resource, 'StorageResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('StorageResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('StorageResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('StorageResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
storage_resource: "_models.StorageResource",
**kwargs: Any
) -> AsyncLROPoller["_models.StorageResource"]:
"""Create or update storage resource.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param storage_name: The name of the storage resource.
:type storage_name: str
:param storage_resource: Parameters for the create or update operation.
:type storage_resource: ~azure.mgmt.appplatform.v2022_03_01_preview.models.StorageResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageResource or the result of
cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.appplatform.v2022_03_01_preview.models.StorageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
storage_resource=storage_resource,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('StorageResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete the storage resource.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param storage_name: The name of the storage resource.
:type storage_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
service_name: str,
**kwargs: Any
) -> AsyncIterable["_models.StorageResourceCollection"]:
"""List all the storages of one Azure Spring Cloud instance.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageResourceCollection or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.appplatform.v2022_03_01_preview.models.StorageResourceCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResourceCollection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("StorageResourceCollection", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages'} # type: ignore
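# --- Hedged usage sketch (illustration only; not part of the generated client code) ---
# How the three operation shapes above are typically consumed, assuming `ops` is
# the StoragesOperations instance exposed by a management client (for example
# client.storages) and the resource names below are placeholders.
async def _storages_demo(ops):
    # get() resolves directly to the deserialized StorageResource.
    storage = await ops.get("my-rg", "my-spring-service", "my-storage")
    print(storage.name)
    # list() returns an AsyncItemPaged, consumed with `async for`.
    async for item in ops.list("my-rg", "my-spring-service"):
        print(item.name)
    # begin_delete() returns an AsyncLROPoller; awaiting result() waits until the
    # long-running operation completes.
    poller = await ops.begin_delete("my-rg", "my-spring-service", "my-storage")
    await poller.result()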
|
# -*- coding: UTF-8 -*-
# File Name:ada_boost_tf
# Author : Chen Quan
# Date:2019/2/18
# Description : Use TensorFlow to implement AdaBoost Algorithm.
__author__ = 'Chen Quan'
"""
暂未实现
"""
|
"""Tests for algorithms for computing symbolic roots of polynomials. """
from sympy import (S, symbols, Symbol, Wild, Integer, Rational, sqrt,
powsimp, Lambda, sin, cos, pi, I, Interval, re, im, exp, ZZ, Piecewise,
acos, default_sort_key, root)
from sympy.polys import (Poly, cyclotomic_poly, intervals, nroots, RootOf,
PolynomialError)
from sympy.polys.polyroots import (root_factors, roots_linear,
roots_quadratic, roots_cubic, roots_quartic, roots_cyclotomic,
roots_binomial, preprocess_roots, roots)
from sympy.polys.orthopolys import legendre_poly
from sympy.polys.polyutils import _nsort
from sympy.utilities.iterables import cartes
from sympy.utilities.pytest import raises, XFAIL
from sympy.utilities.randtest import verify_numerically
import sympy
a, b, c, d, e, q, t, x, y, z = symbols('a,b,c,d,e,q,t,x,y,z')
def test_roots_linear():
assert roots_linear(Poly(2*x + 1, x)) == [-Rational(1, 2)]
def test_roots_quadratic():
assert roots_quadratic(Poly(2*x**2, x)) == [0, 0]
assert roots_quadratic(Poly(2*x**2 + 3*x, x)) == [-Rational(3, 2), 0]
assert roots_quadratic(Poly(2*x**2 + 3, x)) == [-I*sqrt(6)/2, I*sqrt(6)/2]
assert roots_quadratic(Poly(2*x**2 + 4*x + 3, x)) == [-1 - I*sqrt(2)/2, -1 + I*sqrt(2)/2]
f = x**2 + (2*a*e + 2*c*e)/(a - c)*x + (d - b + a*e**2 - c*e**2)/(a - c)
assert roots_quadratic(Poly(f, x)) == \
[-e*(a + c)/(a - c) - sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2)/(a - c)**2),
-e*(a + c)/(a - c) + sqrt((a*b + c*d - a*d - b*c + 4*a*c*e**2)/(a - c)**2)]
# check for simplification
f = Poly(y*x**2 - 2*x - 2*y, x)
assert roots_quadratic(f) == \
[-sqrt(2*y**2 + 1)/y + 1/y, sqrt(2*y**2 + 1)/y + 1/y]
f = Poly(x**2 + (-y**2 - 2)*x + y**2 + 1, x)
assert roots_quadratic(f) == \
[y**2/2 - sqrt(y**4)/2 + 1, y**2/2 + sqrt(y**4)/2 + 1]
f = Poly(sqrt(2)*x**2 - 1, x)
r = roots_quadratic(f)
assert r == _nsort(r)
# issue 8255
f = Poly(-24*x**2 - 180*x + 264)
assert [w.n(2) for w in f.all_roots(radicals=True)] == \
[w.n(2) for w in f.all_roots(radicals=False)]
for _a, _b, _c in cartes((-2, 2), (-2, 2), (0, -1)):
f = Poly(_a*x**2 + _b*x + _c)
roots = roots_quadratic(f)
assert roots == _nsort(roots)
def test_issue_8438():
p = Poly([1, y, -2, -3], x).as_expr()
roots = roots_cubic(Poly(p, x), x)
z = -S(3)/2 - 7*I/2 # this will fail in code given in commit msg
post = [r.subs(y, z) for r in roots]
assert set(post) == \
set(roots_cubic(Poly(p.subs(y, z), x)))
# /!\ if p is not made an expression, this is *very* slow
assert all(p.subs({y: z, x: i}).n(2, chop=True) == 0 for i in post)
def test_issue_8285():
roots = (Poly(4*x**8 - 1, x)*Poly(x**2 + 1)).all_roots()
assert roots == _nsort(roots)
f = Poly(x**4 + 5*x**2 + 6, x)
ro = [RootOf(f, i) for i in range(4)]
roots = Poly(x**4 + 5*x**2 + 6, x).all_roots()
assert roots == ro
assert roots == _nsort(roots)
# more than 2 complex roots from which to identify the
# imaginary ones
roots = Poly(2*x**8 - 1).all_roots()
assert roots == _nsort(roots)
assert len(Poly(2*x**10 - 1).all_roots()) == 10 # doesn't fail
def test_issue_8289():
roots = (Poly(x**2 + 2)*Poly(x**4 + 2)).all_roots()
assert roots == _nsort(roots)
roots = Poly(x**6 + 3*x**3 + 2, x).all_roots()
assert roots == _nsort(roots)
roots = Poly(x**6 - x + 1).all_roots()
assert roots == _nsort(roots)
# all imaginary roots
roots = Poly(x**4 + 4*x**2 + 4, x).all_roots()
assert roots == _nsort(roots)
def test_roots_cubic():
assert roots_cubic(Poly(2*x**3, x)) == [0, 0, 0]
assert roots_cubic(Poly(x**3 - 3*x**2 + 3*x - 1, x)) == [1, 1, 1]
assert roots_cubic(Poly(x**3 + 1, x)) == \
[-1, S.Half - I*sqrt(3)/2, S.Half + I*sqrt(3)/2]
assert roots_cubic(Poly(2*x**3 - 3*x**2 - 3*x - 1, x))[0] == \
S.Half + 3**Rational(1, 3)/2 + 3**Rational(2, 3)/2
eq = -x**3 + 2*x**2 + 3*x - 2
assert roots(eq, trig=True, multiple=True) == \
roots_cubic(Poly(eq, x), trig=True) == [
S(2)/3 + 2*sqrt(13)*cos(acos(8*sqrt(13)/169)/3)/3,
-2*sqrt(13)*sin(-acos(8*sqrt(13)/169)/3 + pi/6)/3 + S(2)/3,
-2*sqrt(13)*cos(-acos(8*sqrt(13)/169)/3 + pi/3)/3 + S(2)/3,
]
def test_roots_quartic():
assert roots_quartic(Poly(x**4, x)) == [0, 0, 0, 0]
assert roots_quartic(Poly(x**4 + x**3, x)) in [
[-1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1]
]
assert roots_quartic(Poly(x**4 - x**3, x)) in [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]
]
lhs = roots_quartic(Poly(x**4 + x, x))
rhs = [S.Half + I*sqrt(3)/2, S.Half - I*sqrt(3)/2, S.Zero, -S.One]
assert sorted(lhs, key=hash) == sorted(rhs, key=hash)
# test of all branches of roots quartic
for i, (a, b, c, d) in enumerate([(1, 2, 3, 0),
(3, -7, -9, 9),
(1, 2, 3, 4),
(1, 2, 3, 4),
(-7, -3, 3, -6),
(-3, 5, -6, -4),
(6, -5, -10, -3)]):
if i == 2:
c = -a*(a**2/S(8) - b/S(2))
elif i == 3:
d = a*(a*(3*a**2/S(256) - b/S(16)) + c/S(4))
eq = x**4 + a*x**3 + b*x**2 + c*x + d
ans = roots_quartic(Poly(eq, x))
assert all(eq.subs(x, ai).n(chop=True) == 0 for ai in ans)
# not all symbolic quartics are unresolvable
eq = Poly(q*x + q/4 + x**4 + x**3 + 2*x**2 - Rational(1, 3), x)
sol = roots_quartic(eq)
assert all(verify_numerically(eq.subs(x, i), 0) for i in sol)
z = symbols('z', negative=True)
eq = x**4 + 2*x**3 + 3*x**2 + x*(z + 11) + 5
zans = roots_quartic(Poly(eq, x))
assert all([verify_numerically(eq.subs(((x, i), (z, -1))), 0) for i in zans])
# but some are (see also issue 4989)
# it's ok if the solution is not Piecewise, but the tests below should pass
eq = Poly(y*x**4 + x**3 - x + z, x)
ans = roots_quartic(eq)
assert all(type(i) == Piecewise for i in ans)
reps = (
dict(y=-Rational(1, 3), z=-Rational(1, 4)), # 4 real
dict(y=-Rational(1, 3), z=-Rational(1, 2)), # 2 real
dict(y=-Rational(1, 3), z=-2)) # 0 real
for rep in reps:
sol = roots_quartic(Poly(eq.subs(rep), x))
assert all([verify_numerically(w.subs(rep) - s, 0) for w, s in zip(ans, sol)])
def test_roots_cyclotomic():
assert roots_cyclotomic(cyclotomic_poly(1, x, polys=True)) == [1]
assert roots_cyclotomic(cyclotomic_poly(2, x, polys=True)) == [-1]
assert roots_cyclotomic(cyclotomic_poly(
3, x, polys=True)) == [-S(1)/2 - I*sqrt(3)/2, -S(1)/2 + I*sqrt(3)/2]
assert roots_cyclotomic(cyclotomic_poly(4, x, polys=True)) == [-I, I]
assert roots_cyclotomic(cyclotomic_poly(
6, x, polys=True)) == [S(1)/2 - I*sqrt(3)/2, S(1)/2 + I*sqrt(3)/2]
assert roots_cyclotomic(cyclotomic_poly(7, x, polys=True)) == [
-cos(pi/7) - I*sin(pi/7),
-cos(pi/7) + I*sin(pi/7),
-cos(3*pi/7) - I*sin(3*pi/7),
-cos(3*pi/7) + I*sin(3*pi/7),
cos(2*pi/7) - I*sin(2*pi/7),
cos(2*pi/7) + I*sin(2*pi/7),
]
assert roots_cyclotomic(cyclotomic_poly(8, x, polys=True)) == [
-sqrt(2)/2 - I*sqrt(2)/2,
-sqrt(2)/2 + I*sqrt(2)/2,
sqrt(2)/2 - I*sqrt(2)/2,
sqrt(2)/2 + I*sqrt(2)/2,
]
assert roots_cyclotomic(cyclotomic_poly(12, x, polys=True)) == [
-sqrt(3)/2 - I/2,
-sqrt(3)/2 + I/2,
sqrt(3)/2 - I/2,
sqrt(3)/2 + I/2,
]
assert roots_cyclotomic(
cyclotomic_poly(1, x, polys=True), factor=True) == [1]
assert roots_cyclotomic(
cyclotomic_poly(2, x, polys=True), factor=True) == [-1]
assert roots_cyclotomic(cyclotomic_poly(3, x, polys=True), factor=True) == \
[-root(-1, 3), -1 + root(-1, 3)]
assert roots_cyclotomic(cyclotomic_poly(4, x, polys=True), factor=True) == \
[-I, I]
assert roots_cyclotomic(cyclotomic_poly(5, x, polys=True), factor=True) == \
[-root(-1, 5), -root(-1, 5)**3, root(-1, 5)**2, -1 - root(-1, 5)**2 + root(-1, 5) + root(-1, 5)**3]
assert roots_cyclotomic(cyclotomic_poly(6, x, polys=True), factor=True) == \
[1 - root(-1, 3), root(-1, 3)]
def test_roots_binomial():
assert roots_binomial(Poly(5*x, x)) == [0]
assert roots_binomial(Poly(5*x**4, x)) == [0, 0, 0, 0]
assert roots_binomial(Poly(5*x + 2, x)) == [-Rational(2, 5)]
A = 10**Rational(3, 4)/10
assert roots_binomial(Poly(5*x**4 + 2, x)) == \
[-A - A*I, -A + A*I, A - A*I, A + A*I]
a1 = Symbol('a1', nonnegative=True)
b1 = Symbol('b1', nonnegative=True)
r0 = roots_quadratic(Poly(a1*x**2 + b1, x))
r1 = roots_binomial(Poly(a1*x**2 + b1, x))
assert powsimp(r0[0]) == powsimp(r1[0])
assert powsimp(r0[1]) == powsimp(r1[1])
for a, b, s, n in cartes((1, 2), (1, 2), (-1, 1), (2, 3, 4, 5)):
if a == b and a != 1: # a == b == 1 is sufficient
continue
p = Poly(a*x**n + s*b)
roots = roots_binomial(p)
assert roots == _nsort(roots)
def test_roots_preprocessing():
f = a*y*x**2 + y - b
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 1
assert poly == Poly(a*y*x**2 + y - b, x)
f = c**3*x**3 + c**2*x**2 + c*x + a
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 1/c
assert poly == Poly(x**3 + x**2 + x + a, x)
f = c**3*x**3 + c**2*x**2 + a
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 1/c
assert poly == Poly(x**3 + x**2 + a, x)
f = c**3*x**3 + c*x + a
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 1/c
assert poly == Poly(x**3 + x + a, x)
f = c**3*x**3 + a
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 1/c
assert poly == Poly(x**3 + a, x)
E, F, J, L = symbols("E,F,J,L")
f = -21601054687500000000*E**8*J**8/L**16 + \
508232812500000000*F*x*E**7*J**7/L**14 - \
4269543750000000*E**6*F**2*J**6*x**2/L**12 + \
16194716250000*E**5*F**3*J**5*x**3/L**10 - \
27633173750*E**4*F**4*J**4*x**4/L**8 + \
14840215*E**3*F**5*J**3*x**5/L**6 + \
54794*E**2*F**6*J**2*x**6/(5*L**4) - \
1153*E*J*F**7*x**7/(80*L**2) + \
633*F**8*x**8/160000
coeff, poly = preprocess_roots(Poly(f, x))
assert coeff == 20*E*J/(F*L**2)
assert poly == 633*x**8 - 115300*x**7 + 4383520*x**6 + 296804300*x**5 - 27633173750*x**4 + \
809735812500*x**3 - 10673859375000*x**2 + 63529101562500*x - 135006591796875
f = Poly(-y**2 + x**2*exp(x), y, domain=ZZ[x, exp(x)])
g = Poly(-y**2 + exp(x), y, domain=ZZ[exp(x)])
assert preprocess_roots(f) == (x, g)
def test_roots0():
assert roots(1, x) == {}
assert roots(x, x) == {S.Zero: 1}
assert roots(x**9, x) == {S.Zero: 9}
assert roots(((x - 2)*(x + 3)*(x - 4)).expand(), x) == {-S(3): 1, S(2): 1, S(4): 1}
assert roots(2*x + 1, x) == {-S.Half: 1}
assert roots((2*x + 1)**2, x) == {-S.Half: 2}
assert roots((2*x + 1)**5, x) == {-S.Half: 5}
assert roots((2*x + 1)**10, x) == {-S.Half: 10}
assert roots(x**4 - 1, x) == {I: 1, S.One: 1, -S.One: 1, -I: 1}
assert roots((x**4 - 1)**2, x) == {I: 2, S.One: 2, -S.One: 2, -I: 2}
assert roots(((2*x - 3)**2).expand(), x) == { Rational(3, 2): 2}
assert roots(((2*x + 3)**2).expand(), x) == {-Rational(3, 2): 2}
assert roots(((2*x - 3)**3).expand(), x) == { Rational(3, 2): 3}
assert roots(((2*x + 3)**3).expand(), x) == {-Rational(3, 2): 3}
assert roots(((2*x - 3)**5).expand(), x) == { Rational(3, 2): 5}
assert roots(((2*x + 3)**5).expand(), x) == {-Rational(3, 2): 5}
assert roots(((a*x - b)**5).expand(), x) == { b/a: 5}
assert roots(((a*x + b)**5).expand(), x) == {-b/a: 5}
assert roots(x**2 + (-a - 1)*x + a, x) == {a: 1, S.One: 1}
assert roots(x**4 - 2*x**2 + 1, x) == {S.One: 2, -S.One: 2}
assert roots(x**6 - 4*x**4 + 4*x**3 - x**2, x) == \
{S.One: 2, -1 - sqrt(2): 1, S.Zero: 2, -1 + sqrt(2): 1}
assert roots(x**8 - 1, x) == {
sqrt(2)/2 + I*sqrt(2)/2: 1,
sqrt(2)/2 - I*sqrt(2)/2: 1,
-sqrt(2)/2 + I*sqrt(2)/2: 1,
-sqrt(2)/2 - I*sqrt(2)/2: 1,
S.One: 1, -S.One: 1, I: 1, -I: 1
}
f = -2016*x**2 - 5616*x**3 - 2056*x**4 + 3324*x**5 + 2176*x**6 - \
224*x**7 - 384*x**8 - 64*x**9
assert roots(f) == {S(0): 2, -S(2): 2, S(2): 1, -S(7)/2: 1, -S(3)/2: 1, -S(1)/2: 1, S(3)/2: 1}
assert roots((a + b + c)*x - (a + b + c + d), x) == {(a + b + c + d)/(a + b + c): 1}
assert roots(x**3 + x**2 - x + 1, x, cubics=False) == {}
assert roots(((x - 2)*(
x + 3)*(x - 4)).expand(), x, cubics=False) == {-S(3): 1, S(2): 1, S(4): 1}
assert roots(((x - 2)*(x + 3)*(x - 4)*(x - 5)).expand(), x, cubics=False) == \
{-S(3): 1, S(2): 1, S(4): 1, S(5): 1}
assert roots(x**3 + 2*x**2 + 4*x + 8, x) == {-S(2): 1, -2*I: 1, 2*I: 1}
assert roots(x**3 + 2*x**2 + 4*x + 8, x, cubics=True) == \
{-2*I: 1, 2*I: 1, -S(2): 1}
assert roots((x**2 - x)*(x**3 + 2*x**2 + 4*x + 8), x ) == \
{S(1): 1, S(0): 1, -S(2): 1, -2*I: 1, 2*I: 1}
r1_2, r1_3 = Rational(1, 2), Rational(1, 3)
x0 = (3*sqrt(33) + 19)**r1_3
x1 = 4/x0/3
x2 = x0/3
x3 = sqrt(3)*I/2
x4 = x3 - r1_2
x5 = -x3 - r1_2
assert roots(x**3 + x**2 - x + 1, x, cubics=True) == {
-x1 - x2 - r1_3: 1,
-x1/x4 - x2*x4 - r1_3: 1,
-x1/x5 - x2*x5 - r1_3: 1,
}
f = (x**2 + 2*x + 3).subs(x, 2*x**2 + 3*x).subs(x, 5*x - 4)
r13_20, r1_20 = [ Rational(*r)
for r in ((13, 20), (1, 20)) ]
s2 = sqrt(2)
assert roots(f, x) == {
r13_20 + r1_20*sqrt(1 - 8*I*s2): 1,
r13_20 - r1_20*sqrt(1 - 8*I*s2): 1,
r13_20 + r1_20*sqrt(1 + 8*I*s2): 1,
r13_20 - r1_20*sqrt(1 + 8*I*s2): 1,
}
f = x**4 + x**3 + x**2 + x + 1
r1_4, r1_8, r5_8 = [ Rational(*r) for r in ((1, 4), (1, 8), (5, 8)) ]
assert roots(f, x) == {
-r1_4 + r1_4*5**r1_2 + I*(r5_8 + r1_8*5**r1_2)**r1_2: 1,
-r1_4 + r1_4*5**r1_2 - I*(r5_8 + r1_8*5**r1_2)**r1_2: 1,
-r1_4 - r1_4*5**r1_2 + I*(r5_8 - r1_8*5**r1_2)**r1_2: 1,
-r1_4 - r1_4*5**r1_2 - I*(r5_8 - r1_8*5**r1_2)**r1_2: 1,
}
f = z**3 + (-2 - y)*z**2 + (1 + 2*y - 2*x**2)*z - y + 2*x**2
assert roots(f, z) == {
S.One: 1,
S.Half + S.Half*y + S.Half*sqrt(1 - 2*y + y**2 + 8*x**2): 1,
S.Half + S.Half*y - S.Half*sqrt(1 - 2*y + y**2 + 8*x**2): 1,
}
assert roots(a*b*c*x**3 + 2*x**2 + 4*x + 8, x, cubics=False) == {}
assert roots(a*b*c*x**3 + 2*x**2 + 4*x + 8, x, cubics=True) != {}
assert roots(x**4 - 1, x, filter='Z') == {S.One: 1, -S.One: 1}
assert roots(x**4 - 1, x, filter='I') == {I: 1, -I: 1}
assert roots((x - 1)*(x + 1), x) == {S.One: 1, -S.One: 1}
assert roots(
(x - 1)*(x + 1), x, predicate=lambda r: r.is_positive) == {S.One: 1}
assert roots(x**4 - 1, x, filter='Z', multiple=True) == [-S.One, S.One]
assert roots(x**4 - 1, x, filter='I', multiple=True) == [I, -I]
assert roots(x**3, x, multiple=True) == [S.Zero, S.Zero, S.Zero]
assert roots(1234, x, multiple=True) == []
f = x**6 - x**5 + x**4 - x**3 + x**2 - x + 1
assert roots(f) == {
-I*sin(pi/7) + cos(pi/7): 1,
-I*sin(2*pi/7) - cos(2*pi/7): 1,
-I*sin(3*pi/7) + cos(3*pi/7): 1,
I*sin(pi/7) + cos(pi/7): 1,
I*sin(2*pi/7) - cos(2*pi/7): 1,
I*sin(3*pi/7) + cos(3*pi/7): 1,
}
g = ((x**2 + 1)*f**2).expand()
assert roots(g) == {
-I*sin(pi/7) + cos(pi/7): 2,
-I*sin(2*pi/7) - cos(2*pi/7): 2,
-I*sin(3*pi/7) + cos(3*pi/7): 2,
I*sin(pi/7) + cos(pi/7): 2,
I*sin(2*pi/7) - cos(2*pi/7): 2,
I*sin(3*pi/7) + cos(3*pi/7): 2,
-I: 1, I: 1,
}
r = roots(x**3 + 40*x + 64)
real_root = [rx for rx in r if rx.is_real][0]
cr = 108 + 6*sqrt(1074)
assert real_root == -2*root(cr, 3)/3 + 20/root(cr, 3)
eq = Poly((7 + 5*sqrt(2))*x**3 + (-6 - 4*sqrt(2))*x**2 + (-sqrt(2) - 1)*x + 2, x, domain='EX')
assert roots(eq) == {-1 + sqrt(2): 1, -2 + 2*sqrt(2): 1, -sqrt(2) + 1: 1}
eq = Poly(41*x**5 + 29*sqrt(2)*x**5 - 153*x**4 - 108*sqrt(2)*x**4 +
175*x**3 + 125*sqrt(2)*x**3 - 45*x**2 - 30*sqrt(2)*x**2 - 26*sqrt(2)*x -
26*x + 24, x, domain='EX')
assert roots(eq) == {-sqrt(2) + 1: 1, -2 + 2*sqrt(2): 1, -1 + sqrt(2): 1,
-4 + 4*sqrt(2): 1, -3 + 3*sqrt(2): 1}
eq = Poly(x**3 - 2*x**2 + 6*sqrt(2)*x**2 - 8*sqrt(2)*x + 23*x - 14 +
14*sqrt(2), x, domain='EX')
assert roots(eq) == {-2*sqrt(2) + 2: 1, -2*sqrt(2) + 1: 1, -2*sqrt(2) - 1: 1}
assert roots(Poly((x + sqrt(2))**3 - 7, x, domain='EX')) == \
{-sqrt(2) - root(7, 3)/2 - sqrt(3)*root(7, 3)*I/2: 1,
-sqrt(2) - root(7, 3)/2 + sqrt(3)*root(7, 3)*I/2: 1,
-sqrt(2) + root(7, 3): 1}
def test_roots_slow():
"""Just test that calculating these roots does not hang. """
a, b, c, d, x = symbols("a,b,c,d,x")
f1 = x**2*c + (a/b) + x*c*d - a
f2 = x**2*(a + b*(c - d)*a) + x*a*b*c/(b*d - d) + (a*d - c/d)
assert list(roots(f1, x).values()) == [1, 1]
assert list(roots(f2, x).values()) == [1, 1]
(zz, yy, xx, zy, zx, yx, k) = symbols("zz,yy,xx,zy,zx,yx,k")
e1 = (zz - k)*(yy - k)*(xx - k) + zy*yx*zx + zx - zy - yx
e2 = (zz - k)*yx*yx + zx*(yy - k)*zx + zy*zy*(xx - k)
assert list(roots(e1 - e2, k).values()) == [1, 1, 1]
f = x**3 + 2*x**2 + 8
R = list(roots(f).keys())
assert not any(i for i in [f.subs(x, ri).n(chop=True) for ri in R])
def test_roots_inexact():
R1 = roots(x**2 + x + 1, x, multiple=True)
R2 = roots(x**2 + x + 1.0, x, multiple=True)
for r1, r2 in zip(R1, R2):
assert abs(r1 - r2) < 1e-12
f = x**4 + 3.0*sqrt(2.0)*x**3 - (78.0 + 24.0*sqrt(3.0))*x**2 \
+ 144.0*(2*sqrt(3.0) + 9.0)
R1 = roots(f, multiple=True)
R2 = (-12.7530479110482, -3.85012393732929,
4.89897948556636, 7.46155167569183)
for r1, r2 in zip(R1, R2):
assert abs(r1 - r2) < 1e-10
def test_roots_preprocessed():
E, F, J, L = symbols("E,F,J,L")
f = -21601054687500000000*E**8*J**8/L**16 + \
508232812500000000*F*x*E**7*J**7/L**14 - \
4269543750000000*E**6*F**2*J**6*x**2/L**12 + \
16194716250000*E**5*F**3*J**5*x**3/L**10 - \
27633173750*E**4*F**4*J**4*x**4/L**8 + \
14840215*E**3*F**5*J**3*x**5/L**6 + \
54794*E**2*F**6*J**2*x**6/(5*L**4) - \
1153*E*J*F**7*x**7/(80*L**2) + \
633*F**8*x**8/160000
assert roots(f, x) == {}
R1 = roots(f.evalf(), x, multiple=True)
R2 = [-1304.88375606366, 97.1168816800648, 186.946430171876, 245.526792947065,
503.441004174773, 791.549343830097, 1273.16678129348, 1850.10650616851]
w = Wild('w')
p = w*E*J/(F*L**2)
assert len(R1) == len(R2)
for r1, r2 in zip(R1, R2):
match = r1.match(p)
assert match is not None and abs(match[w] - r2) < 1e-10
def test_roots_mixed():
f = -1936 - 5056*x - 7592*x**2 + 2704*x**3 - 49*x**4
_re, _im = intervals(f, all=True)
_nroots = nroots(f)
_sroots = roots(f, multiple=True)
_re = [ Interval(a, b) for (a, b), _ in _re ]
_im = [ Interval(re(a), re(b))*Interval(im(a), im(b)) for (a, b),
_ in _im ]
_intervals = _re + _im
_sroots = [ r.evalf() for r in _sroots ]
_nroots = sorted(_nroots, key=lambda x: x.sort_key())
_sroots = sorted(_sroots, key=lambda x: x.sort_key())
for _roots in (_nroots, _sroots):
for i, r in zip(_intervals, _roots):
if r.is_real:
assert r in i
else:
assert (re(r), im(r)) in i
def test_root_factors():
assert root_factors(Poly(1, x)) == [Poly(1, x)]
assert root_factors(Poly(x, x)) == [Poly(x, x)]
assert root_factors(x**2 - 1, x) == [x + 1, x - 1]
assert root_factors(x**2 - y, x) == [x - sqrt(y), x + sqrt(y)]
assert root_factors((x**4 - 1)**2) == \
[x + 1, x + 1, x - 1, x - 1, x - I, x - I, x + I, x + I]
assert root_factors(Poly(x**4 - 1, x), filter='Z') == \
[Poly(x + 1, x), Poly(x - 1, x), Poly(x**2 + 1, x)]
assert root_factors(8*x**2 + 12*x**4 + 6*x**6 + x**8, x, filter='Q') == \
[x, x, x**6 + 6*x**4 + 12*x**2 + 8]
def test_nroots1():
n = 64
p = legendre_poly(n, x, polys=True)
raises(sympy.mpmath.mp.NoConvergence, lambda: p.nroots(n=3, maxsteps=5))
roots = p.nroots(n=3)
# The order of roots matters. They are ordered from smallest to the
# largest.
assert [str(r) for r in roots] == \
['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', '-0.961',
'-0.946', '-0.930', '-0.911', '-0.889', '-0.866', '-0.841',
'-0.813', '-0.784', '-0.753', '-0.720', '-0.685', '-0.649',
'-0.611', '-0.572', '-0.531', '-0.489', '-0.446', '-0.402',
'-0.357', '-0.311', '-0.265', '-0.217', '-0.170', '-0.121',
'-0.0730', '-0.0243', '0.0243', '0.0730', '0.121', '0.170',
'0.217', '0.265', '0.311', '0.357', '0.402', '0.446', '0.489',
'0.531', '0.572', '0.611', '0.649', '0.685', '0.720', '0.753',
'0.784', '0.813', '0.841', '0.866', '0.889', '0.911', '0.930',
'0.946', '0.961', '0.973', '0.983', '0.991', '0.996', '0.999']
def test_nroots2():
p = Poly(x**5 + 3*x + 1, x)
roots = p.nroots(n=3)
# The order of roots matters. The roots are ordered by their real
# components (if they agree, then by their imaginary components),
# with real roots appearing first.
assert [str(r) for r in roots] == \
['-0.332', '-0.839 - 0.944*I', '-0.839 + 0.944*I',
'1.01 - 0.937*I', '1.01 + 0.937*I']
roots = p.nroots(n=5)
assert [str(r) for r in roots] == \
['-0.33199', '-0.83907 - 0.94385*I', '-0.83907 + 0.94385*I',
'1.0051 - 0.93726*I', '1.0051 + 0.93726*I']
|
"""
Tests of printing functionality
"""
from __future__ import absolute_import, print_function, division
import logging
from nose.plugins.skip import SkipTest
import numpy as np
from six.moves import StringIO
import theano
import theano.tensor as tensor
from theano.printing import min_informative_str, debugprint
def test_pydotprint_cond_highlight():
# This is a REALLY PARTIAL TEST.
    # It was written to help debug stuff.
# Skip test if pydot is not available.
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
f = theano.function([x], x * 2)
f([1, 2, 3, 4])
s = StringIO()
new_handler = logging.StreamHandler(s)
new_handler.setLevel(logging.DEBUG)
orig_handler = theano.logging_default_handler
theano.theano_logger.removeHandler(orig_handler)
theano.theano_logger.addHandler(new_handler)
try:
theano.printing.pydotprint(f, cond_highlight=True,
print_output_file=False)
finally:
theano.theano_logger.addHandler(orig_handler)
theano.theano_logger.removeHandler(new_handler)
assert (s.getvalue() == 'pydotprint: cond_highlight is set but there'
' is no IfElse node in the graph\n')
def test_pydotprint_return_image():
# Skip test if pydot is not available.
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
ret = theano.printing.pydotprint(x * 2, return_image=True)
assert isinstance(ret, (str, bytes))
def test_pydotprint_long_name():
# This is a REALLY PARTIAL TEST.
# It prints a graph where there are variable and apply nodes whose long
# names are different, but not the shortened names.
# We should not merge those nodes in the dot graph.
# Skip test if pydot is not available.
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
x = tensor.dvector()
mode = theano.compile.mode.get_default_mode().excluding("fusion")
f = theano.function([x], [x * 2, x + x], mode=mode)
f([1, 2, 3, 4])
theano.printing.pydotprint(f, max_label_size=5,
print_output_file=False)
theano.printing.pydotprint([x * 2, x + x],
max_label_size=5,
print_output_file=False)
def test_pydotprint_profile():
# Just check that pydotprint does not crash with profile.
# Skip test if pydot is not available.
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
if theano.config.mode in ("DebugMode", "DEBUG_MODE"):
raise SkipTest("Can't profile in DebugMode")
A = tensor.matrix()
prof = theano.compile.ProfileStats(atexit_print=False, gpu_checks=False)
f = theano.function([A], A + 1, profile=prof)
theano.printing.pydotprint(f, print_output_file=False)
f([[1]])
theano.printing.pydotprint(f, print_output_file=False)
def test_min_informative_str():
# evaluates a reference output to make sure the
# min_informative_str function works as intended
A = tensor.matrix(name='A')
B = tensor.matrix(name='B')
C = A + B
C.name = 'C'
D = tensor.matrix(name='D')
E = tensor.matrix(name='E')
F = D + E
G = C + F
mis = min_informative_str(G).replace("\t", " ")
reference = """A. Elemwise{add,no_inplace}
B. C
C. Elemwise{add,no_inplace}
D. D
E. E"""
if mis != reference:
print('--' + mis + '--')
print('--' + reference + '--')
assert mis == reference
def test_debugprint():
A = tensor.matrix(name='A')
B = tensor.matrix(name='B')
C = A + B
C.name = 'C'
D = tensor.matrix(name='D')
E = tensor.matrix(name='E')
F = D + E
G = C + F
mode = theano.compile.get_default_mode().including('fusion')
g = theano.function([A, B, D, E], G, mode=mode)
    # just test that it works
s = StringIO()
debugprint(G, file=s)
# test ids=int
s = StringIO()
debugprint(G, file=s, ids='int')
s = s.getvalue()
    # The additional white space is needed!
reference = '\n'.join([
"Elemwise{add,no_inplace} [id 0] '' ",
" |Elemwise{add,no_inplace} [id 1] 'C' ",
" | |A [id 2]",
" | |B [id 3]",
" |Elemwise{add,no_inplace} [id 4] '' ",
" |D [id 5]",
" |E [id 6]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
# test ids=CHAR
s = StringIO()
debugprint(G, file=s, ids='CHAR')
s = s.getvalue()
    # The additional white space is needed!
reference = "\n".join([
"Elemwise{add,no_inplace} [id A] '' ",
" |Elemwise{add,no_inplace} [id B] 'C' ",
" | |A [id C]",
" | |B [id D]",
" |Elemwise{add,no_inplace} [id E] '' ",
" |D [id F]",
" |E [id G]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
# test ids=CHAR, stop_on_name=True
s = StringIO()
debugprint(G, file=s, ids='CHAR', stop_on_name=True)
s = s.getvalue()
    # The additional white space is needed!
reference = '\n'.join([
"Elemwise{add,no_inplace} [id A] '' ",
" |Elemwise{add,no_inplace} [id B] 'C' ",
" |Elemwise{add,no_inplace} [id C] '' ",
" |D [id D]",
" |E [id E]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
# test ids=
s = StringIO()
debugprint(G, file=s, ids='')
s = s.getvalue()
    # The additional white space is needed!
reference = '\n'.join([
"Elemwise{add,no_inplace} '' ",
" |Elemwise{add,no_inplace} 'C' ",
" | |A ",
" | |B ",
" |Elemwise{add,no_inplace} '' ",
" |D ",
" |E ",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
# test print_storage=True
s = StringIO()
debugprint(g, file=s, ids='', print_storage=True)
s = s.getvalue()
    # The additional white space is needed!
reference = '\n'.join([
"Elemwise{add,no_inplace} '' 0 [None]",
" |A [None]",
" |B [None]",
" |D [None]",
" |E [None]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
# test clients
s = StringIO()
# We must force the mode as otherwise it can change the clients order
f = theano.function([A, B, D], [A + B, A + B - D],
mode='FAST_COMPILE')
debugprint(f, file=s, print_clients=True)
s = s.getvalue()
    # The additional white space is needed!
reference = '\n'.join([
"Elemwise{add,no_inplace} [id A] '' 0 clients:[('output', ''), ('[id C]', 1)]",
" |A [id D]",
" |B [id E]",
"Elemwise{sub,no_inplace} [id C] '' 1",
" |Elemwise{add,no_inplace} [id A] '' 0 clients:[('output', ''), ('[id C]', 1)]",
" |D [id F]",
]) + '\n'
if s != reference:
print('--' + s + '--')
print('--' + reference + '--')
assert s == reference
def test_scan_debugprint1():
k = tensor.iscalar("k")
A = tensor.dvector("A")
# Symbolic description of the result
result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
final_result = result[-1]
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Subtensor{int64} [id A] ''
|Subtensor{int64::} [id B] ''
| |for{cpu,scan_fn} [id C] ''
| | |k [id D]
| | |IncSubtensor{Set;:int64:} [id E] ''
| | | |AllocEmpty{dtype='float64'} [id F] ''
| | | | |Elemwise{add,no_inplace} [id G] ''
| | | | | |k [id D]
| | | | | |Subtensor{int64} [id H] ''
| | | | | |Shape [id I] ''
| | | | | | |Rebroadcast{0} [id J] ''
| | | | | | |InplaceDimShuffle{x,0} [id K] ''
| | | | | | |Elemwise{second,no_inplace} [id L] ''
| | | | | | |A [id M]
| | | | | | |InplaceDimShuffle{x} [id N] ''
| | | | | | |TensorConstant{1.0} [id O]
| | | | | |Constant{0} [id P]
| | | | |Subtensor{int64} [id Q] ''
| | | | |Shape [id R] ''
| | | | | |Rebroadcast{0} [id J] ''
| | | | |Constant{1} [id S]
| | | |Rebroadcast{0} [id J] ''
| | | |ScalarFromTensor [id T] ''
| | | |Subtensor{int64} [id H] ''
| | |A [id M]
| |Constant{1} [id U]
|Constant{-1} [id V]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id C] ''
>Elemwise{mul,no_inplace} [id W] ''
> |<TensorType(float64, vector)> [id X] -> [id E]
> |A_copy [id Y] -> [id M]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint2():
coefficients = theano.tensor.vector("coefficients")
x = tensor.scalar("x")
max_coefficients_supported = 10000
# Generate the components of the polynomial
components, updates = theano.scan(fn=lambda coefficient, power,
free_variable:
coefficient * (free_variable ** power),
outputs_info=None,
sequences=[
coefficients,
theano.tensor.arange(
max_coefficients_supported)],
non_sequences=x)
# Sum them up
polynomial = components.sum()
output_str = theano.printing.debugprint(polynomial, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Sum{acc_dtype=float64} [id A] ''
|for{cpu,scan_fn} [id B] ''
|Elemwise{minimum,no_inplace} [id C] ''
| |Subtensor{int64} [id D] ''
| | |Shape [id E] ''
| | | |Subtensor{int64::} [id F] 'coefficients[0:]'
| | | |coefficients [id G]
| | | |Constant{0} [id H]
| | |Constant{0} [id I]
| |Subtensor{int64} [id J] ''
| |Shape [id K] ''
| | |Subtensor{int64::} [id L] ''
| | |ARange{dtype='int64'} [id M] ''
| | | |TensorConstant{0} [id N]
| | | |TensorConstant{10000} [id O]
| | | |TensorConstant{1} [id P]
| | |Constant{0} [id Q]
| |Constant{0} [id R]
|Subtensor{:int64:} [id S] ''
| |Subtensor{int64::} [id F] 'coefficients[0:]'
| |ScalarFromTensor [id T] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Subtensor{:int64:} [id U] ''
| |Subtensor{int64::} [id L] ''
| |ScalarFromTensor [id V] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Elemwise{minimum,no_inplace} [id C] ''
|x [id W]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id B] ''
>Elemwise{mul,no_inplace} [id X] ''
> |coefficients[t] [id Y] -> [id S]
> |Elemwise{pow,no_inplace} [id Z] ''
> |x_copy [id BA] -> [id W]
> |<TensorType(int64, scalar)> [id BB] -> [id U]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint3():
coefficients = theano.tensor.dvector("coefficients")
max_coefficients_supported = 10
k = tensor.iscalar("k")
A = tensor.dvector("A")
# compute A**k
def compute_A_k(A, k):
# Symbolic description of the result
result, updates = theano.scan(fn=lambda prior_result,
A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
A_k = result[-1]
return A_k
# Generate the components of the polynomial
components, updates = theano.scan(fn=lambda coefficient,
power, some_A, some_k:
coefficient *
(compute_A_k(some_A, some_k) ** power),
outputs_info=None,
sequences=[
coefficients,
theano.tensor.arange(
max_coefficients_supported)],
non_sequences=[A, k])
# Sum them up
polynomial = components.sum()
final_result = polynomial
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Sum{acc_dtype=float64} [id A] ''
|for{cpu,scan_fn} [id B] ''
|Elemwise{minimum,no_inplace} [id C] ''
| |Subtensor{int64} [id D] ''
| | |Shape [id E] ''
| | | |Subtensor{int64::} [id F] 'coefficients[0:]'
| | | |coefficients [id G]
| | | |Constant{0} [id H]
| | |Constant{0} [id I]
| |Subtensor{int64} [id J] ''
| |Shape [id K] ''
| | |Subtensor{int64::} [id L] ''
| | |ARange{dtype='int64'} [id M] ''
| | | |TensorConstant{0} [id N]
| | | |TensorConstant{10} [id O]
| | | |TensorConstant{1} [id P]
| | |Constant{0} [id Q]
| |Constant{0} [id R]
|Subtensor{:int64:} [id S] ''
| |Subtensor{int64::} [id F] 'coefficients[0:]'
| |ScalarFromTensor [id T] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Subtensor{:int64:} [id U] ''
| |Subtensor{int64::} [id L] ''
| |ScalarFromTensor [id V] ''
| |Elemwise{minimum,no_inplace} [id C] ''
|Elemwise{minimum,no_inplace} [id C] ''
|A [id W]
|k [id X]
Inner graphs of the scan ops:
for{cpu,scan_fn} [id B] ''
>Elemwise{mul,no_inplace} [id Y] ''
> |InplaceDimShuffle{x} [id Z] ''
> | |coefficients[t] [id BA] -> [id S]
> |Elemwise{pow,no_inplace} [id BB] ''
> |Subtensor{int64} [id BC] ''
> | |Subtensor{int64::} [id BD] ''
> | | |for{cpu,scan_fn} [id BE] ''
> | | | |k_copy [id BF] -> [id X]
> | | | |IncSubtensor{Set;:int64:} [id BG] ''
> | | | | |AllocEmpty{dtype='float64'} [id BH] ''
> | | | | | |Elemwise{add,no_inplace} [id BI] ''
> | | | | | | |k_copy [id BF] -> [id X]
> | | | | | | |Subtensor{int64} [id BJ] ''
> | | | | | | |Shape [id BK] ''
> | | | | | | | |Rebroadcast{0} [id BL] ''
> | | | | | | | |InplaceDimShuffle{x,0} [id BM] ''
> | | | | | | | |Elemwise{second,no_inplace} [id BN] ''
> | | | | | | | |A_copy [id BO] -> [id W]
> | | | | | | | |InplaceDimShuffle{x} [id BP] ''
> | | | | | | | |TensorConstant{1.0} [id BQ]
> | | | | | | |Constant{0} [id BR]
> | | | | | |Subtensor{int64} [id BS] ''
> | | | | | |Shape [id BT] ''
> | | | | | | |Rebroadcast{0} [id BL] ''
> | | | | | |Constant{1} [id BU]
> | | | | |Rebroadcast{0} [id BL] ''
> | | | | |ScalarFromTensor [id BV] ''
> | | | | |Subtensor{int64} [id BJ] ''
> | | | |A_copy [id BO] -> [id W]
> | | |Constant{1} [id BW]
> | |Constant{-1} [id BX]
> |InplaceDimShuffle{x} [id BY] ''
> |<TensorType(int64, scalar)> [id BZ] -> [id U]
for{cpu,scan_fn} [id BE] ''
>Elemwise{mul,no_inplace} [id CA] ''
> |<TensorType(float64, vector)> [id CB] -> [id BG]
> |A_copy [id CC] -> [id BO]"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint4():
def fn(a_m2, a_m1, b_m2, b_m1):
return a_m1 + a_m2, b_m1 + b_m2
a0 = theano.shared(np.arange(2, dtype='int64'))
b0 = theano.shared(np.arange(2, dtype='int64'))
(a, b), _ = theano.scan(
fn, outputs_info=[{'initial': a0, 'taps': [-2, -1]},
{'initial': b0, 'taps': [-2, -1]}],
n_steps=5)
final_result = a + b
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Elemwise{add,no_inplace} [id A] ''
|Subtensor{int64::} [id B] ''
| |for{cpu,scan_fn}.0 [id C] ''
| | |TensorConstant{5} [id D]
| | |IncSubtensor{Set;:int64:} [id E] ''
| | | |AllocEmpty{dtype='int64'} [id F] ''
| | | | |Elemwise{add,no_inplace} [id G] ''
| | | | |TensorConstant{5} [id D]
| | | | |Subtensor{int64} [id H] ''
| | | | |Shape [id I] ''
| | | | | |Subtensor{:int64:} [id J] ''
| | | | | |<TensorType(int64, vector)> [id K]
| | | | | |Constant{2} [id L]
| | | | |Constant{0} [id M]
| | | |Subtensor{:int64:} [id J] ''
| | | |ScalarFromTensor [id N] ''
| | | |Subtensor{int64} [id H] ''
| | |IncSubtensor{Set;:int64:} [id O] ''
| | |AllocEmpty{dtype='int64'} [id P] ''
| | | |Elemwise{add,no_inplace} [id Q] ''
| | | |TensorConstant{5} [id D]
| | | |Subtensor{int64} [id R] ''
| | | |Shape [id S] ''
| | | | |Subtensor{:int64:} [id T] ''
| | | | |<TensorType(int64, vector)> [id U]
| | | | |Constant{2} [id V]
| | | |Constant{0} [id W]
| | |Subtensor{:int64:} [id T] ''
| | |ScalarFromTensor [id X] ''
| | |Subtensor{int64} [id R] ''
| |Constant{2} [id Y]
|Subtensor{int64::} [id Z] ''
|for{cpu,scan_fn}.1 [id C] ''
|Constant{2} [id BA]
Inner graphs of the scan ops:
for{cpu,scan_fn}.0 [id C] ''
>Elemwise{add,no_inplace} [id BB] ''
> |<TensorType(int64, scalar)> [id BC] -> [id E]
> |<TensorType(int64, scalar)> [id BD] -> [id E]
>Elemwise{add,no_inplace} [id BE] ''
> |<TensorType(int64, scalar)> [id BF] -> [id O]
> |<TensorType(int64, scalar)> [id BG] -> [id O]
for{cpu,scan_fn}.1 [id C] ''
>Elemwise{add,no_inplace} [id BB] ''
>Elemwise{add,no_inplace} [id BE] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_scan_debugprint5():
k = tensor.iscalar("k")
A = tensor.dvector("A")
# Symbolic description of the result
result, updates = theano.scan(fn=lambda prior_result, A: prior_result * A,
outputs_info=tensor.ones_like(A),
non_sequences=A,
n_steps=k)
final_result = tensor.grad(result[-1].sum(), A)
output_str = theano.printing.debugprint(final_result, file='str')
lines = []
for line in output_str.split('\n'):
lines += [line]
expected_output = """Subtensor{int64} [id A] ''
|for{cpu,grad_of_scan_fn}.1 [id B] ''
| |Elemwise{sub,no_inplace} [id C] ''
| | |Subtensor{int64} [id D] ''
| | | |Shape [id E] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |k [id G]
| | | | |IncSubtensor{Set;:int64:} [id H] ''
| | | | | |AllocEmpty{dtype='float64'} [id I] ''
| | | | | | |Elemwise{add,no_inplace} [id J] ''
| | | | | | | |k [id G]
| | | | | | | |Subtensor{int64} [id K] ''
| | | | | | | |Shape [id L] ''
| | | | | | | | |Rebroadcast{0} [id M] ''
| | | | | | | | |InplaceDimShuffle{x,0} [id N] ''
| | | | | | | | |Elemwise{second,no_inplace} [id O] ''
| | | | | | | | |A [id P]
| | | | | | | | |InplaceDimShuffle{x} [id Q] ''
| | | | | | | | |TensorConstant{1.0} [id R]
| | | | | | | |Constant{0} [id S]
| | | | | | |Subtensor{int64} [id T] ''
| | | | | | |Shape [id U] ''
| | | | | | | |Rebroadcast{0} [id M] ''
| | | | | | |Constant{1} [id V]
| | | | | |Rebroadcast{0} [id M] ''
| | | | | |ScalarFromTensor [id W] ''
| | | | | |Subtensor{int64} [id K] ''
| | | | |A [id P]
| | | |Constant{0} [id X]
| | |TensorConstant{1} [id Y]
| |Subtensor{:int64:} [id Z] ''
| | |Subtensor{::int64} [id BA] ''
| | | |Subtensor{:int64:} [id BB] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |Constant{-1} [id BC]
| | | |Constant{-1} [id BD]
| | |ScalarFromTensor [id BE] ''
| | |Elemwise{sub,no_inplace} [id C] ''
| |Subtensor{:int64:} [id BF] ''
| | |Subtensor{:int64:} [id BG] ''
| | | |Subtensor{::int64} [id BH] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |Constant{-1} [id BI]
| | | |Constant{-1} [id BJ]
| | |ScalarFromTensor [id BK] ''
| | |Elemwise{sub,no_inplace} [id C] ''
| |Subtensor{::int64} [id BL] ''
| | |IncSubtensor{Inc;int64::} [id BM] ''
| | | |Elemwise{second,no_inplace} [id BN] ''
| | | | |for{cpu,scan_fn} [id F] ''
| | | | |InplaceDimShuffle{x,x} [id BO] ''
| | | | |TensorConstant{0.0} [id BP]
| | | |IncSubtensor{Inc;int64} [id BQ] ''
| | | | |Elemwise{second,no_inplace} [id BR] ''
| | | | | |Subtensor{int64::} [id BS] ''
| | | | | | |for{cpu,scan_fn} [id F] ''
| | | | | | |Constant{1} [id BT]
| | | | | |InplaceDimShuffle{x,x} [id BU] ''
| | | | | |TensorConstant{0.0} [id BP]
| | | | |Elemwise{second} [id BV] ''
| | | | | |Subtensor{int64} [id BW] ''
| | | | | | |Subtensor{int64::} [id BS] ''
| | | | | | |Constant{-1} [id BX]
| | | | | |InplaceDimShuffle{x} [id BY] ''
| | | | | |Elemwise{second,no_inplace} [id BZ] ''
| | | | | |Sum{acc_dtype=float64} [id CA] ''
| | | | | | |Subtensor{int64} [id BW] ''
| | | | | |TensorConstant{1.0} [id R]
| | | | |Constant{-1} [id BX]
| | | |Constant{1} [id BT]
| | |Constant{-1} [id CB]
| |Alloc [id CC] ''
| | |TensorConstant{0.0} [id BP]
| | |Elemwise{add,no_inplace} [id CD] ''
| | | |Elemwise{sub,no_inplace} [id C] ''
| | | |TensorConstant{1} [id Y]
| | |Subtensor{int64} [id CE] ''
| | |Shape [id CF] ''
| | | |A [id P]
| | |Constant{0} [id CG]
| |A [id P]
|Constant{-1} [id CH]
Inner graphs of the scan ops:
for{cpu,grad_of_scan_fn}.1 [id B] ''
>Elemwise{add,no_inplace} [id CI] ''
> |Elemwise{mul} [id CJ] ''
> | |<TensorType(float64, vector)> [id CK] -> [id BL]
> | |A_copy [id CL] -> [id P]
> |<TensorType(float64, vector)> [id CM] -> [id BL]
>Elemwise{add,no_inplace} [id CN] ''
> |Elemwise{mul} [id CO] ''
> | |<TensorType(float64, vector)> [id CK] -> [id BL]
> | |<TensorType(float64, vector)> [id CP] -> [id Z]
> |<TensorType(float64, vector)> [id CQ] -> [id CC]
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CR] ''
> |<TensorType(float64, vector)> [id CP] -> [id H]
> |A_copy [id CL] -> [id P]
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CR] ''
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CR] ''
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CR] ''
for{cpu,scan_fn} [id F] ''
>Elemwise{mul,no_inplace} [id CR] ''"""
for truth, out in zip(expected_output.split("\n"), lines):
assert truth.strip() == out.strip()
def test_printing_scan():
# Skip test if pydot is not available.
if not theano.printing.pydot_imported:
raise SkipTest('pydot not available')
def f_pow2(x_tm1):
return 2 * x_tm1
state = theano.tensor.scalar('state')
n_steps = theano.tensor.iscalar('nsteps')
output, updates = theano.scan(f_pow2,
[],
state,
[],
n_steps=n_steps,
truncate_gradient=-1,
go_backwards=False)
f = theano.function([state, n_steps],
output,
updates=updates,
allow_input_downcast=True)
theano.printing.pydotprint(output, scan_graphs=True)
theano.printing.pydotprint(f, scan_graphs=True)
def test_subtensor():
x = theano.tensor.dvector()
y = x[1]
assert theano.pp(y) == "<TensorType(float64, vector)>[Constant{1}]"
|
# -*- coding: latin1 -*-
from __future__ import print_function
"""
.. currentmodule:: pylayers.antprop.rays
.. autosummary::
:members:
"""
import doctest
import os
import sys
import glob
try:
# from tvtk.api import tvtk
# from mayavi.sources.vtk_data_source import VTKDataSource
from mayavi import mlab
except:
print('Rays: Mayavi is not installed')
import pdb
import copy
if sys.version_info.major==2:
    import ConfigParser
else:
    import configparser
import networkx as nx
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import struct as stru
import pylayers.util.geomutil as geu
import pylayers.util.pyutil as pyu
from pylayers.util.project import *
from pylayers.antprop.interactions import *
from pylayers.antprop.slab import *
from pylayers.antprop.channel import Ctilde
from pylayers.gis.layout import Layout
import pylayers.signal.bsignal as bs
import shapely.geometry as shg
import h5py
import operator
class Rays(PyLayers, dict):
""" Class handling a set of rays
Attributes
----------
pTx : np.array
transmitter (3,)
pRx : np.array
receiver (3,)
B : IntB
B0 : IntB
I : Interactions
I.I : np.array
(f,nI,3,3)
I.T : IntT
I.T.A : np.array
(f,iT,3,3)
I.R : IntR
I.R.A : np.array
(f,iR,3,3)
I.D : IntD
I.D.A : np.array
(f,iD,3,3)
Lfilename : string
Layout name
delays : np.array
ray delays
dis : np.array
ray distance = delays*0.3
nray : int
number of rays
evaluated : boolean
are rays evaluated ?
is3D : boolean
are rays 2d or 3d rays ?
isbased : boolean
locbas has been applied ?
filled : boolean
filled has been applied ?
los : boolean
Line of sight boolean
fGHz : np.array
frequency points for evaluation
origin_sig_name : string
signature file which produces the rays
Notes
-----
The Rays object is obtained from a signature.
It is a container for a set of rays between a source
and a target point defining a radio link.
Once a Rays object has been obtained in 2D, it is transformed
into 3D via the **to3D** method. This method takes two parameters:
the height from floor to ceiling, and the number N of
multiple reflections to account for.
Once the 3D rays have been calculated,
the local bases are evaluated along those rays. This is
done through the **locbas** method.
Once the local bases have been calculated, the different
interactions along the rays can be informed via the **fillinter**
method.
Once the interactions are informed, the field along the rays can
be evaluated via the **eval** method.
"""
def __init__(self, pTx, pRx):
""" object constructor
Parameters
----------
pTx : np.array
transmitter coordinates
pRx : np.array
receiver coordinates
"""
self.pTx = pTx
self.pRx = pRx
self.nray = 0
self.nray2D = 0
self.raypt = 0
self.los = False
self.is3D = False
self.isbased = False
self.filled = False
self.evaluated = False
def __len__(self):
Nray = 0
for k in self.keys():
sh = np.shape(self[k]['sig'])
Nray = Nray + sh[2]
return Nray
# def __add__(self,r):
# if (not r.is3D) and (not r.isbased) and (not self.is3D) and (not self.isbased) :
# raise AttributeError('both Ray structures must be 3D and based to be added')
# for ni in r:
# if self.has_key(ni):
# import ipdb
# ipdb.set_trace()
# # check if som rays already exists
# # if so, don't add them
# lur = np.array([])
# for ur in range(self[ni]['pt'].shape[2]):
# udifferent = np.where(np.all(np.all(r[ni]['pt'][...,ur][...,None]!=self[ni]['pt'],axis=0),axis=0))[0]
# lur = np.hstack((lur,udifferent ))
# import ipdb
# ipdb.set_trace()
# self[ni]['pt'] = np.concatenate((self[ni]['pt'],r[ni]['pt']),axis=2)
# self[ni]['sig'] = np.concatenate((self[ni]['sig'],r[ni]['sig']),axis=2)
# self[ni]['si'] = np.concatenate((self[ni]['si'],r[ni]['si']),axis=1)
# self[ni]['rayidx'] = np.concatenate((self[ni]['rayidx'],r[ni]['rayidx']),axis=0)
# self[ni]['dis'] = np.concatenate((self[ni]['dis'],r[ni]['dis']),axis=0)
# self[ni]['vsi'] = np.concatenate((self[ni]['vsi'],r[ni]['vsi']),axis=1)
# self[ni]['nbrays'] += 1
# if ni != 0:
# self[ni]['BiN'] = np.concatenate((self[ni]['BiN'],r[ni]['BiN']),axis=2)
# self[ni]['Bi'] = np.concatenate((self[ni]['Bi'],r[ni]['Bi']),axis=3)
# self[ni]['Bo'] = np.concatenate((self[ni]['Bo'],r[ni]['Bo']),axis=3)
# self[ni]['Bo0'] = np.concatenate((self[ni]['Bo0'],r[ni]['Bo0']),axis=2)
# self[ni]['scpr'] = np.concatenate((self[ni]['scpr'],r[ni]['scpr']),axis=1)
# self[ni]['norm'] = np.concatenate((self[ni]['norm'],r[ni]['norm']),axis=2)
# self[ni]['B'] = np.concatenate((self[ni]['B'],r[ni]['B']),axis=3)
# self[ni]['aod'] = np.concatenate((self[ni]['aod'],r[ni]['aod']),axis=1)
# self[ni]['aoa'] = np.concatenate((self[ni]['aoa'],r[ni]['aoa']),axis=1)
# self[ni]['theta'] = np.concatenate((self[ni]['theta'],r[ni]['theta']),axis=1)
# if r[ni].has_key('diffidx'):
# if self[ni].has_key('diffidx'):
# self[ni]['diffidx'] = np.concatenate((self[ni]['diffidx'],r[ni]['diffidx']))
# self[ni]['diffvect'] = np.concatenate((self[ni]['diffvect'],r[ni]['diffvect']),axis=1)
# self[ni]['diffslabs'].append(r[ni]['diffslabs'])
# else:
# self[ni]['diffidx'] = r['diffidx']
# self[ni]['diffvect'] = r['diffvect']
# self[ni]['diffslabs'] = r['diffslabs']
# else:
# self[ni]=r[ni]
def __repr__(self):
s = ''
ni = 0
nl = 0
lgi = list(self.keys())
lgi.sort()
if self.is3D:
s = self.__class__.__name__ + '3D\n' + '----------'+'\n'
for k in lgi:
r = self[k]['rayidx']
nr = len(r)
s = s + str(k)+' / '+str(nr)+ ' : '+str(r)+'\n'
ni = ni + nr*k
nl = nl + nr*(2*k+1)
nray2D = self.nray2D
else:
s = self.__class__.__name__ + '2D\n' + '----------'+'\n'
nray2D = len(self)
if self.los:
s = s + "LOS "
if self.isbased:
s = s + "based "
if self.filled:
s = s + "filled "
s = s + '\n'
s = s + 'N2Drays : '+ str(nray2D) + '\n'
if hasattr(self,'nb_origin_sig'):
s = s + 'from '+ str(self.nb_origin_sig) + ' signatures\n'
s = s + '#Rays/#Sig: '+ str(nray2D/(1.*self.nb_origin_sig) )
s = s + '\npTx : '+ str(self.pTx) + '\npRx : ' + str(self.pRx)+'\n'
if not self.is3D:
ray_cpt = 0
for k in lgi:
#sk = np.shape(self[k]['sig'])[2]
s = s + str(k) + ':\n'
sig = self[k]['sig'][0,:]
sha0 = sig.shape[0]
sha1 = sig.shape[1]
#pdb.set_trace()
for l in np.arange(sha1):
s = s + ' '+str(ray_cpt)+':'
ray_cpt +=1
for n in np.arange(sha0):
s = s + ' '+str(sig[n,l])
s = s+'\n'
#pdb.set_trace()
#s = s + str(sk) + 'rays with' + str(k) + ' interactions'
return(s)
def saveh5(self,idx=0):
""" save rays in hdf5 format
Parameters
----------
idx : int
See Also
--------
loadh5
"""
filename = self.filename+'_'+str(idx)
filenameh5=pyu.getlong(filename+'.h5',pstruc['DIRR3D'])
# try/except to avoid losing the h5 file if
# read/write error
try:
f=h5py.File(filenameh5,'w')
# keys not saved as attribute of h5py file
notattr = ['I','B','B0','delays','dis']
for a in self.__dict__.keys():
if a not in notattr:
f.attrs[a]=getattr(self,a)
for k in self.keys():
f.create_group(str(k))
for kk in self[k].keys():
if kk == 'sig2d':
# Need to find an efficient way to save the signatures
# 2d which have created the rays
pass
elif kk == 'nbrays':
f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
else:
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
f.close()
except:
f.close()
raise NameError('Rays: issue when writing h5py file')
print(filenameh5)
def loadh5(self,filename=[],idx=0):
""" load rays hdf5 format
Parameters
----------
idx : int
"""
if filename == []:
filenameh5 = self.filename+'_'+str(idx)+'.h5'
else :
filenameh5 = filename
filename=pyu.getlong(filenameh5,pstruc['DIRR3D'])
print(filename)
# try/except to avoid losing the h5 file if
# read/write error
try:
f = h5py.File(filename,'r')
for k in f.keys():
self.update({eval(k):{}})
for kk in f[k].keys():
self[eval(k)].update({kk:f[k][str(kk)][:]})
for a,va in f.attrs.items():
setattr(self,a,va)
f.close()
except:
f.close()
raise NameError('Rays: issue when reading h5py file')
# fill if save was filled
# temporary solution in order to avoid
# creating save for Interactions classes
if self.filled:
#Lname = self.Lfilename
Lname = '_'.join(self.filename.split('_')[0:-1]) + '.lay'
#Lname = self.filename.split('_')[0] + '.lay'
L=Layout(Lname)
self.fillinter(L)
if self.evaluated:
return self.val(self.fGHz)
def _saveh5(self,filenameh5,grpname):
""" Save rays h5py format compliant with Links Class
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
See Also
--------
pylayers.simul.links
"""
filenameh5=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# try/except to avoid losing the h5 file if
# read/write error
#try:
fh5=h5py.File(filenameh5,'a')
if self.is3D:
if not grpname in fh5['ray'].keys():
fh5['ray'].create_group(grpname)
else :
print('ray/'+grpname+' already exists in '+filenameh5)
f = fh5['ray/'+grpname]
else:
if not grpname in fh5['ray2'].keys():
fh5['ray2'].create_group(grpname)
else :
print('ray2/'+grpname+' already exists in '+filenameh5)
f = fh5['ray2/'+grpname]
# keys not saved as attribute of h5py file
notattr = ['I','B','B0','dis']
for a in self.__dict__.keys():
if a not in notattr:
if type(a)==str:
a.encode('utf-8')
if a=='_luw':
la = [ x.encode('utf8') for x in getattr(self,a) ]
f.attrs[a] = la
else:
f.attrs[a] = getattr(self,a)
for k in self.keys():
f.create_group(str(k))
for kk in self[k].keys():
if kk == 'sig2d':
# Need to find an efficient way to save the signatures
# 2d which have created the rays
pass
elif kk == 'nbrays':
f[str(k)].create_dataset(kk,shape=(1,),data=np.array([self[k][kk]]))
else:
if kk=='diffslabs':
ldiffslabs = [ x.encode('utf8') for x in self[k][kk] ]
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=ldiffslabs)
else:
f[str(k)].create_dataset(kk,shape=np.shape(self[k][kk]),data=self[k][kk])
fh5.close()
#except:
# fh5.close()
# raise NameError('Rays: issue when writting h5py file')
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load rays h5py format compliant with Links Class
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
kwargs may contain a L: layout object
if L = [] the layout is loaded from the layout name stored
into the h5 file
if L = Layout the layout passed in arg is used
See Also
--------
pylayers.simul.links
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# try/except to avoid losing the h5 file if
# read/write error
try:
fh5=h5py.File(filename,'r')
if self.is3D:
argfile = 'ray/'+grpname
else:
argfile = 'ray2/'+grpname
f = fh5[argfile]
for k in f.keys():
self.update({eval(k):{}})
for kk in f[k].keys():
self[eval(k)].update({kk:f[k][str(kk)][:]})
for a,va in f.attrs.items():
setattr(self,a,va)
fh5.close()
except:
fh5.close()
raise NameError('Rays: issue when reading h5py file')
# fill if save was filled
# temporary solution in order to avoid
# creating save for Interactions classes
if self.filled:
if 'L' in kwargs:
self.L=kwargs['L']
else:
self.L = Layout(self.Lfilename,bbuild=True)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
# L=Layout(self.Lfilename,bbuild=True)
self.fillinter(self.L)
# if self.evaluated:
# return self.eval(self.fGHz)
def reciprocal(self):
""" switch tx and rx
"""
r = Rays(self.pRx,self.pTx)
r.is3D = self.is3D
r.nray = self.nray
r.origin_sig_name = self.origin_sig_name
r.nb_origin_sig = self.nb_origin_sig
for k in self:
r[k]={}
r[k]['pt']=self[k]['pt'][:,::-1,:]
r[k]['sig']=self[k]['sig'][:,::-1,:]
return(r)
def check_reciprocity(self,r):
""" check ray reciprocity in comparing two reciprocal rays
Parameters
----------
r : rays reciprocal to self
"""
# permutation of all termination points
assert (self.pTx==r.pRx).all()
assert (self.pRx==r.pTx).all()
# for all groups of interactions
for k in self:
# same distances
assert (np.allclose(self[k]['dis'],r[k]['dis']))
# same points when reading from right to left
assert (np.allclose(self[k]['pt'],r[k]['pt'][:,::-1,:]))
# same signature reading from right to left
assert (np.allclose(self[k]['sig'],r[k]['sig'][:,::-1,:]))
# if local basis have been evaluated
if (self.isbased) & (r.isbased):
#assert (np.allclose(self[k]['nstrwall'],r[k]['nstrwall'][:,::-1,:]))
assert (np.allclose(self[k]['norm'],r[k]['norm'][:,::-1,:])), 'interaction block:' + str(k)
#assert ((np.mod(self[k]['aoa']-r[k]['aod'],2*np.pi)==0).all())
#assert ((np.mod(self[k]['aod']-r[k]['aoa'],2*np.pi)==0).all())
# 1st output basis is equal to last input basis of the reciprocal ray
assert (np.allclose(self[k]['Bo0'],r[k]['BiN'])), 'interaction block:' + str(k)
# last input basis is equal to 1st output basis of the reciprocal ray
assert (np.allclose(self[k]['BiN'],r[k]['Bo0'])), 'interaction block:' + str(k)
# vsi vectors are inversed
assert (np.allclose(self[k]['vsi'],-r[k]['vsi'][:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(abs(self[k]['scpr']),abs(r[k]['scpr'][::-1,:]))), 'interaction block:' + str(k)
assert (np.allclose(self[k]['theta'],r[k]['theta'][::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['Bi'],r[k]['Bo'][:,:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['Bo'],r[k]['Bi'][:,:,::-1,:])), 'interaction block:' + str(k)
assert (np.allclose(self[k]['B'],r[k]['B'][:,:,::-1,:].swapaxes(0,1))), 'interaction block:' + str(k)
if self.evaluated :
for ir in range(self.nray):
iint1 = self.ray(ir)
iint2 = r.ray(ir)
# check Interactions
A1 = self.I.I[:, iint1, :, :]
A2 = r.I.I[:, iint2, :, :][:,::-1,:,:]
assert np.allclose(A1,A2),pdb.set_trace()
# check bases
# ray 1 : B0 | B[0] | B[1] | B[2] | B[3] | B[4]
# ray 2 : B[4] | B[3] | B[2] | B[1] | B[0] | B0
assert np.allclose(self.B0.data[ir,:,:],r.B.data[iint2,:,:][-1,:,:].swapaxes(1,0))
assert np.allclose(r.B0.data[ir,:,:],self.B.data[iint1,:,:][-1,:,:].swapaxes(1,0))
assert np.allclose(self.B.data[iint1,:,:][:-1],r.B.data[iint2,:,:][:-1][::-1,:,:].swapaxes(2,1))
def sort(self):
""" sort rays
TODO : not finished
"""
u = np.argsort(self.dis)
def rayfromtyp_order(self,nD=[1],nR=[1],nT=[1],llo='&&'):
"""
Return rays of a given type (R|T|D) and a given order
(number of interactions)
list logic operator : llo ['op0op1']
nD <op0> nR <op1> nT
Parameters
----------
nD = list|int
requested number of Diffraction
nR = list|int
requested number of Reflection
nT = list|int
requested number of Transmission
llo = list logic operator [op0,op1]
nD <op0> nR <op1> nT
Returns
-------
lr : list
list of ray index matching the typ & order conditions
"""
if not isinstance(nD,list):
nD=[nD]
if not isinstance(nR,list):
nR=[nR]
if not isinstance(nT,list):
nT=[nT]
op = {'and':operator.and_,
'or':operator.or_,
'&':operator.and_,
'|':operator.or_,
}
lr=[]
for ur,r in enumerate(range(self.nray)):
li = self.ray2ityp(r)
nRli = li.count('R')
nTli = li.count('T')
nDli = li.count('D')
cD = (nDli in nD)
cR = (nRli in nR)
cT = (nTli in nT)
# if (nDli in nD) and (nRli in nR) and (nTli in nT) :
if op[llo[1].lower()]( op[llo[0].lower()](cD,cR) , cT):
lr.append(r)
elif (self.los) and (1 in nT ) and (0 in nD) and (0 in nR) and (ur == 0):
lr.append(r)
return lr
def extract_typ_order(self,L,nD=[1],nR=[1],nT=[1],llo='&&'):
""" Extract group of rays from a certain type (R|T|D)
at a order ( <=> given number of interaction)
list logic operator : llo [op0,op1]
nD <op0> nR <op1> nT
Parameters
----------
L : Layout
nD = list|int
requested number of Diffraction
nR = list|int
requested number of Reflection
nT = list|int
requested number of Transmission
llo = list logic operator [op0,op1]
nD <op0> nR <op1> nT
Returns
-------
R : Rays object
New Rays object containing rays matching
the typ/order conditions
"""
lr = self.rayfromtyp_order(nD=nD,nR=nR,nT=nT,llo=llo)
return self.extract(lr,L)
def extract(self,lnr,L):
""" Extract a group of rays
Parameters
----------
lnr : list of rays indexes
L : Layout
"""
if not isinstance(lnr,list):
lnr=[lnr]
r = Rays(self.pTx,self.pRx)
r.is3D = self.is3D
for unr,nr in enumerate(lnr):
#r.nray2D =
#r.nb_origin_sig = 1
ni = self.ray2nbi(nr)
ur = np.where(self[ni]['rayidx']==nr)[0][0]
if ni == 0:
los = True
else:
los = False
if 'D' in self.typ(nr):
diff=True
else:
diff=False
if 'diffvect' in self[ni]:
# check if the ray has diffraction interaction
inter = self.ray2iidx(nr)[:,0]
uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
else:
uD=[]
diffkey = ['diffvect','diffidx','diffslabs']
cray = {}
for k in self[ni].keys():
if ni ==0:
cray = self[ni]
break
elif k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
tab = self[ni][k]
if type(tab)==np.ndarray and k not in diffkey:
try:
cray[k] = tab[...,ur][...,np.newaxis]
except:
import ipdb
ipdb.set_trace()
if diff :
if k in diffkey :
if k != 'diffslabs':
cray[k]=tab[...,uD][...,np.newaxis]
else:
if len(uD)>0 :
cray[k]=[tab[uD]]
else:
cray[k]=[]
cray['nbrays'] = unr+1 # number of rays extracted so far
r.nray = unr+1
#cray['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
cray['rayidx'] = np.array([unr])
cray['dis'] = np.array([self[ni]['dis'][ur]])
if ni in r:
# R[ni]['sig2d'].append(self[k]['sig2d'][ur])
if not los :
r[ni]['BiN'] = np.concatenate((r[ni]['BiN'],cray['BiN']),axis=2)
r[ni]['Bo'] = np.concatenate((r[ni]['Bo'],cray['Bo']),axis=3)
r[ni]['Bi'] = np.concatenate((r[ni]['Bi'],cray['Bi']),axis=3)
if diff:
if 'diffidx' in r[ni]:
r[ni]['diffidx'] = np.concatenate((r[ni]['diffidx'],cray['diffidx']))
r[ni]['diffvect'] = np.concatenate((r[ni]['diffvect'],cray['diffvect']),axis=1)
r[ni]['diffslabs'].append(cray['diffslabs'])
else:
r[ni]['diffidx'] = cray['diffidx']
r[ni]['diffvect'] = cray['diffvect']
r[ni]['diffslabs'] = cray['diffslabs']
r[ni]['nbrays'] += 1
r[ni]['B'] = np.concatenate((r[ni]['B'], cray['B']), axis=3)
r[ni]['pt'] = np.concatenate((r[ni]['pt'], cray['pt']), axis=2)
r[ni]['rayidx'] = np.concatenate((r[ni]['rayidx'], cray['rayidx']), axis=0)
r[ni]['Bo0'] = np.concatenate((r[ni]['Bo0'],cray['Bo0']), axis=2)
r[ni]['scpr'] = np.concatenate((r[ni]['scpr'], cray['scpr']), axis=1)
r[ni]['aod'] = np.concatenate((r[ni]['aod'], cray['aod']), axis=1)
r[ni]['si'] = np.concatenate((r[ni]['si'], cray['si']), axis=1)
r[ni]['sig'] = np.concatenate((r[ni]['sig'], cray['sig']), axis=2)
# r[ni]['sig2d'] = np.concatenate((r[ni]['sig2d'],cray['sig2d']),axis=2)
r[ni]['aoa'] = np.concatenate((r[ni]['aoa'], cray['aoa']), axis=1)
r[ni]['vsi'] = np.concatenate((r[ni]['vsi'], cray['vsi']), axis=2)
r[ni]['theta'] = np.concatenate((r[ni]['theta'], cray['theta']), axis=1)
r[ni]['norm'] = np.concatenate((r[ni]['norm'], cray['norm']), axis=2)
r[ni]['dis'] = np.concatenate((r[ni]['dis'], cray['dis']), axis=0)
else:
r[ni] = cray
# r[ni]['rays'] = to be done HERE
r.locbas(L)
r.fillinter(L)
return(r)
def extract_old(self,nr,L):
""" Extract a single ray
Parameters
----------
nr : ray index
L : Layout
"""
r = Rays(self.pTx,self.pRx)
r.is3D = self.is3D
r.nray2D = 1
r.nb_origin_sig = 1
#ni = self._ray2nbi[nr]
#ur = np.where(self[ni]['rayidx']==nr)[0][0]
ni,ur = self.ir2a(nr)
if 'D' in self.typ(nr):
diff=True
else:
diff=False
if 'diffvect' in self[ni]:
# check if the ray has diffraction interaction
inter = self.ray2iidx(nr)[:,0]
uD = np.where([i in inter for i in self[ni]['diffidx']])[0]
else:
uD=[]
diffkey = ['diffvect','diffidx','diffslabs']
r[ni] = {}
for k in self[ni].keys():
if k not in ['nbrays','rayidx','dis','nstrwall','nstrswall']:
tab = self[ni][k]
if type(tab)==np.ndarray and k not in diffkey:
r[ni][k] = tab[...,ur][...,np.newaxis]
if diff :
if k in diffkey :
if k != 'diffslabs':
r[ni][k]=tab[...,uD][...,np.newaxis]
else:
if len(uD)>0 :
r[ni][k]=tab[uD]
else:
r[ni][k]=[]
r[ni]['nrays'] = 1 # keep only one ray
r.nray = 1
#r[ni]['rayidx']=np.array([self[ni]['rayidx'][nr]]) # ray index in the whole structure
r[ni]['rayidx'] = np.array([0])
r[ni]['dis'] = np.array([self[ni]['dis'][ur]])
r.locbas(L)
r.fillinter(L)
return(r)
def show(self,**kwargs):
""" plot 2D rays within the simulated environment
Parameters
----------
rlist : list (default []= all rays)
list of indices of ray in interaction group
graph : string
type of graph to be displayed
's','r','t',..
fig : figure
ax : axis
L : Layout
alpha : float
1
linewidth : float
0.1
color : string
'black'
ms : int
marker size : 5
layout : boolean
True
points : boolean
True
ER : ray energy
"""
defaults = {'rlist': [],
'fig': [],
'ax': [],
'L': [],
'graph': 's',
'color': 'black',
'alpha': 1,
'linewidth': 0.5,
'ms': 5,
'vmin':0,
'vmax':-70,
'cmap': plt.cm.hot_r,
'layout': True,
'points': True,
'labels': False,
'bcolorbar': False
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
if kwargs['fig'] ==[]:
fig = plt.figure()
if kwargs['ax'] ==[]:
ax = fig.add_subplot(111)
#
# display the Layout
#
if kwargs['layout'] == True:
if kwargs['L'] != []:
fig,ax = kwargs['L'].showG(**kwargs)
else :
raise AttributeError('Please give a Layout file as argument')
else:
fig = kwargs['fig']
ax = kwargs['ax']
#
# display Tx and Rx
#
if kwargs['points'] ==True:
ax.plot(self.pTx[0], self.pTx[1], 'or',ms=kwargs['ms'])
ax.plot(self.pRx[0], self.pRx[1], 'og',ms=kwargs['ms'])
# i=-1 all rays
# else block of interactions i
# plot all rays
if kwargs['rlist'] == []:
# list of group of interactions
lgrint = self.keys()
for i in lgrint:
# list of rays
lray = range(len(self[i]['pt'][0, 0, :]))
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx']))
for j in lray:
addr_ray = (i,j)
index_ray = self.a2ir(addr_ray)
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
if 'ER' not in kwargs:
ax.plot(ray[0, :], ray[1, :],
alpha = kwargs['alpha'],
color = kwargs['color'],
linewidth = kwargs['linewidth'])
else:
ERdB = 10*np.log10(kwargs['ER'])
EdB = ERdB[index_ray]
vscale = 1.-(max(ERdB)-EdB)/(max(ERdB)-min(ERdB))
linewidth = 3*vscale
alpha = vscale
cmap = cm.hot
color = cmap(vscale)
ax.plot(ray[0, :], ray[1, :],
alpha = alpha,
color = color,
linewidth = linewidth)
ax.axis('off')
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx'][lray]))
else:
rlist = kwargs['rlist']
# 3D ray
if self.is3D:
nbi = self._ray2nbi[rlist]
nr = np.array((nbi,rlist))
unb = np.unique(nr[0,:])
unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}
for i in unb:
raynb = (nr[1,unr[i]]).astype(int)
nbr = len(raynb)
ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]
for j in ptidx:
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
ax.plot(ray[0, :], ray[1, :],
alpha = kwargs['alpha'],
color = kwargs['color'],
linewidth = kwargs['linewidth'])
ax.axis('off')
# 2D ray
else:
for i in rlist:
lray = range(len(self[i]['pt'][0, 0, :]))
#if self.filled :
# ax.set_title('rays index :'+ str(self[i]['rayidx']))
for j in lray:
ray = np.hstack((self.pTx[0:2].reshape((2, 1)),
np.hstack((self[i]['pt'][0:2, :, j],
self.pRx[0:2].reshape((2, 1))))
))
ax.plot(ray[0, :], ray[1, :],
alpha=kwargs['alpha'],
color=kwargs['color'],
linewidth=kwargs['linewidth'])
ax.axis('off')
if kwargs['bcolorbar']:
# axes : left , bottom , width , height
sm = plt.cm.ScalarMappable(cmap = kwargs['cmap'], norm = plt.Normalize(vmin=kwargs['vmin'],vmax=kwargs['vmax']))
sm._A = [] # necessary set_array
cax = fig.add_axes([0.18,0.35, 0.35, 0.025])
#cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
cb = plt.colorbar(sm,cax=cax,orientation='horizontal')
cb.ax.tick_params(labelsize=24)
cb.set_label('Level (dB)', fontsize=24)
return(fig,ax)
def mirror(self, H=3, N=1, za = [], zb= []):
""" mirror a ray termination
Parameters
----------
H : float
ceil height (default 3m)
if H=0 only floor reflection is calculated (outdoor case)
if H=-1 floor and ceil reflection are inhibited (2D test case)
N : int
handle the number of mirror reflexions
za : float
height of the point where the parametrization starts ( e.g. pTx[2])
zb : float
height of the point where the parametrization ends ( e.g. pRx[2])
Returns
-------
d : dict
k : zm v: alpham
k : zp v: alphap
Examples
--------
>>> ptx = np.array([1,1,1.5])
>>> prx = np.array([2,2,1.2])
>>> r = Rays(ptx,prx)
>>> d = r.mirror()
>>> d[-1.5]
array([ 0.55555556])
Notes
-----
d is a dictionary whose keys are the heights along the vertical
axis from which the reflected rays emanate. The values of d are the
parameterizations (between 0 and 1) along the ray where the
different reflection points are located.
"""
km = np.arange(-N+1, N+1, 1)
kp = np.arange(-N, N+1, 1)
#
# heights of transmitter and receiver
#
if za == []:
za=self.pTx[2]
if zb == []:
zb=self.pRx[2]
ht = za
hr = zb
assert (hr<H or H==0 or H == -1),"mirror : receiver higher than ceil height"
assert (ht<H or H==0 or H == -1),"mirror : transmitter higher than ceil height"
zkp = 2*kp*H + ht
zkm = 2*km*H - ht
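#
# Added note (image-source construction) : with a floor at z=0 and a
# ceiling at z=H, a transmitter at height ht has mirror images at
# zkp = 2*kp*H + ht and zkm = 2*km*H - ht.
# For example, with H=3, ht=1.5 and N=1 :
#   zkp = [-4.5, 1.5, 7.5] and zkm = [-1.5, 4.5]
#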
d = {}
if H>0:
for zm in zkm:
if zm < 0:
bup = H
pas = H
km = int(np.ceil(zm/H))
else:
bup = 0
pas = -H
km = int(np.floor(zm/H))
thrm = np.arange(km*H, bup, pas)
d[zm] = abs(thrm-zm)/abs(hr-zm)
for zp in zkp:
if zp < 0:
bup = H
pas = H
kp = int(np.ceil(zp/H))
else:
bup = 0
pas = -H
kp = int(np.floor(zp/H))
thrp = np.arange(kp*H, bup, pas)
d[zp] = abs(thrp-zp)/abs(hr-zp)
elif H==0:
d[-ht] = np.array([ht/(ht+hr)])
d[ht] = np.array([])
elif H==-1:
d[ht] = np.array([])
# print "zp",zp
# print "kp",kp
# print "thrp",thrp
# print "alphap",d[zp]
return d
def to3D(self, L, H=3, N=1, rmoutceilR=True):
""" transform 2D ray to 3D ray
Parameters
----------
L : Layout object
H : float
ceil height (default 3m)
if H= 0 only floor reflection is calculated (outdoor case)
if H=-1 floor and ceil reflection are inhibited (2D test case)
N : int
number of mirror reflexions
rmoutceilR : bool
Remove ceil reflexions in cycles (Gt nodes)
with indoor=False attribute
Returns
-------
r3d : Rays
See Also
--------
mirror
"""
if H==-1:
rmoutceilR=False
tx = self.pTx
rx = self.pRx
#
# Phase 1 : calculate Tx images height and parameterization in the
# vertical plane
#
d = self.mirror(H=H, N=N, za=tx[2], zb=rx[2])
#
# Elimination of invalid diffraction points
# If the diffraction point is a separation between 2 air walls
# it should be removed.
#
# Phase 2 : calculate 2D parameterization in the horizontal plane
#
# for all group of interactions
for i in self:
pts = self[i]['pt'][0:2, :, :]
sig = self[i]['sig']
if pts.shape[2]!=0:
# broadcasting of t and r
t = self.pTx[0:2].reshape((2, 1, 1)) * \
np.ones((1, 1, len(pts[0, 0, :])))
r = self.pRx[0:2].reshape((2, 1, 1)) * \
np.ones((1, 1, len(pts[0, 0, :])))
pts1 = np.hstack((t, np.hstack((pts, r))))
else:
t = self.pTx[0:2].reshape((2, 1, 1))
r = self.pRx[0:2].reshape((2, 1, 1))
pts1 = np.hstack((t,r))
# append t and r to interaction points in 2D
si1 = pts1[:, 1:, :] - pts1[:, :-1, :]
# array of all ray segments distances
si = np.sqrt(np.sum(si1 * si1, axis=0))
# array of cumulative distance of 2D ray
al1 = np.cumsum(si, axis=0)
# initialize parameterization parameter alpha
self[i]['alpha'] = np.zeros(np.shape(si[:-1, :]))
for j in range(len(self[i]['alpha'][:, 0])):
# get alpha
self[i]['alpha'][j, :] = np.sum(si[0:j+1, :], axis=0) \
/np.sum(si, axis=0)
# get z coordinate
self[i]['pt'][2, j, :] = tx[2] + self[i]['alpha'][j, :] \
* (rx[2] - tx[2])
#
# Phase 3 : Initialize 3D rays dictionary
#
r3d = Rays(tx, rx)
r3d.los = self.los
r3d.is3D = True
r3d.nray2D = len(self)
r3d.nb_origin_sig = self.nb_origin_sig
#
# Phase 4 : Fill 3D rays information
#
# Two nested loops
#
# for all interaction group
# for all type of 3D rays
# 1) extension
# 2) sort
# 3) coordinates as a function of parameter
#
for k in self: # for all interaction group k
# k = int(k)
# Number of rays in interaction group k
Nrayk = np.shape(self[k]['alpha'])[1]
# get 2D horizontal parameterization
a1 = self[k]['alpha']
#if (k==1):
# pdb.set_trace()
# get 2D signature
sig = self[k]['sig']
#print "signatures 2D ",sig
#print "----"
sigsave = copy.copy(sig)
# add parameterization of tx and rx (0,1)
a1 = np.concatenate((np.zeros((1, Nrayk)), a1, np.ones((1, Nrayk))))
# reshape signature in adding tx and rx
if sig.shape[0]!=0:
sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
sig,
np.zeros((2, 1, Nrayk), dtype=int))) # add signature of Tx and Rx (0,0))
else:
sig = np.hstack((np.zeros((2, 1, Nrayk), dtype=int),
np.zeros((2, 1, Nrayk), dtype=int)))
# broadcast tx and rx
Tx = tx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))
Rx = rx.reshape(3, 1, 1)*np.ones((1, 1, Nrayk))
if k!=0:
# pte is the sequence of point in 3D ndim =3 ( ndim x k x Nrayk)
pte = self[k]['pt']
# ndim x k+2 x Nrayk
pte = np.hstack((Tx, pte, Rx))
else:
pte = np.hstack((Tx, Rx))
# extension
for l in d: # for each vertical pattern (C,F,CF,FC,....)
#print k,l,d[l]
Nint = len(d[l]) # number of additional interaction
#if ((k==1) & (l==5.0)):print
if Nint > 0: # if new interaction ==> need extension
# a1e : extended horizontal+vertical parameterization
a1e = np.concatenate((a1, d[l].reshape(len(d[l]), 1)*
np.ones((1, Nrayk))))
# get sorted indices
ks = np.argsort(a1e, axis=0)
# a1es : extended sorted horizontal + vertical parameterization
a1es = np.sort(a1e, axis=0)
# #### Check if it exists the same parameter value in the horizontal plane
# #### and the vertical plane. Move parameter if so.
da1es = np.diff(a1es,axis=0)
pda1es = np.where(da1es<1e-10)
a1es[pda1es]=a1es[pda1es]-1e-3
# prepare an extended sequence of points ( ndim x (Nint+k+2) x Nrayk )
ptee = np.hstack((pte, np.zeros((3, Nint, Nrayk))))
#
# Boolean ceil/floor detector
#
# u is 4 (floor interaction )
# 5 (ceil interaction )
# depending on the vertical pattern l.
#
# l <0 corresponds to last reflexion on floor
# l >0 corresponds to last reflexion on ceil
#
# u =0 (floor) or 1 (ceil)
# if l < 0:
# u = np.mod(range(Nint), 2)
# else:
# u = 1 - np.mod(range(Nint), 2)
if l < 0 and Nint%2 ==1: # l<0 Nint odd
u = np.mod(range(Nint), 2)
elif l > 0 and Nint%2 ==1: # l>0 Nint odd
u = 1 - np.mod(range(Nint), 2)
elif l < 0 and Nint%2 ==0: # l<0 Nint even
u = 1 - np.mod(range(Nint), 2)
elif l > 0 and Nint%2 ==0: # l>0 Nint even
u = np.mod(range(Nint), 2)
#
u = u + 4
#
# At that point we introduce the signature of the new
# introduced points on the ceil and/or floor.
#
# A signature is composed of two lines
# esigs sup line : interaction number
# esigi inf line : interaction type
#
esigs = np.zeros((1, Nint, Nrayk), dtype=int)
esigi = u.reshape(1, Nint, 1)* np.ones((1, 1, Nrayk), dtype=int)
# esig : extension of the signature
esig = np.vstack((esigs, esigi))
# sige : signature extended ( 2 x (Nint+k+2) x Nrayk )
sige = np.hstack((sig, esig))
#
# 2 x (Nint+k+2) x Nrayk
#
# sort extended sequence of points
# and extended sequence of signatures with the sorting
# index ks obtained from argsort of merge parametization
#
# sequence of extended sorted points
#
ptees = ptee[:, ks, range(Nrayk)]
siges = sige[:, ks, range(Nrayk)]
# extended and sorted signature
iint_f, iray_f = np.where(siges[ 1, :] == 4) # floor interaction
iint_c, iray_c = np.where(siges[ 1, :] == 5) # ceil interaction
#print siges
#
# find the list of the previous and next points around each
# new ceil or floor point. The case of successive ceil or
# floor reflections is handled below.
#
# All preceding points which are not ceil or floor points,
# and all following points which are not ceil or floor
# reflection points.
#
# To account for the ray and the interaction group involved,
# a tuple concatenating the floor or ceil interaction index
# and the ray index of the associated group is passed
# (hence the zip).
#
# This sequence of instructions fixes bug #133
#
# Previously, it was assumed that a documented 2D point
# immediately followed.
#
try:
iintm_f = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
(siges[1,0:x[0],x[1]]!=5))[0][-1]
for x in zip(iint_f,iray_f) ]
iintp_f = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
(siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
for x in zip(iint_f,iray_f) ]
iintm_c = [ np.where( (siges[1,0:x[0],x[1]]!=4) &
(siges[1,0:x[0],x[1]]!=5))[0][-1]
for x in zip(iint_c,iray_c) ]
iintp_c = [ np.where( (siges[1,x[0]:,x[1]]!=4) &
(siges[1,x[0]:,x[1]]!=5))[0][0]+x[0]
for x in zip(iint_c,iray_c) ]
except:
pdb.set_trace()
# Update coordinate in the horizontal plane
#
#
# The new interaction ceil or floor has no coordinates in
# the horizontal plane.
# Those coordinates are evaluated first by finding a sub
# parameterization of the point with respect to the two
# known adjascent interaction point j-1 and j+1 (Thales)
#
#iintm_f = iint_f - 1
#iintp_f = iint_f + 1
#iintm_c = iint_c - 1
#iintp_c = iint_c + 1
#
# If there are floor points
#
if len(iint_f)>0:
a1esm_f = a1es[iintm_f, iray_f]
a1esc_f = a1es[iint_f, iray_f]
a1esp_f = a1es[iintp_f, iray_f]
pteesm_f = ptees[0:2, iintm_f, iray_f]
pteesp_f = ptees[0:2, iintp_f, iray_f]
coeff_f = (a1esc_f-a1esm_f)/(a1esp_f-a1esm_f)
ptees[0:2, iint_f, iray_f] = pteesm_f + coeff_f*(pteesp_f-pteesm_f)
#
# If there are ceil points
#
if len(iint_c)>0:
a1esm_c = a1es[iintm_c, iray_c]
a1esc_c = a1es[iint_c, iray_c]
a1esp_c = a1es[iintp_c, iray_c]
pteesm_c = ptees[0:2, iintm_c, iray_c]
pteesp_c = ptees[0:2, iintp_c, iray_c]
coeff_c = (a1esc_c-a1esm_c)/(a1esp_c-a1esm_c)
ptees[0:2, iint_c, iray_c] = pteesm_c + coeff_c*(pteesp_c-pteesm_c)
if H != 0:
z = np.mod(l+a1es*(rx[2]-l), 2*H)
pz = np.where(z > H)
z[pz] = 2*H-z[pz]
ptees[2, :] = z
# case where ceil reflection are inhibited
elif H==0:
z = abs(l+a1es*(rx[2]-l))
# pz = np.where(z > H)
# z[pz] = 2*H-z[pz]
ptees[2, :] = z
# recopy old 2D parameterization (no extension)
else:
a1es = a1
ks = np.argsort(a1es, axis=0)
ptees = pte
# fixing bug
siges = copy.copy(sig)
#print siges
#---------------------------------
# handling multi segment (iso segments)
# Height of reflexion interaction
# Height of diffraction interaction
#---------------------------------
#
# ptes (3 x i+2 x r )
if len(L.lsss)>0:
#
# lsss : list of sub segments ( iso segments siges)
# lnss : list of diffraction points involving multi (iso) segments
lsss = np.array(L.lsss)
lnss = np.array(L.lnss)
# array of structure element (nstr) with TxRx extension (nstr=0)
anstr = siges[0,:,:]
# type of interaction
typi = siges[1,:,:]
# lss : list of subsegments in the current signature
#
# scalability : avoid a loop over all the subsegments in lsss
#
lss = [ x for x in lsss if x in anstr.ravel()]
ray_to_delete = []
for s in lss:
u = np.where(anstr==s)
if len(u)>0:
zs = ptees[2,u[0],u[1]]
zinterval = L.Gs.node[s]['z']
unot_in_interval = ~((zs<=zinterval[1]) & (zs>=zinterval[0]))
ray_to_delete.extend(u[1][unot_in_interval])
# lns : list of diffraction points in the current signature
# involving multi segments (iso)
# scalability : avoid a loop over all the points in lnss
#
lns = [ x for x in lnss if x in anstr.ravel()]
#
# loop over multi diffraction points
#
for npt in lns:
# diffraction corner in espoo.lay
#if npt==-225:
# import ipdb
# ipdb.set_trace()
u = np.where(anstr==npt)
if len(u)>0:
# height of the diffraction point
zp = ptees[2,u[0],u[1]]
#
# At which couple of segments belongs this height ?
# get_diffslab function answers that question
#
ltu_seg,ltu_slab = L.get_diffslab(npt,zp)
#
# delete rays where diffraction point is connected to
# 2 AIR segments
#
[ray_to_delete.append(u[1][i]) for i in range(len(zp))
if ((ltu_slab[i][0]=='AIR') & (ltu_slab[i][1]=='AIR'))]
# #zinterval = L.Gs.node[s]['z']
# # if (zs<=zinterval[1]) & (zs>=zinterval[0]):
# if ((tu_slab[0]!='AIR') & (tu_slab[1]!='AIR')):
# #print(npt , zp)
# pass
# else:
# ray_to_delete.append(u[1][0])
# # nstr : structure number
# nstr = np.delete(nstr,ray_to_delete,axis=1)
# typi : type of interaction
typi = np.delete(typi,ray_to_delete,axis=1)
# 3d sequence of points
ptees = np.delete(ptees,ray_to_delete,axis=2)
# extended (floor/ceil) signature
siges = np.delete(siges,ray_to_delete,axis=2)
if rmoutceilR:
# 1 determine Ceil reflexion index
# uc (inter x ray)
uc = np.where(siges[1,:,:]==5)
ptc = ptees[:,uc[0],uc[1]]
if len(uc[0]) !=0:
P = shg.MultiPoint(ptc[:2,:].T)
# to determine the cycles where ceil reflections happen
# uinter(nb pt x nb cycles)
mapnode = list(L.Gt.nodes())
uinter = np.array([[L.Gt.node[x]['polyg'].contains(p) for x in mapnode if x>0] for p in P])
# import ipdb
# ipdb.set_trace()
#[plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
#[ plt.scatter(p.xy[0],p.xy[1],c='r') for up,p in enumerate(P) if uinter[0,up]]
# find whether points are in indoor/outdoor cycles
upt,ucy = np.where(uinter)
uout = np.where([not L.Gt.node[mapnode[u+1]]['indoor'] for u in ucy])[0] #ucy+1 is to manage cycle 0
# 3 remove ceil reflexions of outdoor cycles
if len(uout)>0:
ptees = np.delete(ptees,uc[1][uout],axis=2)
siges = np.delete(siges,uc[1][uout],axis=2)
sigsave = np.delete(sigsave,uc[1][uout],axis=2)
if k+Nint in r3d:
r3d[k+Nint]['pt'] = np.dstack((r3d[k+Nint]['pt'], ptees))
r3d[k+Nint]['sig'] = np.dstack((r3d[k+Nint]['sig'], siges))
r3d[k+Nint]['sig2d'].append(sigsave)
else:
if ptees.shape[2]!=0:
r3d[k+Nint] = {}
r3d[k+Nint]['pt'] = ptees
r3d[k+Nint]['sig'] = siges
r3d[k+Nint]['sig2d'] = [sigsave]
# ax=plt.gca()
# uu = np.where(ptees[2,...]==3.0)
# ax.plot(ptees[0,uu[0],uu[1]],ptees[1,uu[0],uu[1]],'ok')
# import ipdb
# ipdb.set_trace()
#
# Add Line Of Sight ray information
# pt = [tx,rx]
# sig = [0,0]
#
#pdb.set_trace()
# if (self.los) & (np.sqrt(np.sum((tx-rx)**2)) !=0) :
# r3d[0] = {}
# r3d[0]['sig'] = np.zeros((2,2,1))
# r3d[0]['sig2d'] = np.zeros((2,2,1))
# r3d[0]['pt'] = np.zeros((3,2,1))
# r3d[0]['pt'][:,0,:] = tx[:,np.newaxis]
# r3d[0]['pt'][:,1,:] = rx[:,np.newaxis]
# r3d.nray = reduce(lambda x,y : y + np.shape(r3d[x]['sig'])[2],lnint)
# count total number of ray
# evaluate length of ray segment
#
# vsi
# si
# dis
#
val =0
for k in r3d.keys():
nrayk = np.shape(r3d[k]['sig'])[2]
r3d[k]['nbrays'] = nrayk
r3d[k]['rayidx'] = np.arange(nrayk)+val
r3d.nray = r3d.nray + nrayk
val=r3d[k]['rayidx'][-1]+1
# 3 : x,y,z
# i : interaction index
# r : ray index
#
# k : group of interactions index
#
v = r3d[k]['pt'][:, 1:, :]-r3d[k]['pt'][:, 0:-1, :]
lsi = np.sqrt(np.sum(v*v, axis=0))
rlength = np.sum(lsi,axis=0)
if (lsi.any()==0):
pdb.set_trace()
if not (lsi.all()>0):
pdb.set_trace()
#assert(lsi.all()>0)
if (len(np.where(lsi==0.))==0) :
pdb.set_trace()
#
# sort rays w.r.t their length
#
u = np.argsort(rlength)
r3d[k]['pt'] = r3d[k]['pt'][:,:,u]
r3d[k]['sig'] = r3d[k]['sig'][:,:,u]
#r3d[k]['sig2d'] = r3d[k]['sig2d'][:,:,u]
si = v/lsi # ndim , nint - 1 , nray
# vsi : 3 x (i+1) x r
r3d[k]['vsi'] = si[:,:,u]
# si : (i+1) x r
r3d[k]['si'] = lsi[:,u]
r3d[k]['dis'] = rlength[u]
r3d.delays = np.zeros((r3d.nray))
for k in r3d.keys():
ir = r3d[k]['rayidx']
r3d.delays[ir] = r3d[k]['dis']/0.3
r3d.origin_sig_name = self.origin_sig_name
r3d.Lfilename = L._filename
r3d.filename = L._filename.split('.')[0] + '_' + str(r3d.nray)
return(r3d)
def get_rays_slabs(self,L,ir):
""" return the slabs for a given interaction index
Parameters
----------
L : Layout
ir : interaction block
Returns
-------
numpy array of slab strings with shape (ir,r)
ir : number of interactions ( of the interaction block)
r : number of rays
"""
v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')
return v(self[ir]['sig'][0])
def remove_aw(self,L):
""" remove AIR interactions
"""
# def consecutive(data, stepsize=1):
# return np.split(data, np.where(np.diff(data) != stepsize)[0]+1)
R = Rays(self.pTx,self.pRx)
R.__dict__.update(self.__dict__)
# R.is3D=True
# R.nray = self.nray
# R.nray2D = self.nray2D
# R.nray2D = self.nray2D
# R.nray2D = self.nray2D
for k in self:
lr = self[k]['sig'].shape[1]
inter = self.get_rays_slabs(L,k)
for ur,r in enumerate(inter.T):
not_air_mask = ~((r =='_AIR') | (r == 'AIR' ))
nb_air = sum(~not_air_mask)
if nb_air != 0 :
new_bi = k-nb_air
# +2 : add tx & rx interactions
# -1 : 2 interactions correspond to 1 distance
lsi = new_bi + 2 - 1
si = np.zeros(lsi)
si_old = self[k]['si'][:,ur]
vsi = np.zeros((3,lsi))
vsi_old = self[k]['vsi'][...,ur]
sig = self[k]['sig'][:,not_air_mask,ur][...,None]
# sig2d = self[k]['sig2d'][0][...,ur]
pt = self[k]['pt'][:,not_air_mask,ur][...,None]
u = 0
si_aw = 0
# import ipdb
# ipdb.set_trace()
for uold,b in enumerate(not_air_mask[1:]):
if b:
# update new si with sum of all
# distance from preceding airwall
si[u] = si_old[uold] + si_aw
# keep vsi from the last airwall
# because vsi don't change on an airwall
vsi[:,u] = vsi_old[:,uold]
u += 1
si_aw=0
else:
si_aw += si_old[uold]
si = si[...,None]
vsi = vsi[...,None]
dis = np.array([np.sum(si)])
assert np.allclose(dis,np.sum(si_old))
else:
# no air wall case, fill R with self values
new_bi = k
pt = self[k]['pt'][...,ur][...,None]
sig = self[k]['sig'][...,ur][...,None]
# sig2d = self[k]['sig2d'][0][...,ur]
si = self[k]['si'][:,ur][:,None]
vsi = self[k]['vsi'][...,ur][...,None]
dis = np.array([self[k]['dis'][ur]])
if new_bi in R:
# R[new_bi]['sig2d'].append(self[k]['sig2d'][ur])
R[new_bi]['pt'] = np.concatenate((R[new_bi]['pt'],pt),axis=2)
R[new_bi]['sig'] = np.concatenate((R[new_bi]['sig'],sig),axis=2)
R[new_bi]['rayidx'] = np.concatenate((R[new_bi]['rayidx'],np.array([self[k]['rayidx'][ur]])))
R[new_bi]['si'] = np.concatenate((R[new_bi]['si'],si),axis=1)
R[new_bi]['vsi'] = np.concatenate((R[new_bi]['vsi'],vsi),axis=2)
R[new_bi]['dis'] = np.concatenate((R[new_bi]['dis'],dis),axis=0)
else:
R[new_bi] = {}
# R[new_bi]['sig2d'] = [self[k]['sig2d'][0][...,ur]]
R[new_bi]['pt'] = pt
R[new_bi]['sig'] = sig
R[new_bi]['rayidx'] = np.array([self[k]['rayidx'][ur]])
R[new_bi]['si'] = si
R[new_bi]['vsi'] = vsi
R[new_bi]['dis'] = dis
if 0 in R:
R.los=True
X = [[R[k]['rayidx'][u] for u in range(len(R[k]['rayidx']))] for k in R]
R._rayidx_aw = sum(X,[])
return R
def length(self,typ=2):
""" calculate length of rays
Parameters
----------
typ : int
1 : length of all segments
2 : accumulated length
"""
dk = {}
for k in self: # for all interaction group k
# 3 x Ni-1 x Nr
vk = self[k]['pt'][:,1:,:]-self[k]['pt'][:,0:-1,:]
d1 = np.sqrt(np.sum(vk*vk,axis=0))
d2 = np.sum(d1,axis=0)
if typ==1:
dk[k] = d1
if typ==2:
dk[k] = d2
return(dk)
def simplify(self):
if not self.is3D:
return None
for ir in self:
print(self[ir]['si'])
def locbas(self, L):
""" calculate ray local bas
Parameters
----------
L : Layout
Notes
-----
This method adds for each group of interactions the following members
norm : np.array
3 x i x r (interaction vector)
nstrwall : np.array
nstr of interactions
vsi : np.array
3 x (i+1) x r
aod : np.array
2 x r
aoa : np.array
2 x r
Bo0 : np.array
3 x 3 x r
Bi : np.array
3 x 3 x r
Bo : np.array
3 x 3 x r
BiN : np.array
3 x 3 x r
scpr : np.array
i x r
theta : np.array
i x r
rays : int
nbrays : int
rayidx : np.array
diffslabs : list
diffvect : np.array
(phi0,phi,beta,NN)
"""
#
# extract normal in np.array
#
# nsegment x 3
norm = np.array(list(nx.get_node_attributes(L.Gs,'norm').values()))
# nsegment x k
key = np.array(list(dict(nx.get_node_attributes(L.Gs,'norm')).keys()))
# maximum number for referring to a segment
# not to be confused with a segment number
nsmax = max(L.Gs.node.keys())
mapping = np.zeros(nsmax+1, dtype=int)
mapping[key] = np.arange(len(key), dtype=int)
#
# Structure number : nstr
# the structure number is < 0 for points
# > 0 for segments
# A segment can have several subsegments (until 100)
# nstrs is the nstr of the segment if it is a subsegment :
# nstr is the global index which allows recovering the slab values
#
idx = np.array(())
if self.los:
idxts = 1
nbrayt = 1
else:
idxts = 0
nbrayt = 0
# list of used wedges
luw=[]
lgi = list(self.keys())
lgi.sort()
for k in lgi:
#
# k is the number of interactions in the block
#
#print(k,self[11]['rayidx'])
if k != 0:
# structure number (segment or point)
# nstr : i x r
nstr = self[k]['sig'][0, 1:-1, :]
# ityp : i x r
ityp = self[k]['sig'][1, 1:-1, :]
# nstr of underlying segment
# position of interaction corresponding to a sub segment
# print nstr
#
# uss : index of subsegment
# subsegments are not nodes of Gs but have positive nstr index
#
uss = np.where(nstr > nsmax)
# print uss
nstrs = copy.copy(nstr)
#
# if subsegments have been found
#
if len(uss) >0:
ind = nstr[uss]- nsmax-1
nstrs[uss] = np.array(L.lsss)[ind]
# print nstr
#print nstrs
#pdb.set_trace()
nray = np.shape(nstr)[1]
uwall = np.where((ityp == 2) | (ityp == 3))
udiff = np.where((ityp == 1))
ufloor= np.where((ityp == 4))
uceil = np.where((ityp == 5))
nstrwall = nstr[uwall[0], uwall[1]] # nstr of walls
nstrswall = nstrs[uwall[0], uwall[1]] # nstrs of walls
self[k]['nstrwall'] = nstrwall # store nstr without subsegment
self[k]['nstrswall'] = nstrswall # store nstr with subsegment
self[k]['norm'] = np.zeros((3, k, nray)) # 3 x int x nray
# norm : 3 x i x r
#
# norm is the vector associated to the interaction
# For the diffraction case the normal is replaced by the unit
# vector along the wedge directed upward.
#
self[k]['norm'][:, uwall[0], uwall[1]] = norm[mapping[nstrswall],:].T
self[k]['norm'][2, ufloor[0], ufloor[1]] = np.ones(len(ufloor[0]))
self[k]['norm'][2, uceil[0], uceil[1]] = -np.ones(len(uceil[0]))
self[k]['norm'][2, udiff[0], udiff[1]] = np.ones(len(udiff[0]))
normcheck = np.sum(self[k]['norm']*self[k]['norm'],axis=0)
assert normcheck.all()>0.99,pdb.set_trace()
# 3 : x,y,z
# i : interaction index
# r : ray index
#
# k : group of interactions index
#
#v = self[k]['pt'][:, 1:, :]-self[k]['pt'][:, 0:-1, :]
#lsi = np.sqrt(np.sum(v*v, axis=0))
#if (lsi.any()==0):
# pdb.set_trace()
#assert(lsi.all()>0)
#if (len(np.where(lsi==0.))==0) :
# pdb.set_trace()
#si = v/lsi # ndim , nint - 1 , nray
# si : 3 x (i+1) x r
si = self[k]['vsi']
# si : (i+1) x r
#self[k]['si'] = lsi
#self[k]['dis'] = np.sum(lsi,axis=0)
# normal : 3 x i x r
vn = self[k]['norm']
# s_in : 3 x i x r
s_in = si[:, 0:-1, :]
# s_out : 3 x i x r
s_out = si[:, 1:, :]
#
# AOD (rad)
#
# th : ,r
thd = np.arccos(si[2, 0, :])
# ph : ,r
phd = np.arctan2(si[1, 0, :], si[0, 0, :])
# aod : 2 x r (radians)
self[k]['aod'] = np.vstack((thd, phd))
# eth : 3 x r
eth = np.array([np.cos(thd) * np.cos(phd),
np.cos(thd) * np.sin(phd),
-np.sin(thd)])
# eph : 3 x r
eph = np.array([-np.sin(phd),
np.cos(phd),
np.zeros(len(phd))])
# Bo0 : 3 x 3 x r
Bo0 = np.concatenate((si[:, 0, None, :],
eth[:, None, :],
eph[:, None, :]), axis=1)
self[k]['Bo0'] = Bo0
#
# scalar product si . norm
#
# vn : 3 x i x r
# s_in : 3 x i x r
#
# scpr : i x r
#
scpr = np.sum(vn*si[:,0:-1,:], axis=0)
self[k]['scpr'] = scpr
self[k]['theta'] = np.arccos(abs(scpr)) # *180/np.pi
def fix_colinear(w):
"""
w : vector
"""
nw = np.sqrt(np.sum(w*w, axis=0))
u = np.where(nw==0)
if len(u[0])!=0:
logger.debug('colinear situation detected')
if (u[0].any() or u[1].any()) \
or (u[0].any()==0 or u[1].any()==0):
uu = np.array([u[0],u[1]]).T
#determine which interaction and rays
#present the colinearity issue
uvv = abs(vn[2,uu[:,0],uu[:,1]])>0.99
# uv : nbi x nbr colinear index
uv = uu[uvv]
# uh : nbi x nbr anti-colinear index
uh = uu[np.logical_not(uvv)]
try:
#fix w for colinear index
w[:,uv[:,0],uv[:,1]] = np.array(([1,0,0]))[:,None]
# update normal
nw[uv[:,0],uv[:,1]] = np.sqrt(np.sum(
    w[:,uv[:,0],uv[:,1]]*w[:,uv[:,0],uv[:,1]],axis=0))
except:
pass
try:
# fix w for anti-colinear index
w[:,uh[:,0],uh[:,1]] = np.array(([0,0,1]))[:,None]
# update normal
nw[uh[:,0],uh[:,1]] = \
np.sqrt(np.sum(w[:,uh[:,0],uh[:,1]]*w[:,uh[:,0],uh[:,1]],axis=0))
except:
pass
return w, nw
#
# Warning need to handle singular case when s_in // vn
#
# w : 3 x i x r
#
w = np.cross(s_in, vn, axisa=0, axisb=0, axisc=0)
# nw : i x r
w, nw = fix_colinear(w)
wn = w/nw
v = np.cross(wn, s_in, axisa=0, axisb=0, axisc=0)
es_in = np.expand_dims(s_in, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bi 3 x 3 x i x r
Bi = np.concatenate((es_in,ew,ev),axis=1)
# self[k]['Bi'] 3 x 3 x i x r
self[k]['Bi'] = Bi
################################
w = np.cross(s_out, vn, axisa=0, axisb=0, axisc=0)
w, nw = fix_colinear(w)
#wn = w/np.sqrt(np.sum(w*w, axis=0))
wn = w/nw
v = np.cross(wn, s_out, axisa=0, axisb=0, axisc=0)
es_out = np.expand_dims(s_out, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bi 3 x 3 x i x r
Bo = np.concatenate((es_out,ew,ev),axis=1)
# self[k]['Bo'] 3 x 3 x i x r
self[k]['Bo'] = Bo
#
# AOA (rad)
#
# th : ,r
# fix doa/dod reciprocity
#th = np.arccos(si[2, -1, :])
tha = np.arccos(si[2, -1, :])
# th : ,r
#ph = np.arctan2(si[1, -1, :], si[0, -1, :])
pha = np.arctan2(si[1, -1, :], si[0, -1, :])
# aoa : 2 x r (radians)
self[k]['aoa'] = np.vstack((tha, pha))
eth = np.array([np.cos(tha) * np.cos(pha),
np.cos(tha) * np.sin(pha),
-np.sin(tha)])
eph = np.array([-np.sin(pha),
np.cos(pha),
np.zeros(len(pha))])
# Bo0 : 3 x 3 x r
BiN = np.concatenate((si[:,-1,None,:],
eth[:, None, :],
eph[:, None, :]), axis=1)
self[k]['BiN'] = BiN
#self[k]['BiN'] = np.concatenate((-si[:,-1,np.newaxis,:],eth[:,np.newaxis,:],
# eph[:,np.newaxis,:]),axis=1)
# Creation of B from Bi and Bo
# is done after the potential diffraction
# computation
## index creation
##################
# create index for retrieving interactions
# integer offset : total size idx
idxts = idxts + idx.size
idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')
nbray = np.shape(idx)[1]
self[k]['rays'] = idx
self[k]['nbrays'] = nbray
self[k]['rayidx'] = nbrayt + np.arange(nbray)
# create a numpy array to relate the ray index to its corresponding
# number of interactions
#pdb.set_trace()
_ray2nbi = np.ones((nbray), dtype=int)
try:
self._ray2nbi = np.hstack((self._ray2nbi,_ray2nbi))
except:
self._ray2nbi = _ray2nbi
self._ray2nbi[self[k]['rayidx']] = k
nbrayt = nbrayt + nbray
self.raypt = self.raypt + self[k]['nbrays']
#################################
# Start diffraction specific case
#################################
if len(udiff[0]) != 0 :
Z = np.where(ityp.T==1)
udiff=Z[1],Z[0]
# diffseg,udiffseg = np.unique(nstr[udiff],return_inverse=True)
diffupt=nstr[udiff]
# position of diff seg (- because iupnt accept > 0 reference to points)
#
# TO BE FIXED
#
#ptdiff = L.pt[:,L.iupnt[-diffupt]]
ptdiff = np.array([ (L.Gs.pos[x][0],L.Gs.pos[x][1]) for x in diffupt ]).T
self[k]['diffidx'] = idx[udiff[0],udiff[1]]
# get tail head position of seg associated to diff point
lair = L.name['AIR'] + L.name['_AIR']
#aseg = map(lambda x : filter(lambda y : y not in lair,
# nx.neighbors(L.Gs,x)),
# diffupt)
aseg = [ [ y for y in nx.neighbors(L.Gs,x) if y not in lair ] for x in diffupt ]
# manage flat angle : diffraction by flat segment (e.g. door limitation)
[aseg[ix].extend(x) for ix,x in enumerate(aseg) if len(x)==1]
# get points positions
#pdb.set_trace()
pts = np.array([ L.seg2pts([x[0],x[1]]) for x in aseg ])
#self[k]['diffslabs']=[str(L.sl[L.Gs.node[x[0]]['name']])+'_'
# + str(L.sl[L.Gs.node[x[1]]['name']]]) for x in aseg]
self[k]['diffslabs']=[ L.Gs.node[x[0]]['name']+'@'
+ L.Gs.node[x[1]]['name'] for x in aseg]
uwl = np.unique(self[k]['diffslabs']).tolist()
luw.extend(uwl)
pt1 = pts[:,0:2,0] #tail seg1
ph1 = pts[:,2:4,0] #head seg1
pt2 = pts[:,0:2,1] #tail seg2
ph2 = pts[:,2:4,1] #head seg2
#pts is (nb_diffraction_points x 4 x 2)
#- The dimension 4 represent the 2x2 points: t1,h1 and t2,h2
# tail and head of segment 1 and 2 respectively
# a segment
#- The dimension 2 is x,y
#
# The following aims to determine which tails and heads of
# segments associated to a given diffraction point
# are connected
#
#
# point diff is pt1
updpt1 = np.where(np.sum(ptdiff.T==pt1,axis=1)==2)[0]
# point diff is ph1
updph1 = np.where(np.sum(ptdiff.T==ph1,axis=1)==2)[0]
# point diff is pt2
updpt2 = np.where(np.sum(ptdiff.T==pt2,axis=1)==2)[0]
# point diff is ph2
updph2 = np.where(np.sum(ptdiff.T==ph2,axis=1)==2)[0]
pa = np.empty((len(diffupt),2))
pb = np.empty((len(diffupt),2))
####seg 1 :
#if pt1 diff point => ph1 is the other point
pa[updpt1]= ph1[updpt1]
#if ph1 diff point => pt1 is the other point
pa[updph1]= pt1[updph1]
####seg 2 :
#if pt2 diff point => ph2 is the other point
pb[updpt2]= ph2[updpt2]
#if ph2 diff point => pt2 is the other point
pb[updph2]= pt2[updph2]
pt = ptdiff.T
# NN : (nb_diffraction_points)
# alpha wedge (a.k.a. wedge parameter, a.k.a. wedge aperture)
NN = (360.-geu.sector(pa.T,pb.T,pt.T))/180.
# NN = (2.-NN)*np.pi
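# Assuming geu.sector returns the wedge interior angle in degrees,
# a flat wedge (180 deg) gives NN = (360-180)/180 = 1 while a 90 deg
# corner gives NN = (360-90)/180 = 1.5 (the usual UTD wedge parameter n)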
#angle between face 0, diffraction point and s_in
#s_in[:2,udiff[0],udiff[1]] :
# s_in of interactions udiff (2D) restricted to diffraction points
vptpa = pt-pa
vptpan = vptpa.T / np.sqrt(np.sum((vptpa)*(vptpa),axis=1))
# vpapt= pa-pt # papt : direction vector of face 0
# vpaptn = vpapt.T / np.sqrt(np.sum((vpapt)*(vpapt),axis=1))
sid = s_in[:,udiff[0],udiff[1]] #s_in restricted to diff
sod = s_out[:,udiff[0],udiff[1]] #s_out restricted to diff
vnormz = self[k]['norm'][:, udiff[0], udiff[1]]
#phi0 = arccos(dot(sid*vpavptn))
# phi0 = geu.vecang(sid[:2],vpaptn)
uleft = geu.isleft(pa.T,pt.T,pb.T)
phi0 = geu.vecang(vptpan,sid[:2])
phi0[~uleft] = geu.vecang(sid[:2,~uleft],vptpan[:,~uleft])
# phi0 = np.arccos(np.sum(sid[:2]*vpaptn,axis=0))
#phi = arccos(dot(sod*vpavptn))
# phi = np.arccos(np.sum(-sod[:2]*vpaptn,axis=0))
phi = geu.vecang(vptpan,-sod[:2])
phi[~uleft] = geu.vecang(-sod[:2,~uleft],vptpan[:,~uleft])
# beta
# it is important to check whether sid comes from the left or the right.
# To this end, assume that the sid vector is composed of 2 points : (0,0) and sid,
# compared to the x position of the diffraction point, with an elevation = 0
sidxz = sid[[0,2]]
vnormxz = vnormz[[0,2]]
zero = np.zeros((2,ptdiff.shape[1]))
zdiff = np.vstack((ptdiff[0],zero[0]))
left = geu.isleft(zero,sidxz,zdiff)
beta = np.arccos(np.sum(vnormz*sid,axis=0))
# self[k]['diffvect'] is (4 x Nb_rays )
# along axis 0, the length-4 dimension represents :
# 0 => phi0
# 1 => phi
# 2 => beta
# 3 => N (wedge parameter)
self[k]['diffvect']=np.array((phi0,phi,beta,NN))
######
#Bi diffract
#####
#w is the \perp \soft in diff
w = np.cross(-sid, vnormz, axisa=0, axisb=0, axisc=0)
# nw : i x r
w, nw = fix_colinear(w)
wn = w/nw
# Handling channel reciprocity s_in --> -s_in
#v = np.cross(wn, s_in, axisa=0, axisb=0, axisc=0)
v = np.cross(wn, -sid, axisa=0, axisb=0, axisc=0)
e_sid = np.expand_dims(-sid, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bid 3 x 3 x (i,r)diff
Bid = np.concatenate((e_sid,ev, ew), axis=1)
#update Bi for diffracted rays
Bi[:,:,udiff[0],udiff[1]] = Bid
######
#Bo diffract
#####
w = np.cross(sod,vnormz, axisa=0, axisb=0, axisc=0)
w, nw = fix_colinear(w)
wn = w/nw
#wn = w/np.sqrt(np.sum(w*w, axis=0))
v = np.cross(wn, sod, axisa=0, axisb=0, axisc=0)
e_sod = np.expand_dims(sod, axis=1)
ew = np.expand_dims(wn, axis=1)
ev = np.expand_dims(v, axis=1)
# Bod 3 x 3 x (i,r)diff
Bod = np.concatenate((e_sod,ev, ew), axis=1)
#update Bo for diffracted rays
Bo[:,:,udiff[0],udiff[1]] = Bod
#################################
# End of diffraction specific case
##################################
#
# pasting (Bo0,B,BiN)
#
# B : 3 x 3 x i x r
Bo = np.concatenate((Bo0[:, :, np.newaxis, :], Bo), axis=2)
Bi = np.concatenate((Bi, BiN[:, :, np.newaxis, :]), axis=2)
# B : 3 x 3 x i x r
self[k]['B'] = np.einsum('xv...,xw...->vw...', Bi, Bo)
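# einsum('xv...,xw...->vw...', Bi, Bo) computes, for each index m and ray r,
# B[:,:,m,r] = Bi[:,:,m,r].T dot Bo[:,:,m,r] : the rotation connecting the
# output basis of one interaction (or Bo0 at departure) to the input basis
# of the next (or BiN at arrival)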
#self[k]['B'] = np.einsum('vx...,xw...->vw...', Bi, Bo)
#BiN = np.array([si[:,-1,:], eth, eph]) # ndim x 3 x Nray
#self[k]['BiN']=BiN
# self[k]['B']=np.sum(self[k]['Bi'][:2,:2,np.newaxis]*self[k]['Bo'][np.newaxis,:2,:2],axis=1)
# if los exists
else :
self[k]['nstrwall'] = np.array(())
self[k]['norm'] = np.array(())
si = np.sqrt(np.sum((self[0]['pt'][:,0]-self[0]['pt'][:,1])**2,axis=0))
self[k]['si'] = np.vstack((si,0.))
self[k]['vsi'] = (self[0]['pt'][:,1]-self[0]['pt'][:,0])/si
self[k]['dis'] = np.array((si))
vsi = self[k]['vsi']
thd = np.arccos(vsi[2])
phd = np.arctan2(vsi[1], vsi[0])
self[k]['aod'] = np.vstack((thd, phd))
self[k]['Bo0'] = np.array(())
self[k]['scpr'] = np.array(())
self[k]['theta'] = np.zeros((1,1))
#
# The following derivation of the doa follows the chosen angle convention.
# These angles are relative to the natural spherical coordinate system in the gcs of the scene.
#
# for a LOS path :
# tha = pi - thd
# pha = phd - pi
#
#self[k]['aoa'] = np.vstack((np.pi-thd, phd-np.pi))
self[k]['aoa'] = np.vstack((thd,phd))
E = np.eye(2)[:,:,np.newaxis,np.newaxis]
self[k]['B'] = np.dstack((E,E))
ze = np.array([0])
self[k]['rays'] = np.array(([[0]]))
self[k]['nbrays'] = 1
self[k]['rayidx'] = ze
self.raypt = 1
self._ray2nbi = ze
self._luw = np.unique(luw).tolist()
self.isbased = True
def fillinter(self, L, append=False):
""" fill ray interactions
Parameters
----------
L : Layout
append : Boolean
If True append new rays to existing structure
Notes
-------
This method adds the following members
I : Interactions
B : IntB
B0 : IntB
"""
# reinitialized ray pointer if not in append mode
if not append:
self.raypt = 0
# stacked interactions
I = Interactions(slab=L.sl)
# rotation basis
B = IntB(slab=L.sl)
B0 = IntB(slab=L.sl)
# # LOS Interaction
# Los = IntL()
# Reflexion
R = IntR(slab=L.sl)
# Transmission
T = IntT(slab=L.sl)
# Diffraction
D = IntD(slab=L.sl)
idx = np.array(())
if self.los:
idxts = 1
nbrayt = 1
else:
idxts = 0
nbrayt = 0
# Transform dictionary of slab names to array
# slv = nx.get_node_attributes(L.Gs, "name").values()
# slk = nx.get_node_attributes(L.Gs, "name").keys()
# find all material used in simulation
#uslv = np.unique(L.sla[1:])
uslv = L.sl.keys()
#
# add CEIL and FLOOR
#
#uslv = np.hstack((uslv, np.array(('CEIL', 'FLOOR'))))
# create reverse dictionary with all materials as keys
# and associated point/segment as a value
#dsla = {}
#for s in uslv:
# dsla[s] = np.where(s == np.array(slv))[0]
nsmax = max(L.Gs.node.keys())
#sla = np.zeros((nsmax+1), dtype='S20')
# array type str with more than 1 character
# warning use zeros instead of empty because slab zero
# is virtually used before assigning correct slab to ceil and floor
#
# sla is an array of string.
# each value of Gs node is the index of the corresponding slab
#
#sla[slk] = np.array(slv)
R.dusl = dict.fromkeys(uslv, np.array((), dtype=int))
T.dusl = dict.fromkeys(uslv, np.array((), dtype=int))
#to be specified and limited to used wedges
if hasattr(self,'_luw'):
D.dusl = dict.fromkeys(self._luw, np.array((), dtype=int))
# transmission/reflection slab array
tsl = np.array(())
rsl = np.array(())
# diffraction wedge list
dw = np.array(())
# loop on group of interactions
for k in self:
if k !=0:
uR = uT = uD = uRf = uRc = 0.
# structure number (segment or point)
# nstr : i x r
nstr = self[k]['sig'][0, 1:-1, :]
# ityp : i x r
ityp = self[k]['sig'][1, 1:-1, :]
# theta : i x r ( related to interactions )
theta = self[k]['theta']
# (i+1) x r
si = self[k]['si']
# distance in
s_in = si[0:-1,:]
# distance out
s_out = si[1:,:]
if 'diffvect' in self[k]:
dvec = self[k]['diffvect']
ldsl = self[k]['diffslabs']
dix = self[k]['diffidx']
## flatten information
######################
# flatten nstr (1 dimension)
# size1 = i x r
size1 = nstr.size
# flatten ityp (method faster than np.ravel() )
nstrf = np.reshape(nstr,size1,order='F')
itypf = ityp.reshape(size1,order='F')
thetaf = theta.reshape(size1,order='F')
#sif = si[0, :, :].reshape(si[0, :, :].size)
# ## index creation / already done in rays.locbas
# ##################
# # create index for retrieving interactions
# # integer offset : total size idx
# idxts = idxts + idx.size
# idx = idxts + np.arange(ityp.size).reshape(np.shape(ityp),order='F')
# nbray = np.shape(idx)[1]
# self[k]['rays'] = idx
# self[k]['nbrays'] = nbray
# self[k]['rayidx'] = nbrayt + np.arange(nbray)
# # create a numpy array to relate the ray index to its corresponding
# # number of interactions
# # _ray2nbi = np.ones((nbray))
# #try:
# # self._ray2nbi=np.hstack((self._ray2nbi,_ray2nbi))
# #except:
# # self._ray2nbi=_ray2nbi
# #self._ray2nbi[self[k]['rayidx']] = k
# nbrayt = nbrayt + nbray
# #self.raypt = self.raypt + self[k]['nbrays']
idxf = self[k]['rays'].reshape(self[k]['rays'].size,order='F')
# (i+1)xr
#
size2 = si[:, :].size
nbray = self[k]['nbrays']
# TODO
# dirty fix
# nbray is either an int or an array. why ?
if type(nbray)==np.ndarray:
nbray=nbray[0]
# ,(i+1)xr
# sif = si[:, :].reshape(size2,order='F') # TO BE REMOVE
s_inf = s_in[:, :].reshape(ityp.size,order='F')
s_outf = s_out[:, :].reshape(ityp.size,order='F')
# 3x3,(i+1)xr
#
# self[k]['B'] 3 x 3 x i x r
#
# first unitary matrix (3x3xr)
b0 = self[k]['B'][:,:,0,:]
# remaining unitary matrices (index 1 onward):
# dimensions i and r are merged
b = self[k]['B'][:,:,1:,:].reshape(3, 3, size2-nbray,order='F')
## find used slab
##################
# find slab type for the rnstr
# nstrf is a number of slab
# this is a problem for handling subsegment
#
# seek for interactions position
################################
uD = np.where((itypf == 1))[0]
uR = np.where((itypf == 2))[0]
uT = np.where((itypf == 3))[0]
uRf = np.where((itypf == 4))[0]
uRc = np.where((itypf == 5))[0]
# assign floor and ceil slab
############################
slT = [ L.Gs.node[x]['name'] for x in nstrf[uT] ]
slR = [ L.Gs.node[x]['name'] for x in nstrf[uR] ]
# WARNING
# in future versions floor and ceil could be different for each cycle.
# this information would be directly obtained from L.Gs
# then the two following lines would have to be modified
slRf = np.array(['FLOOR']*len(uRf))
slRc = np.array(['CEIL']*len(uRc))
# Fill the used slab
#####################
tsl = np.hstack((tsl, slT))
rsl = np.hstack((rsl, slR, slRf, slRc))
if 'diffvect' in self[k]:
dw = np.hstack((dw,self[k]['diffslabs']))
## for s in uslv:
##
## T.dusl[s]=np.hstack((T.dusl[s],len(T.idx) + np.where(sl[uT]==s)[0]))
## R.dusl[s]=np.hstack((R.dusl[s],len(R.idx) + np.where(sl[uR]==s)[0]))
## R.dusl['FLOOR']=np.hstack((R.dusl['FLOOR'],len(R.idx)+len(uR) + np.where(sl[uRf]=='FLOOR')[0]))
# R.dusl['CEIL']=np.hstack((R.dusl['CEIL'],len(R.idx)+len(uR)+len(uRf) +
# np.where(sl[uRc]=='CEIL')[0]))
# Basis
# Hugr issue with B index
# Friedman version Bs was entering in the index
# maybe B can have the same index that interactions
# but this must be managed when evaluation of CIR is made
# BU 10/4/2013
# .. todo:: check whether idxf is still the correct index here
# why the transposition b is first 2x2x(i+1)xr
# idxf is (ixr)
#
# need to check how B is used in eval()
#
# Warning
# -------
# B.idx refers to an interaction index
# whereas B0.idx refers to a ray number
# B.stack(data=b.T, idx=idxf)
# B0.stack(data=b0.T,idx=self[k]['rayidx'])
B.stack(data=b.T, idx=idxf)
B0.stack(data=b0.T,idx=self[k]['rayidx'])
### Reflexion
############
### wall reflexion
#(theta, s_in,s_out)
R.stack(data=np.array((thetaf[uR], s_inf[uR], s_outf[uR])).T,
idx=idxf[uR])
# floor reflexion
R.stack(data=np.array((thetaf[uRf], s_inf[uRf], s_outf[uRf])).T,
idx=idxf[uRf])
# ceil reflexion
R.stack(data=np.array((thetaf[uRc], s_inf[uRc], s_outf[uRc])).T,
idx=idxf[uRc])
# R.stack(data=np.array((thetaf[uR], sif[uR], sif[uR+1])).T,
# idx=idxf[uR])
# # floor reflexion
# R.stack(data=np.array((thetaf[uRf], sif[uRf], sif[uRf+1])).T,
# idx=idxf[uRf])
# # ceil reflexion
# R.stack(data=np.array((thetaf[uRc], sif[uRc], sif[uRc+1])).T,
# idx=idxf[uRc])
### sl[idxf[uT]]
# Transmission
############
# (theta, s_in,s_out)
# T.stack(data=np.array((thetaf[uT], sif[uT], sif[uT+1])).T, idx=idxf[uT])
T.stack(data=np.array((thetaf[uT], s_inf[uT], s_outf[uT])).T, idx=idxf[uT])
###
#Diffraction
#phi0,phi,si,sd,N,mat0,matN,beta
#
if 'diffvect' in self[k]:
# self[k]['diffvect'] = ((phi0,phi,beta,N) x (nb_rayxnb_interactions) )
#si and so are stacked at the end of self[k]['diffvect']
#as well:
#data = (6 x (nb_rayxnb_interactions) )
# ((phi0,phi,beta,N,sin,sout) x (nb_rayxnb_interactions) )
data = np.vstack((self[k]['diffvect'],s_inf[uD],s_outf[uD]))
D.stack(data=data.T,idx=self[k]['diffidx'])#idxf[uD])
elif self.los:
ze = np.array([0])
#self[k]['rays'] = np.array(([[0]]))
#self[k]['nbrays'] = 1
#self[k]['rayidx'] = ze
#self.raypt = 1
#self._ray2nbi=ze
B.stack(data=np.eye(3)[np.newaxis,:,:], idx=ze)
B0.stack(data=np.eye(3)[np.newaxis,:,:],idx=ze)
if len(tsl)>0:
T.create_dusl(tsl)
if len(rsl)>0:
R.create_dusl(rsl)
if len(dw)>0:
D.create_dusl(dw)
# create interactions structure
self.I = I
self.I.add([T, R, D])
# create rotation base B
self.B = B
# create rotation base B0
self.B0 = B0
self.filled = True
def eval(self,fGHz=np.array([2.4]),bfacdiv=False,ib=[]):
""" field evaluation of rays
Parameters
----------
fGHz : array
frequency in GHz
ib : list of interactions block
"""
#print 'Rays evaluation'
self.fGHz=fGHz
# evaluation of all interactions
#
# core calculation of all interactions is done here
#
self.I.eval(fGHz)
# if np.isnan(self.I.I).any():
# pdb.set_trace()
# evaluation of base B (3x3)
# B and B0 do no depend on frequency
# just an axis extension (np.newaxis)
#pdb.set_trace()
# 1 x i x 3 x 3
B = self.B.data[np.newaxis,...]
B = B.swapaxes(2,3)
# 1 x r x 3 x 3
B0 = self.B0.data[np.newaxis,...]
B0 = B0.swapaxes(2,3)
# Ct : f x r x 3 x 3
Ct = np.zeros((self.I.nf, self.nray, 3, 3), dtype=complex)
# delays : ,r
self.delays = np.zeros((self.nray))
# dis : ,r
self.dis = np.zeros((self.nray))
#nf : number of frequency points
nf = self.I.nf
aod= np.empty((2,self.nray))
aoa= np.empty((2,self.nray))
# loop on interaction blocks
if ib==[]:
ib=self.keys()
# loop over group of interactions
for l in ib:
# ir : ray index
ir = self[l]['rayidx']
aoa[:,ir]=self[l]['aoa']
aod[:,ir]=self[l]['aod']
if l != 0:
# l stands for the number of interactions
r = self[l]['nbrays']
# dirty fix should not be an array
if type(r)==np.ndarray:
r = r[0]
# reshape in order to have a 1D list of index
# reshape ray index
rrl = self[l]['rays'].reshape(r*l,order='F')
# get the corresponding evaluated interactions
#
# reshape error can be tricky to debug.
#
# f , r , l , 3 , 3
A = self.I.I[:, rrl, :, :].reshape(self.I.nf, r, l, 3, 3)
# get the corresponding unitary matrix B
# 1 , r , l , 3 , 3
#Bl = B[:, rrl, :, :].reshape(self.I.nf, r, l, 2, 2,order='F')
Bl = B[:, rrl, :, :].reshape(1, r, l, 3, 3)
# get the first unitary matrix B0l
B0l = B0[:,ir,:, :]
# get alpha
alpha = self.I.alpha[rrl].reshape(r, l,order='F')
# # get gamma
gamma = self.I.gamma[rrl].reshape(r, l,order='F')
# # get si0
si0 = self.I.si0[rrl].reshape(r, l,order='F')
# # get sout
sout = self.I.sout[rrl].reshape(r, l,order='F')
try:
del Z
except:
pass
#print "\nrays",ir
#print "-----------------------"
## loop on all the interactions of ray with l interactions
for i in range(0, l):
############################################
## # Divergence factor D
### not yet implemented
############################################
# if i == 0:
# pdb.set_trace()
# D0 = 1./si0[:,1]
# rho1 = si0[:,1]*alpha[:,i]
# rho2 = si0[:,1]*alpha[:,i]*gamma[:,i]
# D =np.sqrt(
# ( (rho1 ) / (rho1 + sout[:,i]) )
# *( (rho2) / (rho2 + sout[:,i])))
# D=D*D0
# rho1=rho1+(sout[:,i]*alpha[:,i])
# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])
#
# ## handle the loss
# if np.isnan(D).any():
# p=np.nonzero(np.isnan(D))[0]
# D[p]=1./sout[p,1]
# else :
# D=np.sqrt(
# ( (rho1 ) / (rho1 + sout[:,i]) )
# *( (rho2) / (rho2 + sout[:,i])))
#
# rho1=rho1+(sout[:,i]*alpha[:,i])
# rho2=rho2+(sout[:,i]*alpha[:,i]*gamma[:,i])
############################################
# A0 (X dot Y)
# | | |
# v v v
##########################
## B # I # B # I # B #
##########################
# \_____/ \______/
# | |
# Atmp(i) Atmp(i+1)
#
# Z=Atmp(i) dot Atmp(i+1)
#X = A [:, :, i, :, :]
#Y = Bl[:, :, i, :, :]
# pdb.set_trace()
if i == 0:
## First Basis added
Atmp = A[:, :, i, :, :]
B00 = B0l[:, :, :, :]
Z = np.sum(Atmp[..., :, :, np.newaxis]
*B00[..., np.newaxis, :, :], axis=-2)
else:
Atmp = A[:, :, i, :, :]
BB = Bl[:, :, i-1, :, :]
Ztmp = np.sum(Atmp[..., :, :, np.newaxis]
*BB[..., np.newaxis, :, :], axis=-2)
Z = np.sum(Ztmp[..., :, :, np.newaxis]
*Z[..., np.newaxis, :, :], axis=-2)
if i == l-1:
BB = Bl[:, :, i, :, :]
Z = np.sum(BB[..., :, :, np.newaxis]
*Z[..., np.newaxis, :, :], axis=-2)
# fill the C tilde MDA
Ct[:,ir, :, :] = Z[:, :, :, :]
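# at this point Z holds, for each ray with l interactions, the chained
# product  Bl[l-1] . A[l-1] . Bl[l-2] . A[l-2] ... Bl[0] . A[0] . B0l
# evaluated from right (departure basis) to left (arrival basis)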
#
if bfacdiv:
Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])
else:
Ct[:,ir, :, :] = Ct[:, ir, :, :]*1./(self[l]['dis'][np.newaxis, :, np.newaxis, np.newaxis])
self.delays[ir] = self[l]['dis']/0.3
self.dis[ir] = self[l]['dis']
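# distances are in meters and 0.3 m/ns is the speed of light,
# so self.delays is expressed in nanoseconds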
#
# true LOS when no interaction
#
if self.los:
Ct[:,0, :, :]= np.eye(3,3)[None,None,:,:]
#self[0]['dis'] = self[0]['si'][0]
# Friis
Ct[:,0, :, :] = Ct[:,0, :, :]*1./(self[0]['dis'][None, :, None, None])
self.delays[0] = self[0]['dis']/0.3
self.dis[0] = self[0]['dis']
# To be corrected in a future version
#
# Ct : nf , Nray , theta , phi
#
# to
#
# Ct : Nray x nf , theta , phi
#
Ct = np.swapaxes(Ct, 1, 0)
#c11 = Ct[:,:,0,0]
#c12 = Ct[:,:,0,1]
#c21 = Ct[:,:,1,0]
#c22 = Ct[:,:,1,1]
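# The 3x3 matrices are expressed in (s, theta, phi) like bases :
# indices 1 and 2 select the transverse (theta, phi) components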
c11 = Ct[:,:,1,1]
c12 = Ct[:,:,1,2]
c21 = Ct[:,:,2,1]
c22 = Ct[:,:,2,2]
#
# Construction of the Ctilde propagation channel structure
#
Cn = Ctilde()
# Cn.Cpp = bs.FUsignal(self.I.fGHz, c11)
# Cn.Cpt = bs.FUsignal(self.I.fGHz, c12)
# Cn.Ctp = bs.FUsignal(self.I.fGHz, c21)
# Cn.Ctt = bs.FUsignal(self.I.fGHz, c22)
Cn.Ctt = bs.FUsignal(self.I.fGHz, c11)
Cn.Ctp = bs.FUsignal(self.I.fGHz, c12)
Cn.Cpt = bs.FUsignal(self.I.fGHz, c21)
Cn.Cpp = bs.FUsignal(self.I.fGHz, c22)
Cn.nfreq = self.I.nf
Cn.nray = self.nray
Cn.tauk = self.delays
Cn.fGHz = self.I.fGHz
# r x 2
Cn.tang = aod.T
Cn.tangl = aod.T
# r x 2
#
# recover angle of arrival convention
#
Cn.rang = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi])
Cn.rangl = np.hstack([np.pi-aoa.T[:,[0]],aoa.T[:,[1]]-np.pi])
# add aoa and aod
self.evaluated = True
return(Cn)
def rayfromseg(self,ls):
""" DEPRECATED
use rayfromnstr instead
"""
import warnings
warnings.warn('function name update: use rayfromnstr instead',
              DeprecationWarning)
return self.rayfromnstr(ls)
def rayfromnstr(self,ls):
""" returns the indexes of rays for a given interaction list
"""
if not isinstance(ls,list):
ls = [ls]
lur = []
for k in self:
aib = self[k]['sig'][0,...]
for i in ls :
# import ipdb
# ipdb.set_trace()
ui, ur = np.where(aib == i)
lur.extend(self[k]['rayidx'][ur].tolist())
return np.sort(lur)
def rayfromdelay(self,t0=0,t1=[]):
""" returns the indexes of rays between 2 timestamps t0 and t1
"""
if t1 == []:
t1 = self.delays.max()
u = np.where((self.delays>t0) & (self.delays<t1))[0]
return u
def ray2slab(self,L,ir):
""" return the slabs for a given interaction index
Parameters
----------
L : Layout
ir : interaction block
Returns
-------
numpy array of slab strings with shape (ir, r)
ir : number of interactions (of the interaction block)
r : number of rays
"""
v=np.vectorize( lambda t: L.Gs.node[t]['name'] if (t!=0) and (t>0) else '_')
return v(self[ir]['sig'][0])
def ray(self, r):
""" returns the index of interactions of r
Parameters
----------
r : integer
ray index
Returns
-------
ir : nd.array
index of interactions of r
Examples
--------
"""
raypos = np.nonzero(self[self._ray2nbi[r]]['rayidx'] == r)[0]
return(self[self._ray2nbi[r]]['rays'][:,raypos][:,0])
def ir2a(self,ir):
""" index ray 2 address ray
Parameters
----------
ir : integer
Returns
-------
(ni,ux) : tuple address (group of interactions, index)
"""
assert ir < self.nray, "wrong ray index"
ni = self._ray2nbi[ir]
ur = np.where(self[ni]['rayidx']==ir)[0][0]
return(ni,ur)
def a2ir(self,t):
""" address ray 2 index ray
Parameters
----------
t = (ni,ux) : tuple address (group of interactions, index)
ray address
Returns
-------
ir : integer
ray index
"""
assert t[0] in self.keys(), "wrong number of interactions"
ir = self[t[0]]['rayidx'][t[1]]
return(ir)
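# round-trip sketch between the two addressing schemes (for an existing
# ray index ir of a Rays instance r) :
#   ni, ux = r.ir2a(ir)
#   assert r.a2ir((ni, ux)) == ir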
def ray2ityp(self,r):
""" return interaction type for a given ray
Parameters
----------
r : integer
ray index
Returns
-------
lt : list
list of type of interactions
"""
di = {1:'D',2:'R',3:'T',4:'R',5:'R'}
sig = self.ray2sig(r)
sig = sig[1,1:-1]
return [di[s] for s in sig]
def ray2nbi(self,r):
""" Get interaction block/number of interactions of a given ray
Parameters
----------
r : integer
ray index
Returns
-------
nbi : int
interaction block number
"""
i = self._ray2nbi[r]
return i
def ray2iidx(self,ir):
""" Get interactions index of a given ray
Parameters
----------
ir : integer
ray index
Returns
-------
iidx : array
interaction index
"""
unbi = self.ray2nbi(ir)
ur = np.where(self[unbi]['rayidx']==ir)[0]
return self[unbi]['rays'][:,ur]
def ray2sig(self,ir):
""" get signature to corresponding ray
"""
unbi = self.ray2nbi(ir)
ur = np.where(self[unbi]['rayidx']==ir)[0]
return self[unbi]['sig'][:,:,ur].squeeze()
def ray2sig2d(self,ir):
""" get signature to corresponding ray
"""
sig = self.ray2sig(ir)
sig = sig.squeeze()
sig = sig[:,1:-1] # remove extremal 0
unfc = np.where(sig[1,:]<4)[0] # keep interactions that are not floor/ceil reflections (ityp < 4)
sig2d = sig[:,unfc]
return sig2d
def ray2inter(self,ir,L,Si):
""" get interaction list (Gi style) from a ray
Parameters
----------
ir : ray index
L : Layout
Si : Signatures object
"""
sig = self.ray2sig2d(ir)
return Si.sig2inter(L,sig)
def slab_nb(self, ir):
""" returns the slab numbers of r
Parameters
----------
ir : integer
ray index
Returns
-------
isl : slabs number
"""
raypos = np.nonzero(self[self._ray2nbi[ir]]['rayidx'] == ir)[0]
return(self[self._ray2nbi[ir]]['sig'][0,1:-1,raypos[0]])
def vis(self,ir,L):
typ = ['Tx'] + self.typ(ir) + ['Rx']
slab_nb = self.slab_nb(ir)
slab_nb = np.insert(slab_nb,0,0)
slab_nb = np.insert(slab_nb,len(slab_nb),0)
nbi = self._ray2nbi[ir]
raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]
pt = self[nbi]['pt'][:,:,raypos]
tz = pt[2].ravel()
slab = [ L.Gs.node[x]['name'] for x in slab_nb if x > 0]
st = ''
for t in typ:
st = st + t+' '
print(st)
st = ''
for s in slab_nb:
st = st + str(s)+' '
print(st)
st = ''
for z in tz:
st = st + str(z)+' '
print(st)
print(slab)
def typ(self, ir,fromR=True):
""" returns interactions list type of a given ray
Parameters
----------
ir : integer
ray index
fromR : bool
True : get information from signature in R
False: get information in R.I
"""
#
# In this function the ceil and floor interactions
# are hard coded as reflections. This is going to evolve
# with the implementation of multi-floor support.
#
if fromR:
di = {0:'L',1:'D',2:'R',3:'T',4:'R',5:'R'}
nbi = self._ray2nbi[ir]
raypos = np.nonzero(self[nbi]['rayidx'] == ir)[0]
inter = self[nbi]['sig'][1,1:-1,raypos][0]
return [di[i] for i in inter]
else:
a = self.ray(ir)
return(self.I.typ[a])
def dump(self,ir,L,ifGHz=0,filename='dumpray.ray'):
""" dump the full information of a ray in a file
"""
nbi = self._ray2nbi[ir]
ur = np.where(self[nbi]['rayidx']==ir)[0][0]
fd=open(filename,'w')
fd.write('ray #'+str(ir)+'\n')
fd.write(str(ur)+ ' th ray from the group of ' + str(nbi)+' Interactions' +'\n')
cy_a = L.pt2cy(self.pTx)
cy_b = L.pt2cy(self.pRx)
#fd.write('Tx #'+str(self.pTx)+'\n')
#fd.write('Rx #'+str(self.pRx)+'\n')
if self.evaluated:
ray = self.ray(ir)
typ = self.typ(ir)
slabnb = self.slab_nb(ir)
fd.write(' ray #'+str(ray)+'\n')
#fd.write(' typ #'+str(typ)+'\n')
fd.write(' slab #'+str(slabnb)+'\n')
for k in range(nbi+2):
if k==0:
fd.write('Tx : ')
elif k==(nbi+1):
fd.write('Rx : ')
else:
six = slabnb[k-1]
if six==0:
slabname='FLOOR'
cyc =[-2,-3]
else:
slabname = L.Gs.node[six]['name']
cyc = L.Gs.node[six]['ncycles']
if typ[k-1]=='T':
fd.write('T '+slabname +' ('+str(six)+','+str(cyc[0])+','+str(cyc[1])+')')
if typ[k-1]=='R':
fd.write('R '+slabname +' ('+str(six)+',)')
if typ[k-1]=='D':
fd.write('D ('+str(six)+') :')
fd.write(str(self[nbi]['pt'][:,k,ur])+'\n' )
if k==0:
fd.write(' '+str(cy_a)+'\n')
elif k==(nbi+1):
fd.write(' '+str(cy_b)+'\n')
if k==0:
for l in range(3):
if l<2:
fd.write('\t'+str(self[nbi]['Bo0'][l,:,ur])
+'\t'+str(self[nbi]['B'][l,:,0,ur])+'\n')
else:
fd.write('\t'+str(self[nbi]['Bo0'][l,:,ur]) +'\n')
elif k==(nbi+1):
for l in range(3):
fd.write('\t'+str(self[nbi]['BiN'][l,:,ur])+'\n')
else:
for l in range(3):
if l<2:
fd.write('\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\t'+
str(self[nbi]['Bo'][l,:,k-1,ur])
+'\t'+str(self[nbi]['B'][l,:,k-1,ur])+'\n')
else:
fd.write('\t'+str(self[nbi]['Bi'][l,:,k-1,ur])+'\t'+
str(self[nbi]['Bo'][l,:,k-1,ur])+'\n')
fd.close()
def info(self,ir,ifGHz=0,bB=True,matrix=False):
""" provides information for a given ray r
Parameters
----------
ir : int
ray index
ifGHz : int
frequency index
bB: boolean
display Basis
matrix :
display matrix
"""
if self.evaluated:
print('-------------------------')
print('Informations of ray #', ir)
print('-------------------------\n')
ray = self.ray(ir)
typ = self.typ(ir)
slabnb = self.slab_nb(ir)
# if there is a diffraction, phi0, phi, beta are shown
if 'D' in typ:
diff =True
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}, {7:4}, {8:4}, {9:4}'\
.format('Index',
'type',
'slab',
'nstr' ,
'th(rad)',
'alpha',
'gamma2',
'phi0',
'phi',
'beta'))
else :
diff =False
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7}, {5:10}, {6:10}'\
.format('Index',
'type',
'slab',
'nstr',
'th(rad)',
'alpha',
'gamma2'))
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\
.format(ir, 'B0','-', '-', '-', '-', '-'))
for iidx, i in enumerate(typ):
# import ipdb
# ipdb.set_trace()
if i == 'T' or i == 'R' or i =='D':
I = getattr(self.I, i)
for slab in I.dusl.keys():
# print slab
midx = I.dusl[slab]
# print midx
Iidx = np.array((I.idx))[midx]
if i != 'D':
th = I.data[I.dusl[slab], 0]
gamma = I.gamma[midx]
alpha = I.alpha[midx]
else :
# from IPython.core.debugger import Tracer
# Tracer()()
th=['-']*max(max(Iidx),1)
gamma = ['NC']*max(max(Iidx),1)
alpha = ['NC']*max(max(Iidx),1)
udiff = np.where(self.I.D.idx==ray[iidx])[0]
phi0 = self.I.D.phi0[udiff][0]
phi=self.I.D.phi[udiff][0]
beta=self.I.D.beta[udiff][0]
for ii, Ii in enumerate(Iidx):
if Ii == ray[iidx]:
if i=='D':
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10}, {6:10}, {7:3.4}, {8:3.4}, {9:3.4}'\
.format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii],phi0,phi,beta))
else:
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'\
.format(Ii, i, slab, slabnb[iidx], th[ii], alpha[ii], gamma[ii]))
else:
if bB:
print('{0:5} , {1:4}, {2:10}, {3:7}, {4:7.2}, {5:10.2}, {6:10.2}'.format(ray[iidx], 'B', '-', '-', '-', '-', '-'))
# print '{0:5} , {1:4}, {2:10}, {3:7}, {4:10}, {5:10}'.format(ray[iidx], i, '-', '-', '-', '-')
if matrix:
print('\n----------------------------------------')
print(' Matrix of ray #', ir, 'at f=', self.I.fGHz[ifGHz])
print('----------------------------------------')
lmat = []
ltran = []
if bB:
print('rotation matrix#', 'type: B0')
B0 = self.B0.data[ir,:,:]
addr = self.ir2a(ir)
Bo0 = self[addr[0]]['Bo0'][:,:,addr[1]]
Bi1 = self[addr[0]]['Bi'][:,:,0,addr[1]]
U = np.dot(Bi1.T,Bo0)
assert np.allclose(B0,U)
lmat.append(B0)
ltran.append(B0)
print(B0)
for iidx, i in enumerate(typ):
print('interaction #', ray[iidx], 'type:', i)
# f x l x 3 x 3
I = self.I.I[ifGHz, ray[iidx], :, :]
print(I)
lmat.append(I)
if bB:
print('rotation matrix#',[ray[iidx]], 'type: B')
B = self.B.data[ray[iidx], :, :]
print(B)
lmat.append(B)
ltran.append(B)
# evaluate matrix product
PM0=np.eye(3)
PM1=np.eye(3)
for m in lmat[::-1]:
PM0=np.dot(PM0,m)
for m in ltran[::-1]:
PM1=np.dot(PM1,m)
print("matrix product with interactions (dB)")
print(20*np.log10(np.abs(PM0[1,1])),' ',20*np.log10(np.abs(PM0[1,2])))
print(20*np.log10(np.abs(PM0[2,1])),' ',20*np.log10(np.abs(PM0[2,2])))
print("matrix product without interactions (dB)")
print(20*np.log10(np.abs(PM1[1,1])),' ',20*np.log10(np.abs(PM1[1,2])))
print(20*np.log10(np.abs(PM1[2,1])),' ',20*np.log10(np.abs(PM1[2,2])))
return(PM0)
else:
print('\nto display matrix, use matrix=True on call')
else:
print('Rays have not been evaluated yet')
def signature(self, u , typ='full'):
""" extract ray signature
Parameters
----------
u : tuple or int
if tuple : ray address (group of interactions, index)
if int : ray index
Returns
-------
sig : ndarray
Notes
-----
The signature of a ray is stored as a member
r[nint]['sig']
"""
if type(u)==tuple:
addr = u
else:
addr = self.ir2a(u)
if typ=='full':
sig = self[addr[0]]['sig'][:,:,addr[1]]
else:
pass
return(sig)
def show3d(self,
ray,
bdis=True,
bbas=False,
bstruc=True,
col=np.array([1, 0, 1]),
id=0,
linewidth=1):
""" plot a set of 3D rays
Parameters
----------
ray : ndarray (3 x Npt)
points of the ray to display
bdis : Boolean
if True display with geomview, otherwise return the geomview file name (True)
bbas : Boolean
display local basis (False)
bstruc : Boolean
display structure (True)
col : ndarray() 1x3
color of the ray ([1,0,1])
id : Integer
id of the ray (default 0)
linewidth : Integer
default 1
"""
filerac = pyu.getlong("ray" + str(id), pstruc['DIRGEOM'])
_filerac = pyu.getshort(filerac)
filename_list = filerac + '.list'
filename_vect = filerac + '.vect'
try:
fo = open(filename_vect, "w")
except:
raise NameError(filename_vect)
fo.write("appearance { linewidth %d }\n" % linewidth)
fo.write("VECT\n")
fo.write("1 %d 1\n\n" % len(ray[0, :]))
fo.write("%d\n" % len(ray[0, :]))
fo.write("1\n")
for i in range(len(ray[0, :])):
fo.write("%g %g %g\n" % (ray[0, i], ray[1, i],
ray[2, i]))
# fo.write("%d %d %d 0\n" % (col[0],col[1],col[2]))
fo.write("%g %g %g 0\n" % (col[0], col[1], col[2]))
fo.close()
#
# Add local bases
#
fo = open(filename_list, "w")
fo.write("LIST\n")
fo.write("{<" + filename_vect + "}\n")
if (bstruc):
# fo.write("{<strucTxRx.off}\n")
fo.write("{<" + _filestr + ".off}\n")
filename = filename_list
fo.close()
if (bdis):
#
# Geomview Visualisation
#
chaine = "geomview -nopanel -b 1 1 1 " + filename + \
" 2>/dev/null &"
os.system(chaine)
else:
return(filename)
def _show3(self,L=[],rlist=[],newfig=False,cmap='hot',**kwargs):
""" plot 3D rays in environment using Mayavi
Parameters
----------
L : Layout object
Layout to be displayed
rlist : list
list of index rays
newfig : boolean (default: False)
if true create a new mayavi figure
else : use the current
ER: Ray energy
"""
if newfig:
mlab.clf()
f = mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))
else :
f = mlab.gcf()
# view=mlab.view()
if L != []:
try:
L._filename
except:
raise NameError('L argument must be a layout object')
L._show3()
if 'ER' in kwargs:
ER = kwargs['ER']
color_range = np.linspace( 0, 1., len(ER))#np.linspace( 0, np.pi, len(ER))
uER = ER.argsort()[::-1]
colors= color_range[uER]
if rlist ==[]:
nbi = self.keys()
for i in nbi:
r = range(np.shape(self[i]['pt'])[2])
ridx = self[i]['rayidx']
# number of rays
nbr = len(r)
# current number of interactions
cnbi = i + 2
# import ipdb
# ipdb.set_trace()
pt = self[i]['pt'][:,:,r].reshape(3,cnbi*nbr,order='F')
l0 = np.array([np.arange(0,cnbi-1)+k*cnbi for k in range(nbr)]).ravel()
l1 = l0+1
connection = np.vstack((l0,l1)).T
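# each ray of this group is stored as cnbi = i + 2 consecutive points in pt ;
# (l0, l1) enumerate the cnbi - 1 segments of every ray and provide the line
# connectivity assigned to the mayavi source below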
if 'ER' in kwargs:
rc = np.repeat(colors[ridx],cnbi)
rc[::cnbi]=0
src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:],rc,colormap=cmap)
else:
src = mlab.pipeline.scalar_scatter(pt[0,:], pt[1,:], pt[2,:])
src.mlab_source.dataset.lines=connection
src.update()
lines = mlab.pipeline.stripper(src)
mlab.pipeline.surface(lines,opacity=0.5,colormap=cmap)
f.children[-1].name='Rays with ' + str(i) + ' interactions'
else :
nbi = self._ray2nbi[rlist]
nr = np.array((nbi,rlist))
unb = np.unique(nr[0,:])
unr = {int(i):np.where(nr[0,:]==i)[0] for i in unb}
for i in unb:
raynb = (nr[1,unr[i]]).astype(int)
nbr=len(raynb)
ptidx = [np.where(self[i]['rayidx']==x)[0][0] for x in raynb]
# current number of interactions
cnbi = i + 2
pt = self[i]['pt'][:,:,ptidx].reshape(3,cnbi*nbr,order='F')
# lines = np.arange(cnbi*nbr).reshape(cnbi,nbr)
lines = np.arange(cnbi*nbr).reshape(nbr,cnbi)
# mesh = tvtk.PolyData(points=pt.T, polys=lines)
mesh = tvtk.PolyData(points=pt.T, polys=lines)
mlab.pipeline.surface(mlab.pipeline.extract_edges(mesh),
color=(0, 0, 0), )
f.children[-1].name='Rays with ' + str(int(i)) + ' interactions'
# mlab.view(view[0],view[1],view[2],view[3])
return(f)
def show3(self,
L=[],
bdis=True,
bstruc=True,
bbasi = False,
bbaso = False,
id=0,
ilist=[],
raylist=[],centered=True):
""" plot 3D rays within the simulated environment
Parameters
----------
bdis : boolean
True
bstruc : boolean
True
bbasi : boolean
display input basis of each interaction of rays
bbaso : boolean
display output basis of each interaction of rays
id : int
L : Layout object
Layout to be displayed
ilist : list of group of interactions
raylist : list of index rays
centered : boolean
if True center the layout before display
"""
try:
L._filename
except:
raise NameError('L argument must be a layout object')
if not centered:
pg=np.array([[0],[0],[0]])
strucname= L._filename.split('.')[0]
pg = L.geomfile(centered=centered)
pg = np.hstack((pg,0.)).reshape(3,1)
if ilist == []:
ilist = self.keys()
pTx = self.pTx.reshape((3, 1))-pg
pRx = self.pRx.reshape((3, 1))-pg
filename = pyu.getlong("grRay" + str(id) + ".list", pstruc['DIRGEOM'])
fo = open(filename, "w")
fo.write("LIST\n")
if bstruc:
fo.write("{<"+strucname+".off}\n")
if bbasi:
if not self.isbased:
raise NameError('Bases have not been computed (call self.locbas(Layout))')
else:
base_listi = geu.Geomlist('baselisti',clear=True)
base_listi.append("LIST\n")
if bbaso:
if not self.isbased:
raise NameError('Bases have not been computed (call self.locbas(Layout))')
else:
base_listo = geu.Geomlist('baselisto',clear=True)
base_listo.append("LIST\n")
# fo.write("{<strucTxRx.off}\n")
k = 0
for i in ilist:
if raylist == []:
rlist = range(np.shape(self[i]['pt'])[2])
else:
rlist = raylist
for j in rlist:
ray = np.hstack((pTx,np.hstack((self[i]['pt'][:, :, j]-pg, pRx))))
# ray = rays[i]['pt'][:,:,j]
col = np.array([0, 0, 0])
# print ray
fileray = self.show3d(ray=ray, bdis=False,
bstruc=False, col=col, id=k)
k += 1
fo.write("{< " + fileray + " }\n")
if bbasi:
for inter in range(i):
filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter)
basi = geu.GeomVect(filebi)
basi.geomBase(self[i]['Bi'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])
base_listi.append("{<" + filebi +'.vect' "}\n")
filebi = 'bi_' + str(j) + '_' + str(i) + '_' +str(inter-1)
basi = geu.GeomVect(filebi)
basi.geomBase(self[i]['BiN'][:,:,j],pt=self[i]['pt'][:,-1,j]-pg[:,0])
base_listi.append("{<" + filebi +'.vect' "}\n")
if bbaso:
for inter in range(i):
filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter)
baso = geu.GeomVect(filebo)
baso.geomBase(self[i]['Bo'][:,:,inter,j],pt=self[i]['pt'][:,inter+1,j]-pg[:,0])
base_listo.append("{<" + filebo +'.vect' "}\n")
filebo = 'bo_' + str(j) + '_' + str(i) + '_' +str(inter+1)
baso = geu.GeomVect(filebo)
baso.geomBase(self[i]['Bo0'][:,:,j],pt=self[i]['pt'][:,0,j]-pg[:,0])
base_listo.append("{<" + filebo +'.vect' "}\n")
if bbasi:
fo.write("{< " + "baselisti.list}\n")
if bbaso:
fo.write("{< " + "baselisto.list}\n")
fo.close()
if (bdis):
chaine = "geomview " + filename + " 2>/dev/null &"
os.system(chaine)
else:
return(filename)
if __name__ == "__main__":
doctest.testmod()
|
from SPI.BaseTest import SPIBaseTest
class PySysTest(SPIBaseTest):
def execute(self):
self.start()
self.correlator.injectMonitorscript(filenames=['tutorial.mon'])
self.correlator.sendEventStrings('Step(1)')
print "Waiting for rising edge on pin 19"
channel = self.waitForEdge(19, False)
print "Finished waiting"
# in future, add this, but currently the waitForEdge function doesn't seem to work
#self.assertTrue(channel is not None)
self.waitForSignal('correlator.out', expr='Step 1 complete', errorExpr=['TEST FAILED'], timeout=10)
self.checkPin(19, 0)
def validate(self):
pass
|