input
stringlengths
2.65k
237k
output
stringclasses
1 value
from __future__ import absolute_import, print_function, division import os.path as osp import time import datetime import numpy as np import cv2 import torch import torch.nn as nn from torch.nn import functional as F from torch.utils.tensorboard import SummaryWriter from ..utils import ( MetricMeter, AverageMeter, open_specified_layers, open_all_layers, visualize_ranked_results, save_checkpoint, re_ranking, mkdir_if_missing, visualize_ranked_activation_results, visualize_ranked_threshold_activation_results, visualize_ranked_mask_activation_results) from ..losses import DeepSupervision from .. import metrics GRID_SPACING = 10 VECT_HEIGHT = 10 class Engine(object): r"""A generic base Engine class for both image- and video-reid. Args: datamanager (DataManager): an instance of ``deepreid.data.ImageDataManager`` or ``deepreid.data.VideoDataManager``. model (nn.Module): model instance. optimizer (Optimizer): an Optimizer. scheduler (LRScheduler, optional): if None, no learning rate decay will be performed. use_gpu (bool, optional): use gpu. Default is True. 
""" def __init__(self, datamanager, model, optimizer=None, scheduler=None, use_gpu=True): self.datamanager = datamanager self.train_loader, self.test_loader = self.datamanager.return_dataloaders() self.use_gpu = (torch.cuda.is_available() and use_gpu) self.writer = None self.max_epoch = 0 self.model = model self.optimizer = optimizer self.scheduler = scheduler # Check attributes if not isinstance(self.model, nn.Module): raise TypeError('model must be an instance of nn.Module') def update_lr(self, names=None): names = self.get_model_names(names) for name in names: if self._scheds[name] is not None: self._scheds[name].step() def run(self, aim_sess, save_dir='log', max_epoch=0, start_epoch=0, fixbase_epoch=0, open_layers=None, start_eval=0, eval_freq=-1, test_only=False, print_freq=10, dist_metric='euclidean', normalize_feature=False, visrank=False, visrankactiv=False, visrankactivthr=False, maskthr=0.7, visrank_topk=10, use_metric_cuhk03=False, ranks=[1, 5, 10, 20], rerank=False, visactmap=False, vispartmap=False, visdrop=False, visdroptype='random'): """A unified pipeline for training and evaluating a model. :param aim_sess: aim recorder :param save_dir: directory to save model. :param max_epoch: maximum epoch. :param start_epoch: (int, optional) starting epoch. Default is 0. :param fixbase_epoch: (int, optional) number of epochs to train ``open_layers`` (new layers) while keeping base layers frozen. Default is 0. ``fixbase_epoch`` is counted in ``max_epoch``. :param open_layers: (str or list, optional) layers (attribute names) open for training. start_eval (int, optional): from which epoch to start evaluation. Default is 0. eval_freq (int, optional): evaluation frequency. Default is -1 (meaning evaluation is only performed at the end of training). :param start_eval: :param eval_freq: :param test_only: (bool, optional) if True, only runs evaluation on test datasets. Default is False. :param print_freq: (int, optional) print_frequency. Default is 10. 
:param dist_metric: (str, optional) distance metric used to compute distance matrix between query and gallery. Default is "euclidean". :param normalize_feature: (bool, optional) performs L2 normalization on feature vectors before computing feature distance. Default is False. :param visrank: (bool, optional) visualizes ranked results. Default is False. It is recommended to enable ``visrank`` when ``test_only`` is True. The ranked images will be saved to "save_dir/visrank_dataset", e.g. "save_dir/visrank_market1501". :param visrankactiv: :param visrankactivthr: :param maskthr: :param visrank_topk: (int, optional) top-k ranked images to be visualized. Default is 10. use_metric_cuhk03 (bool, optional): use single-gallery-shot setting for cuhk03. Default is False. This should be enabled when using cuhk03 classic split. :param use_metric_cuhk03: :param ranks: (list, optional) cmc ranks to be computed. Default is [1, 5, 10, 20]. :param rerank: (bool, optional) uses person re-ranking (by Zhong et al. CVPR'17). Default is False. This is only enabled when test_only=True. :param visactmap: (bool, optional) visualizes activation maps. Default is False. 
:param vispartmap: :param visdrop: :param visdroptype: :return: """ if visrank and not test_only: raise ValueError('visrank=True is valid only if test_only=True') if visrankactiv and not test_only: raise ValueError('visrankactiv=True is valid only if test_only=True') if visrankactivthr and not test_only: raise ValueError('visrankactivthr=True is valid only if test_only=True') if visdrop and not test_only: raise ValueError('visdrop=True is valid only if test_only=True') if test_only: self.test( 0, dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrankactiv=visrankactiv, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank, maskthr=maskthr, visrankactivthr=visrankactivthr, visdrop=visdrop, visdroptype=visdroptype ) return if self.writer is None: self.writer = SummaryWriter(log_dir=save_dir) if visactmap: self.visactmap(self.test_loader, save_dir, self.datamanager.width, self.datamanager.height, print_freq) return if vispartmap: self.vispartmap(self.test_loader, save_dir, self.datamanager.width, self.datamanager.height, print_freq) return time_start = time.time() self.max_epoch = max_epoch print('=> Start training') for epoch in range(start_epoch, max_epoch): losses = self.train(print_freq=print_freq, fixbase_epoch=fixbase_epoch, open_layers=open_layers, epoch=epoch) # AIM recorder (acc, loss) for key in losses.meters.keys(): aim_sess.track(losses.meters[key].avg, name=key, epoch=epoch, subset='train') if (epoch+1) >= start_eval and eval_freq > 0 and (epoch+1) % eval_freq == 0 and (epoch+1) != max_epoch: rank1 = self.test( epoch, aim_sess=aim_sess, dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrankactiv=visrankactiv, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank, maskthr=maskthr, visrankactivthr=visrankactivthr ) self._save_checkpoint(epoch, rank1, save_dir) if max_epoch > 0: 
print('=> Final test') rank1 = self.test( epoch, dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrankactiv=visrankactiv, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank, maskthr=maskthr, visrankactivthr=visrankactivthr ) self._save_checkpoint(epoch, rank1, save_dir) elapsed = round(time.time() - time_start) elapsed = str(datetime.timedelta(seconds=elapsed)) print('Elapsed {}'.format(elapsed)) if self.writer is None: self.writer.close() def train(self, epoch=0, print_freq=10, fixbase_epoch=0, open_layers=None): losses = MetricMeter() batch_time = AverageMeter() data_time = AverageMeter() self.model.train() self.two_stepped_transfer_learning(epoch, fixbase_epoch, open_layers) num_batches = len(self.train_loader) end = time.time() for batch_idx, data in enumerate(self.train_loader): data_time.update(time.time() - end) loss_summary = self.forward_backward(data, epoch) batch_time.update(time.time() - end) losses.update(loss_summary) if (batch_idx + 1) % print_freq == 0: # Estimate remaining time nb_this_epoch = num_batches - (batch_idx + 1) nb_future_epochs = (self.max_epoch - (epoch + 1)) * num_batches eta_seconds = batch_time.avg * (nb_this_epoch+nb_future_epochs) eta_str = str(datetime.timedelta(seconds=int(eta_seconds))) print( '[Epoch][Batch]: [{0}/{1}][{2}/{3}]\t' 'eta {eta}\t' '{losses}\t' 'lr {lr:.6f}'.format( epoch + 1, self.max_epoch, batch_idx + 1, num_batches, eta=eta_str, losses=losses, lr=self.optimizer.param_groups[0]['lr'], ) ) if self.writer is not None: n_iter = epoch * num_batches + batch_idx self.writer.add_scalar('Train/time', batch_time.avg, n_iter) self.writer.add_scalar('Train/data', data_time.avg, n_iter) for name, meter in losses.meters.items(): self.writer.add_scalar('Train/' + name, meter.avg, n_iter) self.writer.add_scalar('Train/lr', self.optimizer.param_groups[-1]['lr'], n_iter) end = time.time() if self.scheduler is not None: 
self.scheduler.step() return losses def forward_backward(self, data, epoch=None): raise NotImplementedError def test(self, epoch, aim_sess=None, dist_metric='euclidean', normalize_feature=False, visrank=False, visrankactiv=False, visrank_topk=10, save_dir='', use_metric_cuhk03=False, ranks=[1, 5, 10, 20], rerank=False, maskthr=0.7, visrankactivthr=False, visdrop=False, visdroptype='random'): r"""Tests model on target datasets. .. note:: This function has been called in ``run()``. .. note:: The test pipeline implemented in this function suits both image- and video-reid. In general, a subclass of Engine only needs to re-implement ``_extract_features()`` and ``_parse_data_for_eval()`` (most of the time), but not a must. Please refer to the source code for more details. """ targets = list(self.test_loader.keys()) for name in targets: domain = 'source' if name in self.datamanager.sources else 'target' print('##### Evaluating {} ({}) #####'.format(name, domain)) queryloader = self.test_loader[name]['query'] galleryloader = self.test_loader[name]['gallery'] rank1 = self._evaluate( epoch, aim_sess=aim_sess, dataset_name=name, queryloader=queryloader, galleryloader=galleryloader, dist_metric=dist_metric, normalize_feature=normalize_feature, visrank=visrank, visrankactiv=visrankactiv, visrank_topk=visrank_topk, save_dir=save_dir, use_metric_cuhk03=use_metric_cuhk03, ranks=ranks, rerank=rerank, maskthr=maskthr, visrankactivthr=visrankactivthr, visdrop=visdrop, visdroptype=visdroptype ) return rank1 @torch.no_grad() def _evaluate(self, epoch, aim_sess=None, dataset_name='', queryloader=None, galleryloader=None, dist_metric='euclidean', normalize_feature=False, visrank=False, visrankactiv=False, visrank_topk=10, save_dir='', use_metric_cuhk03=False, ranks=[1, 5, 10, 20], rerank=False, visrankactivthr=False, maskthr=0.7, visdrop=False, visdroptype='random'): batch_time = AverageMeter() print('Extracting features from query set ...') # Terms: query features, query activations, 
query person IDs, query camera IDs and image drop masks qf, qa, q_pids, q_camids, qm = [], [], [], [], [] for _, data in enumerate(queryloader): imgs, pids, camids = self._parse_data_for_eval(data) if self.use_gpu: imgs = imgs.cuda() end = time.time() features = self._extract_features(imgs) activations = self._extract_activations(imgs) dropmask = self._extract_drop_masks(imgs, visdrop, visdroptype) batch_time.update(time.time() - end) features = features.data.cpu() qf.append(features) qa.append(torch.Tensor(activations)) qm.append(torch.Tensor(dropmask)) q_pids.extend(pids) q_camids.extend(camids) qf = torch.cat(qf, 0) qm = torch.cat(qm, 0) qa = torch.cat(qa, 0) q_pids = np.asarray(q_pids) q_camids = np.asarray(q_camids) print('Done, obtained {}-by-{} matrix'.format(qf.size(0), qf.size(1))) print('Extracting features from gallery set ...') # Gallery features, Gallery activations, Gallery person IDs, Gallery camera IDs and Image drop masks gf, ga, g_pids, g_camids, gm = [], [], [], [], [] for _, data in enumerate(galleryloader): imgs, pids, camids = self._parse_data_for_eval(data) if self.use_gpu: imgs = imgs.cuda() end = time.time() features = self._extract_features(imgs) activations = self._extract_activations(imgs) dropmask = self._extract_drop_masks(imgs, visdrop, visdroptype) batch_time.update(time.time() - end) features = features.data.cpu() gf.append(features) ga.append(torch.Tensor(activations)) gm.append(torch.Tensor(dropmask)) g_pids.extend(pids) g_camids.extend(camids) gf = torch.cat(gf, 0) gm = torch.cat(gm, 0) ga = torch.cat(ga, 0) g_pids = np.asarray(g_pids) g_camids = np.asarray(g_camids) print('Done, obtained {}-by-{} matrix'.format(gf.size(0), gf.size(1))) print('Speed: {:.4f} sec/batch'.format(batch_time.avg)) if normalize_feature: print('Normalizing features with L2 norm ...') qf = F.normalize(qf, p=2, dim=1) gf = F.normalize(gf, p=2, dim=1) print('Computing distance matrix with metric={} ...'.format(dist_metric)) distmat = 
metrics.compute_distance_matrix(qf, gf, dist_metric) distmat = distmat.numpy() # Always show results without re-ranking first print('Computing CMC and mAP ...') cmc, mAP = metrics.evaluate_rank( distmat, q_pids, g_pids, q_camids, g_camids, use_metric_cuhk03=use_metric_cuhk03 ) print('** Results **') print('mAP: {:.1%}'.format(mAP)) print('CMC curve') for r in ranks: print('Rank-{:<3}: {:.1%}'.format(r, cmc[r-1])) # AIM recorder (mAP, Rank-1, Rank-5, Rank-10, Rank-20) if aim_sess: aim_sess.track(mAP, name='mAP', epoch=epoch, subset='train') for r in ranks: aim_sess.track(cmc[r-1], name='Rank-{:<3}'.format(r), epoch=epoch, subset='train') if rerank: print('Applying person re-ranking ...')
<filename>dataset_specific/hippocampus/generator/AugmentationGenerator.py import os import random as rn from enum import Enum import SimpleITK as sitk import numpy as np rn.seed(1235) write_flag = False OUTPUT_DIR = '/cache/suhita/' reference_size = [48, 64, 48] reference_spacing = [1.0, 1.0, 1.0] dimension = 3 nrClasses = 1 # gt_shape = [32, 168, 168, nrClasses] class AugmentTypes(Enum): ROTATE = 0 TRANSLATION_3D = 1 SCALE = 2 ORIGINAL = 3 FLIP_HORIZ = 4 def resampleImage(inputImage, newSpacing, interpolator, defaultValue): # castImageFilter = sitk.CastImageFilter() # castImageFilter.SetOutputPixelType(sitk.sitkFloat32) # inputImage = castImageFilter.Execute(inputImage) oldSize = inputImage.GetSize() oldSpacing = inputImage.GetSpacing() newWidth = oldSpacing[0] / newSpacing[0] * oldSize[0] newHeight = oldSpacing[1] / newSpacing[1] * oldSize[1] newDepth = oldSpacing[2] / newSpacing[2] * oldSize[2] newSize = [int(newWidth), int(newHeight), int(newDepth)] # minFilter = sitk.StatisticsImageFilter() # minFilter.Execute(inputImage) # minValue = minFilter.GetMinimum() filter = sitk.ResampleImageFilter() inputImage.GetSpacing() filter.SetOutputSpacing(newSpacing) filter.SetInterpolator(interpolator) filter.SetOutputOrigin(inputImage.GetOrigin()) filter.SetOutputDirection(inputImage.GetDirection()) filter.SetSize(newSize) filter.SetDefaultPixelValue(defaultValue) outImage = filter.Execute(inputImage) return outImage def resampleToReference(inputImg, referenceImg, interpolator, defaultValue): # castImageFilter = sitk.CastImageFilter() # castImageFilter.SetOutputPixelType(sitk.sitkFloat32) # inputImg = castImageFilter.Execute(inputImg) # minFilter = sitk.StatisticsImageFilter() # minFilter.Execute(inputImg) filter = sitk.ResampleImageFilter() filter.SetReferenceImage(referenceImg) filter.SetDefaultPixelValue(float(defaultValue)) ## -1 # float('nan') filter.SetInterpolator(interpolator) outImage = filter.Execute(inputImg) return outImage def 
augment_images_spatial(original_image, reference_image, augmentation_type, T0, T_aug, transformation_parameters, interpolator=sitk.sitkLinear, default_intensity_value=0.0): # interpolator = sitk.sitkNearestNeighbor if augmentation_type == AugmentTypes.FLIP_HORIZ.value: arr = sitk.GetArrayFromImage(original_image) arr = np.flip(arr, axis=2) aug_image = sitk.GetImageFromArray(arr) aug_image.CopyInformation(original_image) else: for current_parameters in transformation_parameters: T_aug.SetParameters(current_parameters) # Augmentation is done in the reference image space, so we first map the points from the reference image space # back onto itself T_aug (e.g. rotate the reference image) and then we map to the original_classification image space T0. T_all = sitk.Transform(T0) T_all.AddTransform(T_aug) aug_image = sitk.Resample(original_image, reference_image, T_all, interpolator, default_intensity_value) # TODO: check if this resampling is necessary? aug_image = resampleToReference(aug_image, reference_image, interpolator, default_intensity_value) return aug_image def check_if_doubles(rand_vectors, new_vector): for vec in rand_vectors: if new_vector == vec: return True else: return False def write_image(image, image_name, ref_image, is_image=False): if (write_flag): writer = sitk.ImageFileWriter() writer.SetFileName(image_name) if not is_image: temp = sitk.GetImageFromArray(image) else: temp = image temp.CopyInformation(ref_image) writer.Execute(temp) def similarity3D_parameter_space_regular_sampling(thetaX, thetaY, thetaZ, tx, ty, tz, scale): ''' Create a list representing a regular sampling of the 3D similarity transformation parameter space. As the SimpleITK rotation parameterization uses the vector portion of a versor we don't have an intuitive way of specifying rotations. We therefor use the ZYX Euler angle parametrization and convert to versor. Args: thetaX, thetaY, thetaZ: numpy ndarrays with the Euler angle values to use. 
tx, ty, tz: numpy ndarrays with the translation values to use. scale: numpy array with the scale values to use. Return: List of lists representing the parameter space sampling (vx,vy,vz,tx,ty,tz,s). ''' return [list(eul2quat(parameter_values[0], parameter_values[1], parameter_values[2])) + [np.asscalar(p) for p in parameter_values[3:]] for parameter_values in np.nditer(np.meshgrid(thetaX, thetaY, thetaZ, tx, ty, tz, scale))] def eul2quat(ax, ay, az, atol=1e-8): ''' Translate between Euler angle (ZYX) order and quaternion representation of a rotation. Args: ax: X rotation angle in radians. ay: Y rotation angle in radians. az: Z rotation angle in radians. atol: tolerance used for stable quaternion computation (qs==0 within this tolerance). Return: Numpy array with three entries representing the vectorial component of the quaternion. ''' # Create rotation matrix using ZYX Euler angles and then compute quaternion using entries. cx = np.cos(ax) cy = np.cos(ay) cz = np.cos(az) sx = np.sin(ax) sy = np.sin(ay) sz = np.sin(az) r = np.zeros((3, 3)) r[0, 0] = cz * cy r[0, 1] = cz * sy * sx - sz * cx r[0, 2] = cz * sy * cx + sz * sx r[1, 0] = sz * cy r[1, 1] = sz * sy * sx + cz * cx r[1, 2] = sz * sy * cx - cz * sx r[2, 0] = -sy r[2, 1] = cy * sx r[2, 2] = cy * cx # Compute quaternion: qs = 0.5 * np.sqrt(r[0, 0] + r[1, 1] + r[2, 2] + 1) qv = np.zeros(3) # If the scalar component of the quaternion is close to zero, we # compute the vector part using a numerically stable approach if np.isclose(qs, 0.0, atol): i = np.argmax([r[0, 0], r[1, 1], r[2, 2]]) j = (i + 1) % 3 k = (j + 1) % 3 w = np.sqrt(r[i, i] - r[j, j] - r[k, k] + 1) qv[i] = 0.5 * w qv[j] = (r[i, j] + r[j, i]) / (2 * w) qv[k] = (r[i, k] + r[k, i]) / (2 * w) else: denom = 4 * qs qv[0] = (r[2, 1] - r[1, 2]) / denom; qv[1] = (r[0, 2] - r[2, 0]) / denom; qv[2] = (r[1, 0] - r[0, 1]) / denom; return qv def get_reference_image(image): # Create the reference image with a zero origin, identity direction cosine matrix and 
dimension reference_origin = np.zeros(dimension) reference_direction = np.identity(dimension).flatten() reference_image = sitk.Image(reference_size, image.GetPixelIDValue()) reference_image.SetOrigin(reference_origin) reference_image.SetSpacing(reference_spacing) reference_image.SetDirection(reference_direction) return reference_image def get_augmentation_transform(img, reference_image, augmentation_type): aug_transform = sitk.Similarity2DTransform() if dimension == 2 else sitk.Similarity3DTransform() reference_origin = np.zeros(dimension) reference_center = np.array( reference_image.TransformContinuousIndexToPhysicalPoint(np.array(reference_image.GetSize()) / 2.0)) transform = sitk.AffineTransform(dimension) transform.SetMatrix(img.GetDirection()) transform.SetTranslation(np.array(img.GetOrigin()) - reference_origin) # Modify the transformation to align the centers of the original_classification and reference image instead of their origins. centering_transform = sitk.TranslationTransform(dimension) img_center = np.array(img.TransformContinuousIndexToPhysicalPoint(np.array(img.GetSize()) / 2.0)) centering_transform.SetOffset(np.array(transform.GetInverse().TransformPoint(img_center) - reference_center)) centered_transform = sitk.Transform(transform) centered_transform.AddTransform(centering_transform) # Set the augmenting transform's center so that rotation is around the image center. 
aug_transform.SetCenter(reference_center) delta_Arr = [np.random.uniform(-0.174533, 0.174533), np.random.uniform(-0.174533, 0.174533), 0.0, np.random.uniform(-0.174533, 0.174533), np.random.uniform(-0.174533, 0.174533)] translationsXY_Arr = [np.random.uniform(-4, 4), np.random.uniform(-4, 4), 0.0, np.random.uniform(-4, 4), np.random.uniform(-4, 4)] # in mm translationsZ_Arr = [np.random.uniform(-6, 6), np.random.uniform(-6, 6), 0.0, np.random.uniform(-6, 6), np.random.uniform(-6, 6)] scale_factor = np.random.uniform(0.9, 1.1) # vecs = [] rand_vec = np.array( [rn.randint(0, 4), rn.randint(0, 4), rn.randint(1, 3), rn.randint(0, 4), rn.randint(0, 4), rn.randint(0, 4), rn.randint(0, 2), rn.randint(0, 1)], dtype=int) # print(rand_vec) if AugmentTypes.ROTATE.value == augmentation_type: delta_x = delta_Arr[rand_vec[0]] delta_y = delta_Arr[rand_vec[1]] delta_z = delta_Arr[rand_vec[2]] transl_x = translationsXY_Arr[rand_vec[3]] transl_y = translationsXY_Arr[rand_vec[4]] transl_z = translationsZ_Arr[rand_vec[5]] scale = 1. elif AugmentTypes.SCALE.value == augmentation_type: delta_x = 0. delta_y = 0 delta_z = 0. transl_x = 0. transl_y = 0. transl_z = 0. scale = scale_factor elif AugmentTypes.TRANSLATION_3D.value == augmentation_type: delta_x = 0. delta_y = 0 delta_z = 0. transl_x = translationsXY_Arr[rand_vec[3]] transl_y = translationsXY_Arr[rand_vec[4]] transl_z = translationsZ_Arr[rand_vec[5]] scale = 1. else: delta_x = 0. delta_y = 0 delta_z = 0. transl_x = 0. transl_y = 0. transl_z = 0. scale = 1. 
transformation_parameters_list = similarity3D_parameter_space_regular_sampling([delta_x], [delta_y], [delta_z], [transl_x], [transl_y], [transl_z], [scale]) return centered_transform, aug_transform, transformation_parameters_list def get_transformed_gt(orig_gt, ref_image, augmentation_type, centered_transform, aug_transform, transformation_parameters_list, distance_based_interpol=True): nrClasses = 3 gt_shape = [reference_size[2], reference_size[1], reference_size[0], nrClasses] if distance_based_interpol: res_gt = np.zeros(gt_shape, dtype=np.uint8) orig_gt = np.where(orig_gt > 0.5, np.ones_like(orig_gt), np.zeros_like(orig_gt)) orig_gt = orig_gt.astype('int64') gt_distances = np.zeros(gt_shape) for c in range(0, nrClasses): orig_img_gt = sitk.GetImageFromArray(orig_gt[:, :, :, c]) orig_img_gt.SetSpacing(reference_spacing) # write_image(orig_img_gt, os.path.join(OUTPUT_DIR, 'orig_gt' + str(zone) + '.nrrd'), orig_img_gt, is_image=True) gt_dist = sitk.SignedMaurerDistanceMap(orig_img_gt, insideIsPositive=True, squaredDistance=False, useImageSpacing=True) resampled_dist = augment_images_spatial(gt_dist, ref_image, augmentation_type, centered_transform, aug_transform, transformation_parameters_list, default_intensity_value=-3000, interpolator=sitk.sitkLinear) gt_distances[:, :, :, c] = sitk.GetArrayFromImage(resampled_dist) # assign the final GT array the zone of the lowest distance for x in range(0, orig_img_gt.GetSize()[0]): for y in range(0, orig_img_gt.GetSize()[1]): for z in range(0, orig_img_gt.GetSize()[2]): array = [gt_distances[z, y, x, 0], gt_distances[z, y, x, 1], gt_distances[z, y, x, 2]] maxValue = max(array) if maxValue == -3000: res_gt[z, y, x, 0] = 1 else: max_index = array.index(maxValue) res_gt[z, y, x, max_index] = 1 else: # works only for one class ! 
orig_img_gt = sitk.GetImageFromArray(orig_gt) orig_img_gt.SetSpacing(reference_spacing) res_img_gt = augment_images_spatial(orig_img_gt, ref_image, augmentation_type, centered_transform, aug_transform, transformation_parameters_list, default_intensity_value=0, interpolator=sitk.sitkNearestNeighbor) # sitk.WriteImage(res_img_gt, 'res_GT.nrrd') # res_gt = np.zeros(gt_shape, dtype=np.uint8) res_gt = sitk.GetArrayFromImage(res_img_gt) return res_gt def get_single_image_augmentation(augmentation_type, orig_image, orig_gt, distance_based_interpol=True): out_img = np.zeros([reference_size[2], reference_size[1], reference_size[0], 1], dtype=np.float32) # out_gt = np.zeros([reference_size[2], reference_size[1], reference_size[0], nrClasses], dtype=np.uint8) img = sitk.GetImageFromArray(orig_image) # img1.SetSpacing(reference_spacing) reference_image = get_reference_image(img) # img = sitk.GetImageFromArray(orig_image) img.SetSpacing(reference_spacing) # write_image(img, os.path.join(OUTPUT_DIR, 'orig_image' + str(img_no) + '.nrrd'), reference_image, is_image=True) centered_transform, aug_transform, transformation_parameters_list = get_augmentation_transform(img, reference_image, augmentation_type) # transform image res_img = augment_images_spatial(img, reference_image, augmentation_type, centered_transform, aug_transform, transformation_parameters_list) # sitk.WriteImage(res_img, 'res_img.nrrd') out_img[:, :, :, 0] = sitk.GetArrayFromImage(res_img) # transform gt gt_ref = sitk.GetImageFromArray(orig_gt) gt_ref.SetSpacing(reference_spacing) # write_image(res_img, os.path.join(OUTPUT_DIR, 'changed_image' + str(img_no) + '_' + AugmentTypes( # augmentation_type).name +
<reponame>GKO95/quick_draw_cnn<gh_stars>1-10 import tensorflow as tf import tensorflow.keras as keras import matplotlib.pyplot as plt import numpy as np import random import os """ TRAINING DATASET: np.load(os.path.join(DATAPATH, DATAFILE[INDEX])) = [[0 0 0 ... 0 0 0] <- 0th INSTANCE (len: 784 = 28 * 28) [0 0 0 ... 0 0 0] <- 1st INSTANCE [0 0 0 ... 0 0 0] <- 2nd INSTANCE ... [0 0 0 ... 0 0 0] <- Xth INSTANCE [0 0 0 ... 0 0 0] <- Yth INSTANCE [0 0 0 ... 0 0 0]] <- Zth INSTANCE shape: (???, 784) """ # DATASET: Training preparation. BINPATH = "bin" DATAPATH = "datasets" DATAFILE = [file for file in os.listdir(DATAPATH) if os.path.isfile(os.path.join(DATAPATH, file))] DATANAME = [name.split('.')[0].split('_')[-1] for name in DATAFILE] DATANUMB = [0] * len(DATAFILE) def NeuralNetwork(): """ 2D CONVOLUTION LAYER > Reference: https://en.wikipedia.org/wiki/Convolutional_neural_network#Convolutional_layer The Conv2D layer has a depth of filters (aka. a number of kernels; 32 or 64) that detects certain pixel pattern using convolution operation. These kernels stride/shift along the input data pixels-by-pixels, resulting a single scalar value: Conv2D Operation -> Summation of element-wise multiplication Hence, the portion of the input data that has less similar traits to the pattern results relatively low value. And higher value means higher similarity to the kernel. The output size is determined by the following formula: * Input: Input size * Kernel: Kernel size * Stride: Pixels to stride/shift (generally stride < 3) * Padding: 0 when padding is set to 'valid' Output: => size: [(Input - Kernel + (2 * Padding)) / Stride] + 1 => shape: (size, size, depth) ...but if the output size is not an integer, the strides are incorrect! Example: >> Input = (28, 28), Depth = 32, Kernel = (5, 5), Stride = (1, 1)... 
=> size: [(28 - 5 + (2 * 0)) / 1] + 1 = 24 => shape: (24, 24, 32) Meanwhile, when padding is set as the 'same', the input data is padded with zeros to make the output data have the same size as the input data (right & bottom has a higher priority than left & top). Thus, the output size calculation formula is as follows: Output: => size: Input / Stride => shape: (size, size, depth) After the operation, there will be total 32 matrixes sized 24 x 24, containing scalar values from the convolution operaiton (as mentioned on the output shape). The Conv2D may return these matrixes AS THEY ARE (= feature maps), or return after processing them with activation function (= activation maps). Types: Sigmoid, Hyperbolic tangent (tanh), ReLU, etc... ReLU activation function is currently best used on CNN model or probably in general cases. This is because of the nature where ReLU calculation is simple and inexpensive as it only has derivative of 0 and 1. In summary, when this Conv2D layer accepts a single 28 x 28 of grayscale data, it returns 32 of 24 x 24 activation maps. Each activation map represents the pattern trait similarity measurement to its corresponding kernel; higher the value indicates that's where the kernel pattern could be found! """ layerConv2D_1 = tf.keras.layers.Conv2D( filters = 32, # The number of filter in the Conv2D layer: 32 kernel_size = (5, 5), # The size of the filter in the Conv2D layer. strides = (1, 1), # The amount of pixels to shift the kernel. padding = 'valid', # Leave the input data AS IS wihtout any padding ('valid'). activation = 'relu', # Activation function (default: None). input_shape = (28, 28, 1), # First Conv2D must specify the shape of input: 28 x 28 Grayscale channel ) """ 2D MAX POOLING LAYER The MaxPool2D layer works similar to Conv2D; when kernel operates convolution that gives a single scalar, MaxPooling gives a MAX value within the window. This compresses the activation maps that only leave the significants. 
MaxPooling layer does not increase the depth of the activation maps. It only shrinks the size of currently existing activation maps. The calculation formual on deriving output size is the same as Conv2D's since the fact that the pooling operation returns a single scalar is the same: * Input: Input size * Window: Pooling Window size (generally 2 x 2) * Stride: Pixels to stride/shift (default: size of the Pooling Window) * Padding: 0 when padding is set to 'valid' Output: => size: [(Input - Kernel + (2 * Padding)) / Stride] + 1 => shape: (size, size, depth) ...but if the output size is not an integer, the strides are incorrect! Example: >> Input = (24, 24), Window = (2, 2), Stride = (2, 2)... => size: [(24 - 2 + (2 * 0)) / 2] + 1 = 12 => shape: (12, 12, 32) """ layerMaxPooling2D_1 = tf.keras.layers.MaxPool2D( pool_size = (2, 2), # The size of a pooling window in the Conv2D layer. strides = (2, 2), # The amount of pixels to shift a pooling window. padding = 'valid', # Leave the input data AS IS wihtout any padding ('valid'). ) """ OUTPUT >> Input = (12, 12), Depth = 64, Kernel = (3, 3), Stride = (1, 1)... => size: [(12 - 3 + (2 * 0)) / 1] + 1 = 10 => shape: (10, 10, 64) DO NOT MISTAKEN: this does not mean the overall activation maps have increased from 32 to 64 matrixes. The 64 output depth is "per" input data. Because there were previously 32 activation maps from 'layerConv2D_1', each activation maps having its own 64 activation maps from 'layerConv2D_2' makes 32 * 64 = 2048 activation maps in total. However, what's important is not the number of total activation maps. It is the output shape, eg. (10, 10, 64) that matters the most! """ layerConv2D_2 = tf.keras.layers.Conv2D( filters = 64, kernel_size = (3, 3), ) """ OUTPUT >> Input = (10, 12), Window = (2, 2), Stride = (2, 2)... 
=> size: [(10 - 2 + (2 * 0)) / 2] + 1 = 5 => shape: (5, 5, 64) """ layerMaxPooling2D_2 = tf.keras.layers.MaxPool2D( pool_size = (2, 2), ) """ OUTPUT >> Input = (5, 5), Depth = 64, Kernel = (3, 3), Stride = (1, 1)... => size: [(5 - 3 + (2 * 0)) / 1] + 1 = 3 => shape: (3, 3, 64) In this case, there is no MaxPool2D layer followed by this Conv2D layer. It does not matter since MaxPool2D is just to compress the size of activation maps. TOTAL ACTIVATION MAPS: 32 -> 2048 (= 32 * 64) -> 131072 (= 32 * 64 * 64) Remember, what is more important is not the total number of activation maps but the output shape from the 'layerConv2D_3' Conv2D layer. """ layerConv2D_3 = tf.keras.layers.Conv2D( filters = 64, kernel_size= (3, 3), ) """ FLATTEN LAYER The Flatten layer compresses not just the size of the input data (that is, output data from 'layerConv2D_3' layer) but "reshapes" to a single dimensional tensor for a Dense layer. Considering the output shape from 'layerConv2D_3' layer was (None, 3, 3, 64), the layer flattens the tensor to (None, 576) which is 3 * 3 * 64 = 576. Here, the None in the very front of the tensor shape indicates the support for any batch size (dynamic)! """ layerFlatten = tf.keras.layers.Flatten() """ DENSE LAYER The Dense (aka. fully-connected) layer is where the training parameters and forward propagation for hypothesis/prediction occurs. Previous layers (eg. Conv2D, MaxPool2D, Flatten) are just to extract the characteristics & traits for image classification that is stored in activation maps. """ layerDense_1 = tf.keras.layers.Dense( units = 192, activation = 'relu', ) layerDense_2 = tf.keras.layers.Dense( units = 64, activation = 'relu', ) layerSoftmax = tf.keras.layers.Dense( units = 30, activation = 'softmax', )
<filename>tests/test_wheel.py # -*- coding: utf-8 -*- # # Copyright (C) 2013 <NAME>. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # from __future__ import unicode_literals import io import os import re import shutil import subprocess import sys import tempfile from compat import unittest from support import DistlibTestCase from distlib import DistlibException from distlib.compat import ZipFile, sysconfig, fsencode from distlib.database import DistributionPath from distlib.manifest import Manifest from distlib.metadata import Metadata, METADATA_FILENAME, LEGACY_METADATA_FILENAME from distlib.scripts import ScriptMaker from distlib.util import get_executable from distlib.wheel import (Wheel, PYVER, IMPVER, ARCH, ABI, COMPATIBLE_TAGS, is_compatible, _get_glibc_version) try: with open(os.devnull, 'wb') as junk: subprocess.check_call(['pip', '--version'], stdout=junk, stderr=subprocess.STDOUT) PIP_AVAILABLE = True except Exception: PIP_AVAILABLE = False HERE = os.path.abspath(os.path.dirname(__file__)) EGG_INFO_RE = re.compile(r'(-py\d\.\d)?\.egg-info', re.I) def pip_version(): result = None fd, fn = tempfile.mkstemp(prefix='distlib-test-', suffix='.txt') try: os.close(fd) with open(fn, 'wb') as out: subprocess.check_call(['pip', '--version'], stdout=out, stderr=subprocess.STDOUT) with io.open(fn, encoding='utf-8') as f: data = f.read().split() assert data[0] == 'pip' parts = data[1].split('.') result = [] for p in parts: if p.isdigit(): result.append(int(p)) else: result.append(p) result = tuple(result) finally: os.remove(fn) return result def convert_egg_info(libdir, prefix): files = os.listdir(libdir) ei = list(filter(lambda d: d.endswith('.egg-info'), files))[0] olddn = os.path.join(libdir, ei) di = EGG_INFO_RE.sub('.dist-info', ei) newdn = os.path.join(libdir, di) os.rename(olddn, newdn) files = os.listdir(newdn) for oldfn in files: pn = os.path.join(newdn, oldfn) if oldfn == 'PKG-INFO': md = 
Metadata(path=pn) mn = os.path.join(newdn, METADATA_FILENAME) md.write(mn) os.remove(pn) manifest = Manifest(os.path.dirname(libdir)) manifest.findall() dp = DistributionPath([libdir]) dist = next(dp.get_distributions()) dist.write_installed_files(manifest.allfiles, prefix) def install_dist(distname, workdir): pfx = '--install-option=' purelib = pfx + '--install-purelib=%s/purelib' % workdir platlib = pfx + '--install-platlib=%s/platlib' % workdir headers = pfx + '--install-headers=%s/headers' % workdir scripts = pfx + '--install-scripts=%s/scripts' % workdir data = pfx + '--install-data=%s/data' % workdir cmd = ['pip', 'install', '--index-url', 'https://pypi.org/simple/', '--timeout', '3', '--default-timeout', '3', purelib, platlib, headers, scripts, data, distname] result = { 'scripts': os.path.join(workdir, 'scripts'), 'headers': os.path.join(workdir, 'headers'), 'data': os.path.join(workdir, 'data'), } p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, _ = p.communicate() if p.returncode: raise ValueError('pip failed to install %s:\n%s' % (distname, stdout)) for dn in ('purelib', 'platlib'): libdir = os.path.join(workdir, dn) if os.path.isdir(libdir): result[dn] = libdir break convert_egg_info(libdir, workdir) dp = DistributionPath([libdir]) dist = next(dp.get_distributions()) md = dist.metadata result['name'] = md.name result['version'] = md.version return result class WheelTestCase(DistlibTestCase): def test_valid_filename(self): attrs = ('name', 'version', 'buildver', 'pyver', 'abi', 'arch') cases = ( ('pkg-1.0.0-cp32.cp33-noabi-noarch.whl', ('pkg', '1.0.0', '', ['cp32', 'cp33'], ['noabi'], ['noarch'])), ('package-1.0.0-cp33-noabi-linux_x86_64.whl', ('package', '1.0.0', '', ['cp33'], ['noabi'], ['linux_x86_64'])), ('test-1.0-1st-py2.py3-none-win32.whl', ('test', '1.0', '1st', ['py2', 'py3'], ['none'], ['win32'])), ('Pillow-2.8.1-cp27-none-macosx_10_6_intel.' 'macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.' 
'macosx_10_10_x86_64.whl', ('Pillow', '2.8.1', '', ['cp27'], ['none'], ['macosx_10_6_intel', 'macosx_10_9_intel', 'macosx_10_9_x86_64', 'macosx_10_10_intel', 'macosx_10_10_x86_64'])), ) for name, values in cases: w = Wheel(name) self.assertEqual(w.wheel_version, (1, 1)) self.assertEqual(w.filename, name) for attr, value in zip(attrs, values): self.assertEqual(getattr(w, attr), value) def test_invalid_filename(self): names = ( '', 'package.whl', 'package-1.0.0-cp32.cp33.whl', 'package-1.0.0-cp32.cp33.whl', 'package-1.0.0-cp32.cp33-noabi.whl', 'package-1.0.0-cp32.cp33-noabi-noarch.zip', ) for name in names: self.assertRaises(DistlibException, Wheel, name) def test_valid_name(self): attrs = ('name', 'version', 'buildver', 'pyver', 'abi', 'arch') pyver = PYVER cases = ( ('pkg-1.0.0', ('pkg', '1.0.0', '', [PYVER], ['none'], ['any'])), ('test-1.0-1st', ('test', '1.0', '1st', [PYVER], ['none'], ['any'])), (None, ('dummy', '0.1', '', [PYVER], ['none'], ['any'])), ) ENDING = '-%s-none-any.whl' % PYVER for name, values in cases: w = Wheel(name) self.assertEqual(w.wheel_version, (1, 1)) self.assertTrue(w.filename.endswith(ENDING)) for attr, value in zip(attrs, values): self.assertEqual(getattr(w, attr), value) def test_compatible_tags(self): self.assertEqual(PYVER, 'py%d%d' % sys.version_info[:2]) tags = COMPATIBLE_TAGS self.assertIn((PYVER, 'none', 'any'), tags) self.assertIn((PYVER[:-1], 'none', 'any'), tags) this_arch = filter(lambda o: o[-1] == ARCH, tags) self.assertTrue(this_arch) if sys.platform.startswith('linux'): arch = ARCH.replace('linux_', '') parts = _get_glibc_version() if len(parts) == 2: self.assertTrue(filter(lambda o: o[-1] == 'manylinux_%s_%s_%s' % (parts[0], parts[1], arch), tags)) if parts >= (2, 17): self.assertTrue(filter(lambda o: o[-1] == 'manylinux2014_%s' % arch, tags)) if parts >= (2, 12): self.assertTrue(filter(lambda o: o[-1] == 'manylinux2010_%s' % arch, tags)) if parts >= (2, 5): self.assertTrue(filter(lambda o: o[-1] == 'manylinux1_%s' % 
arch, tags)) def test_is_compatible(self): fn = os.path.join(HERE, 'dummy-0.1-py27-none-any.whl') if PYVER in ('py27', 'py30', 'py31'): self.assertTrue(is_compatible(fn)) self.assertTrue(Wheel(fn).is_compatible()) # use actual wheel names from PyPI. wheel_names = [ 'simplejson-3.17.2-cp27-cp27m-macosx_10_13_x86_64.whl', 'simplejson-3.17.2-cp27-cp27m-manylinux1_i686.whl', 'simplejson-3.17.2-cp27-cp27m-manylinux1_x86_64.whl', 'simplejson-3.17.2-cp27-cp27m-manylinux2010_i686.whl', 'simplejson-3.17.2-cp27-cp27m-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp27-cp27mu-manylinux1_i686.whl', 'simplejson-3.17.2-cp27-cp27mu-manylinux1_x86_64.whl', 'simplejson-3.17.2-cp27-cp27mu-manylinux2010_i686.whl', 'simplejson-3.17.2-cp27-cp27mu-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp27-cp27m-win32.whl', 'simplejson-3.17.2-cp27-cp27m-win_amd64.whl', 'simplejson-3.17.2-cp33-cp33m-win32.whl', 'simplejson-3.17.2-cp33-cp33m-win_amd64.whl', 'simplejson-3.17.2-cp34-cp34m-win32.whl', 'simplejson-3.17.2-cp34-cp34m-win_amd64.whl', 'simplejson-3.17.2-cp35-cp35m-manylinux1_i686.whl', 'simplejson-3.17.2-cp35-cp35m-manylinux1_x86_64.whl', 'simplejson-3.17.2-cp35-cp35m-manylinux2010_i686.whl', 'simplejson-3.17.2-cp35-cp35m-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp35-cp35m-manylinux2014_aarch64.whl', 'simplejson-3.17.2-cp35-cp35m-win32.whl', 'simplejson-3.17.2-cp35-cp35m-win_amd64.whl', 'simplejson-3.17.2-cp36-cp36m-macosx_10_13_x86_64.whl', 'simplejson-3.17.2-cp36-cp36m-manylinux1_i686.whl', 'simplejson-3.17.2-cp36-cp36m-manylinux1_x86_64.whl', 'simplejson-3.17.2-cp36-cp36m-manylinux2010_i686.whl', 'simplejson-3.17.2-cp36-cp36m-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp36-cp36m-manylinux2014_aarch64.whl', 'simplejson-3.17.2-cp36-cp36m-win32.whl', 'simplejson-3.17.2-cp36-cp36m-win_amd64.whl', 'simplejson-3.17.2-cp37-cp37m-macosx_10_14_x86_64.whl', 'simplejson-3.17.2-cp37-cp37m-manylinux1_i686.whl', 'simplejson-3.17.2-cp37-cp37m-manylinux1_x86_64.whl', 
'simplejson-3.17.2-cp37-cp37m-manylinux2010_i686.whl', 'simplejson-3.17.2-cp37-cp37m-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp37-cp37m-manylinux2014_aarch64.whl', 'simplejson-3.17.2-cp37-cp37m-win32.whl', 'simplejson-3.17.2-cp37-cp37m-win_amd64.whl', 'simplejson-3.17.2-cp38-cp38-macosx_10_14_x86_64.whl', 'simplejson-3.17.2-cp38-cp38-manylinux1_i686.whl', 'simplejson-3.17.2-cp38-cp38-manylinux1_x86_64.whl', 'simplejson-3.17.2-cp38-cp38-manylinux2010_i686.whl', 'simplejson-3.17.2-cp38-cp38-manylinux2010_x86_64.whl', 'simplejson-3.17.2-cp38-cp38-manylinux2014_aarch64.whl', 'Pillow-7.2.0-cp35-cp35m-macosx_10_10_intel.whl', 'Pillow-7.2.0-cp35-cp35m-manylinux1_i686.whl', 'Pillow-7.2.0-cp35-cp35m-manylinux1_x86_64.whl', 'Pillow-7.2.0-cp35-cp35m-manylinux2014_aarch64.whl', 'Pillow-7.2.0-cp35-cp35m-win32.whl', 'Pillow-7.2.0-cp35-cp35m-win_amd64.whl', 'Pillow-7.2.0-cp36-cp36m-macosx_10_10_x86_64.whl', 'Pillow-7.2.0-cp36-cp36m-manylinux1_i686.whl', 'Pillow-7.2.0-cp36-cp36m-manylinux1_x86_64.whl', 'Pillow-7.2.0-cp36-cp36m-manylinux2014_aarch64.whl', 'Pillow-7.2.0-cp36-cp36m-win32.whl', 'Pillow-7.2.0-cp36-cp36m-win_amd64.whl', 'Pillow-7.2.0-cp37-cp37m-macosx_10_10_x86_64.whl', 'Pillow-7.2.0-cp37-cp37m-manylinux1_i686.whl', 'Pillow-7.2.0-cp37-cp37m-manylinux1_x86_64.whl', 'Pillow-7.2.0-cp37-cp37m-manylinux2014_aarch64.whl', 'Pillow-7.2.0-cp37-cp37m-win32.whl', 'Pillow-7.2.0-cp37-cp37m-win_amd64.whl', 'Pillow-7.2.0-cp38-cp38-macosx_10_10_x86_64.whl', 'Pillow-7.2.0-cp38-cp38-manylinux1_i686.whl', 'Pillow-7.2.0-cp38-cp38-manylinux1_x86_64.whl', 'Pillow-7.2.0-cp38-cp38-manylinux2014_aarch64.whl', 'Pillow-7.2.0-cp38-cp38-win32.whl', 'Pillow-7.2.0-cp38-cp38-win_amd64.whl', 'Pillow-7.2.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl', 'Pillow-7.2.0-pp36-pypy36_pp73-manylinux2010_x86_64.whl', 'Pillow-7.2.0-pp36-pypy36_pp73-win32.whl', 'reportlab-3.5.47-cp27-cp27m-macosx_10_9_x86_64.whl', 'reportlab-3.5.47-cp27-cp27m-manylinux1_i686.whl', 
'reportlab-3.5.47-cp27-cp27m-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp27-cp27m-manylinux2010_i686.whl', 'reportlab-3.5.47-cp27-cp27m-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp27-cp27mu-manylinux1_i686.whl', 'reportlab-3.5.47-cp27-cp27mu-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp27-cp27mu-manylinux2010_i686.whl', 'reportlab-3.5.47-cp27-cp27mu-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp27-cp27m-win32.whl', 'reportlab-3.5.47-cp27-cp27m-win_amd64.whl', 'reportlab-3.5.47-cp35-cp35m-macosx_10_6_intel.whl', 'reportlab-3.5.47-cp35-cp35m-manylinux1_i686.whl', 'reportlab-3.5.47-cp35-cp35m-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp35-cp35m-manylinux2010_i686.whl', 'reportlab-3.5.47-cp35-cp35m-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp35-cp35m-win32.whl', 'reportlab-3.5.47-cp35-cp35m-win_amd64.whl', 'reportlab-3.5.47-cp36-cp36m-macosx_10_9_x86_64.whl', 'reportlab-3.5.47-cp36-cp36m-manylinux1_i686.whl', 'reportlab-3.5.47-cp36-cp36m-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp36-cp36m-manylinux2010_i686.whl', 'reportlab-3.5.47-cp36-cp36m-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp36-cp36m-win32.whl', 'reportlab-3.5.47-cp36-cp36m-win_amd64.whl', 'reportlab-3.5.47-cp37-cp37m-macosx_10_9_x86_64.whl', 'reportlab-3.5.47-cp37-cp37m-manylinux1_i686.whl', 'reportlab-3.5.47-cp37-cp37m-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp37-cp37m-manylinux2010_i686.whl', 'reportlab-3.5.47-cp37-cp37m-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp37-cp37m-win32.whl', 'reportlab-3.5.47-cp37-cp37m-win_amd64.whl', 'reportlab-3.5.47-cp38-cp38-macosx_10_9_x86_64.whl', 'reportlab-3.5.47-cp38-cp38-manylinux1_i686.whl', 'reportlab-3.5.47-cp38-cp38-manylinux1_x86_64.whl', 'reportlab-3.5.47-cp38-cp38-manylinux2010_i686.whl', 'reportlab-3.5.47-cp38-cp38-manylinux2010_x86_64.whl', 'reportlab-3.5.47-cp38-cp38-win32.whl', 'reportlab-3.5.47-cp38-cp38-win_amd64.whl', ] for fn in filter(is_compatible, wheel_names): w = Wheel(fn) our_arch = ARCH.replace('linux_', '') for pyver, abi, arch in 
w.tags: self.assertEqual(pyver, IMPVER) self.assertEqual(abi, ABI) if sys.platform != 'darwin': self.assertTrue(arch.endswith(our_arch)) else: m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', our_arch) self.assertTrue(m) _, major, minor, our_arch_kind = m.groups() our_major = int(major) our_minor = int(minor) m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', arch) self.assertTrue(m) _, major, minor, arch_kind = m.groups() major = int(major) minor = int(minor) self.assertEqual(major, our_major) self.assertLessEqual(minor, our_minor) if arch_kind in ('x86_64', 'i386'): self.assertEqual(arch_kind, our_arch_kind) elif arch_kind == 'fat': self.assertIn(our_arch_kind, ('i386', 'ppc')) elif arch_kind == 'fat3': self.assertIn(our_arch_kind, ('i386', 'ppc', 'x86_x64')) elif arch_kind == 'fat64': self.assertIn(our_arch_kind, ('ppc64', 'x86_x64')) elif arch_kind == 'intel': self.assertIn(our_arch_kind, ('i386', 'x86_x64')) elif arch_kind == 'universal': self.assertIn(our_arch_kind, ('i386', 'ppc', 'ppc64', 'x86_x64', 'intel')) if 'manylinux' in arch: self.assertTrue(sys.platform.startswith('linux')) parts = _get_glibc_version() self.assertEqual(len(parts), 2) if 'manylinux2014_' in arch: self.assertTrue(parts >= (2, 17)) if 'manylinux2010_' in arch: self.assertTrue(parts >= (2, 12)) if 'manylinux1_' in arch: self.assertTrue(parts >= (2, 5)) if 'manylinux_' in arch: s = 'manylinux_%s_%s_' % parts self.assertIn(s, arch) def test_metadata(self): fn = os.path.join(HERE, 'dummy-0.1-py27-none-any.whl') w = Wheel(fn) md = w.metadata self.assertEqual(md.name, 'dummy') self.assertEqual(md.version, '0.1') def test_invalid(self): fn = os.path.join(HERE, 'dummy-0.1-py27-none-any.whl') w = Wheel(fn) self.assertRaises(DistlibException, w.get_hash, b'', 'badalgo') def check_built_wheel(self, wheel, expected): for key in expected: self.assertEqual(expected[key], getattr(wheel, key)) fn = os.path.join(wheel.dirname, wheel.filename) self.assertTrue(os.path.exists(fn)) name, version = wheel.name, 
wheel.version with ZipFile(fn, 'r') as zf: for key in ('scripts', 'headers', 'data'): arcname = '%s-%s.data/%s/%s_file.txt' % (name, version, key, key) with zf.open(arcname) as bf: data = bf.read() expected = ('dummy data - %s' % key).encode('utf-8') if key == 'scripts': expected = b'#!python\n' + expected self.assertTrue(data, expected) def test_build_tags(self): workdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, workdir) name = 'dummy' version = '0.1' paths = {'prefix': workdir} for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'): paths[key] = p = os.path.join(workdir, key) os.makedirs(p) fn = os.path.join(p, '%s_file.txt' % key) with open(fn, 'w') as f: f.write('dummy data - %s' % key) if key in ('purelib', 'platlib'): p = os.path.join(p, '%s-%s.dist-info' % (name, version)) os.makedirs(p) fn = os.path.join(p, 'RECORD') purelib = paths.pop('purelib') platlib = paths.pop('platlib') # Make a pure wheel with default tags paths['purelib'] = purelib wheel = Wheel('%s-%s' % (name, version)) wheel.dirname = workdir wheel.build(paths) expected = { 'name': name, 'version': version, 'pyver': [PYVER], 'abi': ['none'], 'arch': ['any'], 'filename': 'dummy-0.1-%s-none-any.whl' % PYVER, } self.check_built_wheel(wheel, expected) # Make a pure wheel with custom tags pyver = [PYVER[:-1], PYVER] wheel.build(paths, {'pyver': pyver}) expected = { 'name': name, 'version': version, 'pyver': pyver, 'abi': ['none'], 'arch': ['any'], 'filename': 'dummy-0.1-%s-none-any.whl' % '.'.join(pyver), } self.check_built_wheel(wheel, expected) # Make a non-pure wheel with default tags paths.pop('purelib') paths['platlib'] = platlib wheel.build(paths) expected['pyver'] = [IMPVER] expected['abi'] = [ABI] expected['arch'] = [ARCH] expected['filename'] = 'dummy-0.1-%s-%s-%s.whl' % (IMPVER, ABI, ARCH) self.check_built_wheel(wheel, expected) def do_build_and_install(self, dist): srcdir = tempfile.mkdtemp() self.addCleanup(shutil.rmtree, srcdir) dstdir = tempfile.mkdtemp() 
self.addCleanup(shutil.rmtree, dstdir) paths = install_dist(dist, srcdir) paths['prefix'] = srcdir w = Wheel() w.name = paths.pop('name') w.version = paths.pop('version') w.dirname = srcdir pathname = w.build(paths) self.assertTrue(os.path.exists(pathname)) paths = {'prefix': dstdir} for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'): paths[key] = os.path.join(dstdir, key) w = Wheel(pathname) maker = ScriptMaker(None, None, add_launchers=False) maker.executable = os.path.join(paths['scripts'], 'python') dist = w.install(paths, maker) self.assertIsNotNone(dist) self.assertEqual(dist.name, w.name) self.assertEqual(dist.version, w.version) shared = dist.shared_locations self.assertTrue(shared) os.remove(pathname) sm = Manifest(srcdir) sm.findall() sfiles = set([os.path.relpath(p, srcdir) for p in sm.allfiles]) dm = Manifest(dstdir) dm.findall() dfiles = set([os.path.relpath(p, dstdir) for p in dm.allfiles]) omitted = sfiles - dfiles omitted = omitted.pop() endings = os.path.join('.dist-info', 'WHEEL'), '.pyc', '.pyo' self.assertTrue(omitted.endswith(endings)) def test_version_incompatibility(self): class Warner(object): def __call__(self, wheel_version, file_version): self.wheel_version = wheel_version self.file_version = file_version fn = os.path.join(HERE, 'dummy-0.1-py27-none-any.whl') dstdir =
<reponame>wedebe/enigma2-plugins #!/usr/bin/python # -*- coding: utf-8 -*- # Advanced Movie Selection for Dreambox-Enigma2 # # The plugin is developed on the basis from a lot of single plugins (thx for the code @ all) # Coded by JackDaniel & cmikula(c)2011 # Support: www.i-have-a-dreambox.com # # This plugin is licensed under the Creative Commons # Attribution-NonCommercial-ShareAlike 3.0 Unported # License. To view a copy of this license, visit # http://creativecommons.org/licenses/by-nc-sa/3.0/ or send a letter to Creative # Commons, 559 <NAME>, Stanford, California 94305, USA. # # Alternatively, this plugin may be distributed and executed on hardware which # is licensed by Dream Multimedia GmbH. # # This plugin is NOT free software. It is open source, you are allowed to # modify it (if you keep the license), but it may not be commercially # distributed other than under the conditions noted above. # from __init__ import _ from enigma import ePoint from Screens.Screen import Screen from RecordPaths import RecordPathsSettings from About import AdvancedMovieSelectionAbout from Components.Pixmap import Pixmap from Components.PluginComponent import plugins from Plugins.Plugin import PluginDescriptor from Components.config import config, getConfigListEntry, configfile, ConfigSelection from Components.Sources.StaticText import StaticText from Components.Button import Button from Components import ConfigList as eConfigList from Screens.VirtualKeyBoard import VirtualKeyBoard from Screens.LocationBox import MovieLocationBox from Components.UsageConfig import preferredPath from Screens.MessageBox import MessageBox from MessageBoxEx import MessageBox as MessageBoxEx from Components.Sources.Boolean import Boolean from Components.Sources.List import List from Components.ActionMap import ActionMap, NumberActionMap from enigma import getDesktop, quitMainloop from ClientSetup import ClientSetup from Source.Globals import pluginPresent, SkinTools from Source.Config import qButtons 
class ConfigList(eConfigList.ConfigList):
    """ConfigList variant that notifies entries when the selection moves on/off them."""

    def __init__(self, list, session=None):
        # NOTE: 'list' shadows the builtin; the name is kept for API
        # compatibility with the base enigma2 ConfigList.
        eConfigList.ConfigList.__init__(self, list, session=session)

    def selectionChanged(self):
        # Tell the previously selected config entry it lost the selection.
        if isinstance(self.current, tuple) and len(self.current) >= 2:
            self.current[1].onDeselect(self.session)
        self.current = self.getCurrent()
        if isinstance(self.current, tuple) and len(self.current) >= 2:
            self.current[1].onSelect(self.session)
        else:
            # Current row is not a (label, ConfigElement) entry: skip the
            # selection-changed callbacks entirely.
            return
        for x in self.onSelectionChanged:
            x()

    def preWidgetRemove(self, instance):
        # Deselect the current entry and detach this list from the GUI
        # widget before the widget instance goes away.
        if isinstance(self.current, tuple) and len(self.current) >= 2:
            self.current[1].onDeselect(self.session)
        instance.selectionChanged.get().remove(self.selectionChanged)
        instance.setContent(None)

class ConfigListScreen(eConfigList.ConfigListScreen):
    """Screen mixin wiring remote-control/keyboard input to the embedded ConfigList."""

    def __init__(self, list, session=None, on_change=None):
        self["config_actions"] = NumberActionMap(["SetupActions", "InputAsciiActions", "KeyboardInputActions"],
        {
            "gotAsciiCode": self.keyGotAscii,
            "ok": self.keyOK,
            "left": self.keyLeft,
            "right": self.keyRight,
            "home": self.keyHome,
            "end": self.keyEnd,
            "deleteForward": self.keyDelete,
            "deleteBackward": self.keyBackspace,
            "toggleOverwrite": self.keyToggleOW,
            "1": self.keyNumberGlobal,
            "2": self.keyNumberGlobal,
            "3": self.keyNumberGlobal,
            "4": self.keyNumberGlobal,
            "5": self.keyNumberGlobal,
            "6": self.keyNumberGlobal,
            "7": self.keyNumberGlobal,
            "8": self.keyNumberGlobal,
            "9": self.keyNumberGlobal,
            "0": self.keyNumberGlobal
        }, -1)  # priority -1: prevent left/right from overriding the listbox
        self["VirtualKB"] = ActionMap(["VirtualKeyboardActions"],
        {
            "showVirtualKeyboard": self.KeyText,
        }, -2)
        # Keyboard helper is enabled on demand (presumably by
        # handleInputHelpers when a text entry is selected -- verify).
        self["VirtualKB"].setEnabled(False)
        self["config"] = ConfigList(list, session=session)
        # Optional change callback; defaults to a no-op lambda.
        if on_change is not None:
            self.__changed = on_change
        else:
            self.__changed = lambda: None
        if not self.handleInputHelpers in self["config"].onSelectionChanged:
            self["config"].onSelectionChanged.append(self.handleInputHelpers)

from Source.Globals import SkinResolutionHelper

class BackupRestore(ConfigListScreen, Screen,
SkinResolutionHelper): def __init__(self, session, csel=None): Screen.__init__(self, session) SkinResolutionHelper.__init__(self) self.csel = csel self["setupActions"] = ActionMap(["OkCancelActions", "ColorActions"], { "ok": self.okPressed, "cancel": self.close, "red": self.close, "green": self.openFilebrowser, "yellow": self.backup }, -2) self.list = [] self.backup_dirs = config.movielist.videodirs.value[:] default = config.usage.default_path.value if default not in self.backup_dirs: self.backup_dirs.append(default) if config.AdvancedMovieSelection.backup_path.value: default = config.AdvancedMovieSelection.backup_path.value if default not in self.backup_dirs: print "path from config:", default self.backup_dirs.append(default) print "backup dirs:", self.backup_dirs self.backup_config_path = ConfigSelection(default=default, choices=self.backup_dirs) self.list.append(getConfigListEntry(_("Backup directory path:"), self.backup_config_path)) ConfigListScreen.__init__(self, self.list, session=self.session) self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText(_("Restore settings")) self["key_yellow"] = StaticText(_("Backup settings")) self.onShown.append(self.setWindowTitle) def setWindowTitle(self): self.setTitle(_("Backup/Restore Advanced Movie Selection settings")) def getBackupPath(self): return self.backup_config_path.getValue() def backup(self): from Source.Config import createBackup path = self.getBackupPath() result = createBackup(path) if result: self.session.open(MessageBox, _("Settings backup successfully created in %s.") % (result), type=MessageBox.TYPE_INFO) self.close() else: self.session.open(MessageBox, _("Error creating settings backup!"), type=MessageBox.TYPE_ERROR) def openFilebrowser(self): from FileBrowser import FileBrowser path = self.getBackupPath() self.session.openWithCallback(self.restoreCallback, FileBrowser, path) def restoreCallback(self, answer): print answer if answer: from Source.Config import loadBackup 
loadBackup(answer) self.session.open(MessageBox, _("Some settings changes require close/reopen the movielist to take effect."), type=MessageBox.TYPE_INFO) self.close() def okPressed(self): from Screens.LocationBox import LocationBox path = self.getBackupPath() from Components.config import ConfigLocations locations = ConfigLocations(self.backup_dirs) self.session.openWithCallback(self.dirnameSelected, LocationBox, _("Please select backup path here:"), currDir=path, bookmarks=locations) def dirnameSelected(self, answer): if not answer: return print "backup path:", answer if answer not in self.backup_dirs: self.backup_dirs.append(answer) self.backup_config_path.setChoices(self.backup_dirs, default=answer) self.backup_config_path.setValue(answer) config.AdvancedMovieSelection.backup_path.value = answer config.AdvancedMovieSelection.backup_path.save() class AdvancedMovieSelectionSetup(ConfigListScreen, Screen): def __init__(self, session, csel=None): Screen.__init__(self, session) self.csel = csel self.skinName = SkinTools.appendResolution("AdvancedMovieSelectionSetup") self.bouquet_length = 13 self.needsRestartFlag = False self.needsE2restartFlag = False self.needsReopenFlag = False self["setupActions"] = ActionMap(["ColorActions", "OkCancelActions", "MenuActions", "EPGSelectActions"], { "ok": self.keySave, "cancel": self.keyCancel, "red": self.keyCancel, "green": self.keySave, "yellow": self.buttonsetup, "blue": self.RecPathSettings, "info": self.about, "menu": self.clientsetup, "nextBouquet": self.nextBouquet, "prevBouquet": self.prevBouquet, }, -2) self.list = [] ConfigListScreen.__init__(self, self.list, session=self.session) if not self.showHelp in self["config"].onSelectionChanged: self["config"].onSelectionChanged.append(self.showHelp) self.createSetup() self["key_red"] = StaticText(_("Close")) self["key_green"] = StaticText(_("Save")) self["key_yellow"] = StaticText(_("Color key settings")) self["key_blue"] = StaticText(_("Default paths")) self["help"] = 
StaticText("") self["Trailertxt"] = StaticText("") self["TMDbtxt"] = StaticText("") self["IMDbtxt"] = StaticText("") self["OFDbtxt"] = StaticText("") self["MenuIcon"] = Pixmap() self.onShown.append(self.setWindowTitle) self.onLayoutFinish.append(self.saveListsize) self.pluginsavailable() self.onHide.append(self.updateSettings) self.setMenubutton() def setMenubutton(self): if config.AdvancedMovieSelection.use_wastebasket.value: self["MenuIcon"].show() else: self["MenuIcon"].hide() def clientsetup(self): if config.AdvancedMovieSelection.use_wastebasket.value: self.session.open(ClientSetup) def updateSettings(self): if self.csel: self.csel["list"].updateSettings() self.csel["list"].updateHotplugDevices() self.csel.reloadList() def saveListsize(self): listsize = self["config"].instance.size() self.listWidth = listsize.width() self.listHeight = listsize.height() self.bouquet_length = int(self.listHeight / 25) def nextBouquet(self): self["config"].setCurrentIndex(max(self["config"].getCurrentIndex() - self.bouquet_length, 0)) def prevBouquet(self): self["config"].setCurrentIndex(min(self["config"].getCurrentIndex() + self.bouquet_length, len(self.list) - 1)) def setWindowTitle(self): self.setTitle(_("Advanced Movie Selection Setup")) def keyLeft(self): ConfigListScreen.keyLeft(self) self.checkListentrys() def keyRight(self): ConfigListScreen.keyRight(self) self.checkListentrys() def checkListentrys(self): needRefresh = False if config.AdvancedMovieSelection.show_dirsize.isChanged(): config.AdvancedMovieSelection.show_dirsize.save() needRefresh = True if config.AdvancedMovieSelection.show_date_shortdesc.isChanged(): config.AdvancedMovieSelection.show_date_shortdesc.save() needRefresh = True if config.AdvancedMovieSelection.use_original_movieplayer_summary.isChanged(): config.AdvancedMovieSelection.use_original_movieplayer_summary.save() needRefresh = True if not config.AdvancedMovieSelection.use_wastebasket.value: 
config.AdvancedMovieSelection.auto_empty_wastebasket.setValue("-1") config.AdvancedMovieSelection.auto_empty_wastebasket.save() if config.AdvancedMovieSelection.auto_empty_wastebasket.isChanged(): config.AdvancedMovieSelection.auto_empty_wastebasket.save() needRefresh = True if config.AdvancedMovieSelection.show_picon.isChanged(): config.AdvancedMovieSelection.show_picon.save() needRefresh = True if config.usage.load_length_of_movies_in_moviellist.isChanged(): config.usage.load_length_of_movies_in_moviellist.save() needRefresh = True if config.AdvancedMovieSelection.showpreview.isChanged(): config.AdvancedMovieSelection.showpreview.save() self.needsReopenFlag = True needRefresh = True if config.AdvancedMovieSelection.showcolorstatusinmovielist.isChanged(): config.AdvancedMovieSelection.showcolorstatusinmovielist.save() needRefresh = True if config.AdvancedMovieSelection.exitkey.isChanged(): config.AdvancedMovieSelection.exitkey.save() needRefresh = True if config.AdvancedMovieSelection.useseekbar.isChanged(): config.AdvancedMovieSelection.useseekbar.save() self.needsRestartFlag = True needRefresh = True if config.AdvancedMovieSelection.video_preview.isChanged(): config.AdvancedMovieSelection.video_preview.save() needRefresh = True if config.AdvancedMovieSelection.video_preview.isChanged(): if not config.AdvancedMovieSelection.video_preview_fullscreen.value: config.AdvancedMovieSelection.video_preview.save() needRefresh = True else: config.AdvancedMovieSelection.video_preview.save() needRefresh = True self.needsReopenFlag = True if config.AdvancedMovieSelection.minitv.isChanged(): config.AdvancedMovieSelection.minitv.save() if not config.AdvancedMovieSelection.minitv.value: config.AdvancedMovieSelection.video_preview.setValue(False) config.AdvancedMovieSelection.video_preview.save() needRefresh = True if config.AdvancedMovieSelection.video_preview_autostart.isChanged(): config.AdvancedMovieSelection.video_preview_autostart.save() needRefresh = True if 
config.AdvancedMovieSelection.video_preview_fullscreen.isChanged(): config.AdvancedMovieSelection.video_preview_fullscreen.save() self.needsReopenFlag = True if config.AdvancedMovieSelection.video_preview.value and config.AdvancedMovieSelection.video_preview_fullscreen.isChanged(): config.AdvancedMovieSelection.video_preview.save() config.AdvancedMovieSelection.video_preview_fullscreen.save() self.needsReopenFlag = True if needRefresh: self.createSetup() if config.AdvancedMovieSelection.use_wastebasket.isChanged(): config.AdvancedMovieSelection.use_wastebasket.save() if config.AdvancedMovieSelection.use_wastebasket.value: self["MenuIcon"].show() else: self["MenuIcon"].hide() self.createSetup() if config.AdvancedMovieSelection.wastelist_buildtype.isChanged(): config.AdvancedMovieSelection.wastelist_buildtype.save() if config.AdvancedMovieSelection.use_wastebasket.value: self["MenuIcon"].show() else: self["MenuIcon"].hide() if config.AdvancedMovieSelection.debug.isChanged(): config.AdvancedMovieSelection.debug.save() from Source.Debug import Debug if config.AdvancedMovieSelection.debug.value: Debug.enable("/tmp/enigma2_stdout.log") else: Debug.disable() def createSetup(self): self.list = [] self.list.append(getConfigListEntry(_("Disable Advanced Movie Selection:"), config.AdvancedMovieSelection.ml_disable, _("Switch on/off the Advanced Movie Selection."))) self.list.append(getConfigListEntry(_("Start Advanced Movie Selection with:"), config.AdvancedMovieSelection.movie_launch, _("Select Start button for the Advanced Movie Selection."))) self.list.append(getConfigListEntry(_("Start on last movie location:"), config.AdvancedMovieSelection.startdir, _("Opens the film list on the last used location."))) self.list.append(getConfigListEntry(_("Start on first position in movielist:"), config.AdvancedMovieSelection.startonfirst, _("Always show selection in the first position in the movie list."))) self.list.append(getConfigListEntry(_("Show bookmarks in movielist:"), 
config.AdvancedMovieSelection.show_bookmarks, _("When enabled all created bookmarks appear in the movie list."))) self.list.append(getConfigListEntry(_("Show hotplug devices:"), config.AdvancedMovieSelection.hotplug, _("Enable this option to use USB-Devices."))) self.list.append(getConfigListEntry(_("Show plugin config in extensions menu from movielist:"), config.AdvancedMovieSelection.showmenu, _("Displays the Settings option in the menu at the movie list."))) self.list.append(getConfigListEntry(_("Show path selection for movie library in extensions menu:"), config.AdvancedMovieSelection.show_location_indexing, _("Here you can select which folders to include in the movie library creation."))) self.list.append(getConfigListEntry(_("Show movie library symbol in movielist:"), config.AdvancedMovieSelection.show_movielibrary, _("If enabled the movie library symbol is shown in movie list."))) self.list.append(getConfigListEntry(_("Show path marker within movie library movies:"), config.AdvancedMovieSelection.show_videodirslocation, _("If enabled all movies in movie library will be shown with path marker and will be sorted below them."))) self.list.append(getConfigListEntry(_("Use movie library path selection as marker within movies in library:"), config.AdvancedMovieSelection.movielibrary_mark, _("If enabled only the movie library path selections will be used as marker otherwise each sub directory will be shown as path marker in movie library view."))) self.list.append(getConfigListEntry(_("Minimum movie count to show path marker in movie library view:"), config.AdvancedMovieSelection.movielibrary_show_mark_cnt, _("The minimum selected number of movies must be in one directory to show the path marker in movie library view."))) self.list.append(getConfigListEntry(_("Show disk usage in description:"), config.AdvancedMovieSelection.show_diskusage, _("Displays the disk usage in the description. 
(Leave it disabled if you have performance problems at the start of the movie list)"))) self.list.append(getConfigListEntry(_("Show directory size in movie list:"), config.AdvancedMovieSelection.show_dirsize, _("Displays the size from directories in movie list."))) if config.AdvancedMovieSelection.show_dirsize.value: self.list.append(getConfigListEntry(_("Show decimal points:"), config.AdvancedMovieSelection.dirsize_digits, _("Here you can choose how many decimal points for the directory size in the movie list will be displayed."))) # TODO: remove # self.list.append(getConfigListEntry(_("Show full depth of directories:"), config.AdvancedMovieSelection.show_dirsize_full, _("Displays the full size of all sub directories of directory size."))) self.list.append(getConfigListEntry(_("Load Length of Movies in Movielist:"), config.usage.load_length_of_movies_in_moviellist, _("This option is for many of the functions from the Advanced Movie Selection necessary. If this option is disabled are many functions not available."))) if config.usage.load_length_of_movies_in_moviellist.value: self.list.append(getConfigListEntry(_("Show list options in extensions menu from movielist:"), config.AdvancedMovieSelection.showextras, _("Displays the various list view options
import discord from datetime import datetime from discord.utils import get async def msg(message, x, p, self): msg = x hasperms = False cmd = message.split() validcommands = [p + "hackban", p + "ban", p + "kick", p + "mute", p + "warn", p+ "clear", p+"unmute", p+ "approve"] if not cmd[0].lower() in validcommands: return if message.startswith(p + "hackban"): if msg.author.guild_permissions.ban_members: hasperms = True if not hasperms: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return params = message.split() bannedusers = 0 for users in params: if users == "!hackban": print("no user") else: bannedusers = bannedusers +1 print(users) user = await self.fetch_user(int(users)) await msg.guild.ban(user) channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) embed = discord.Embed(title = "Ban!", description = "<@" + str(msg.author.id) + "> has hackbanned " + str(bannedusers) + " users!\n" + message , color=0x00ff00) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) if message.startswith(p + "ban"): if msg.author.guild_permissions.ban_members: hasperms = True if not hasperms: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!ban": print("no user") else: user = await self.fetch_user(int(users)) if user.id 
== msg.author.id: return embed = discord.Embed(title = "Ban!", description = "<@" + str(msg.author.id) + "> has banned <@" + str(user.id) + ">!\n" + message , color=0x00ff00) await user.send("You have been banned from " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) await msg.guild.ban(user) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) for member in x.mentions: print(member) embed = discord.Embed(title = "Ban!", description = "<@" + str(msg.author.id) + "> has banned <@" + str(member.id) + ">!\n" + message , color=0x00ff00) if member.id == msg.author.id: return await member.send("You have been banned from " + msg.guild.name) await msg.guild.ban(member) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) if message.startswith(p + "clear"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return mess = message.split(" ") mess[1] = int(mess[1]) + 1 if int(mess[1]) > 100: mess[1] = 100 if int(mess[1]) < 2: mess[1] = 2 await msg.channel.purge(limit= int(mess[1])) channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) await channel.send(str(mess[1]) + " messages removed from #" + msg.channel.name) if message.startswith(p + "kick"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = 
"https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!kick": print("no user") else: user = await self.fetch_user(int(users)) if user.id == msg.author.id: return embed = discord.Embed(title = "Kick!", description = "<@" + str(msg.author.id) + "> has kicked <@" + str(user.id) + ">!\n" + message , color=0x00ff00) await user.send("You have been kicked from " + msg.guild.name) await msg.guild.kick(user) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) for member in x.mentions: print(member) embed = discord.Embed(title = "Kick!", description = "<@" + str(msg.author.id) + "> has kicked <@" + str(member.id) + ">! \n\n**" + message + "**" , color=0x00ff00) if member.id == msg.author.id: return await member.send("You have been kicked from " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) await msg.guild.kick(member) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) if message.startswith(p + "approve"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!approve": print("no user") else: user = x.guild.get_member(int(users)) role = get(x.guild.roles, name="New Floof") await user.remove_roles(role) role = 
get(x.guild.roles, name="Verified Floof") await user.add_roles(role) await x.channel.send("Approved") for member in x.mentions: role = get(x.guild.roles, name="New Floof") await member.remove_roles(role) role = get(x.guild.roles, name="Verified Floof") await member.add_roles(role) await x.channel.send("Approved") if message.startswith(p + "warn"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!warn": print("no user") else: user = await self.fetch_user(int(users)) if user.id == msg.author.id: return embed = discord.Embed(title = "Warn!", description = "<@" + str(msg.author.id) + "> has warned <@" + str(user.id) + ">!\n" + message , color=0x00ff00) await user.send("You have been warned on " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) for member in x.mentions: print(member) embed = discord.Embed(title = "Warn!", description = "<@" + str(msg.author.id) + "> has warned <@" + str(member.id) + ">! 
\n\n**" + message + "**", color=0x00ff00) if member.id == msg.author.id: return embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await member.send("You have been warned on " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) await channel.send(embed = embed) if message.startswith(p + "mute"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!mute": print("no user") else: user = x.guild.get_member(int(users)) if user.id == msg.author.id: return embed = discord.Embed(title = "Mute!", description = "<@" + str(msg.author.id) + "> has muted <@" + str(user.id) + ">!\n" + message , color=0x00ff00) if x.guild.id == 725201209358549012: role = get(x.guild.roles, name="Verified Floof") await user.remove_roles(role) role = get(x.guild.roles, name="Muted") await user.add_roles(role) await user.send("You have been muted on " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) for member in x.mentions: print(member) embed = discord.Embed(title = "Muted!", description = "<@" + str(msg.author.id) + "> has muted <@" + str(member.id) + ">! 
\n\n**" + message + "**", color=0x00ff00) if member.id == msg.author.id: return if x.guild.id == 725201209358549012: role = get(x.guild.roles, name="Verified Floof") await member.remove_roles(role) role = get(member.guild.roles, name="Muted") await member.add_roles(role) await member.send("You have been muted on " + msg.guild.name + "\nThe reasoning can be found below: \n\n" + message) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) if message.startswith(p + "unmute"): if msg.author.guild_permissions.kick_members: hasperms = True if hasperms == False: embed = discord.Embed(title = "No Permissions!", description = "You do not have permission to run " + message + " in " + msg.guild.name, color=0x00ff00) embed.set_thumbnail(url = "https://freeiconshop.com/wp-content/uploads/edd/cross-flat.png") await msg.author.send(embed = embed) return channel = get(x.guild.channels, name="case_logs", type=discord.ChannelType.text) if not x.mentions: await x.delete() params = message.split() for users in params: if users == "!unmute": print("no user") else: user = x.guild.get_member(int(users)) if user.id == msg.author.id: return embed = discord.Embed(title = "Unmute!", description = "<@" + str(msg.author.id) + "> has unmuted <@" + str(user.id) + ">!\n" + message , color=0x00ff00) if x.guild.id == 725201209358549012: role = get(x.guild.roles, name="Verified Floof") await user.add_roles(role) role = get(x.guild.roles, name="Muted") await user.remove_roles(role) await user.send("You have been now been unmuted on " + msg.guild.name) embed.set_thumbnail(url = "https://image.freepik.com/free-photo/judge-gavel-hammer-justice-law-concept_43403-625.jpg") await channel.send(embed = embed) for member in x.mentions: print(member) embed = discord.Embed(title = "Unmuted!", description = "<@" + str(msg.author.id) + "> has unmuted
product_id, **kwargs): """ Get videos attached to product This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please define a `callback` function to be invoked when receiving the response. >>> def callback_function(response): >>> pprint(response) >>> >>> thread = api.get_videos_from_product_with_http_info(product_id, callback=callback_function) :param callback function: The callback function for asynchronous request. (optional) :param int product_id: Product ID to fetch (required) :param int page: :param int per_page: :param str sort_by: Sort by this attribute (id by default) :param str sort_direction: Sorting direction (asc by default) :param str ip: Filter by user IP :param str filters: ``` name[value]=string&name][operator]=contains&date_add[value]=string&date_add[operator]=lt _______________ { \"name\": { \"value\": \"string\", \"operator\": \"contains\" }, \"date_add\": { \"value\": \"string\", \"operator\": \"lt\" } } ``` Operator can be: strict, contains, between, in, gt (greater than), lt (lower than). :return: ProductVideoListResponse If the method is called asynchronously, returns the request thread. 
""" all_params = ['product_id', 'page', 'per_page', 'sort_by', 'sort_direction', 'ip', 'filters'] all_params.append('callback') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_videos_from_product" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'product_id' is set if ('product_id' not in params) or (params['product_id'] is None): raise ValueError("Missing the required parameter `product_id` when calling `get_videos_from_product`") collection_formats = {} resource_path = '/products/{product_id}/videos'.replace('{format}', 'json') path_params = {} if 'product_id' in params: path_params['product_id'] = params['product_id'] query_params = {} if 'page' in params: query_params['page'] = params['page'] if 'per_page' in params: query_params['per_page'] = params['per_page'] if 'sort_by' in params: query_params['sort_by'] = params['sort_by'] if 'sort_direction' in params: query_params['sort_direction'] = params['sort_direction'] if 'ip' in params: query_params['ip'] = params['ip'] if 'filters' in params: query_params['filters'] = params['filters'] header_params = {} form_params = [] local_var_files = {} body_params = None # Authentication setting auth_settings = ['ApiClientId', 'ApiClientSecret'] return self.api_client.call_api(resource_path, 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='ProductVideoListResponse', auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def search_products(self, search_query, **kwargs): """ 
Search product with a keyword

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.search_products(search_query, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str search_query: Keyword used to search the products (required)
        :param int page:
        :param int per_page:
        :param str sort_by: Sort by this attribute (id by default)
        :param str sort_direction: Sorting direction (asc by default)
        :param str in_categories: Search in given categories. Values are separated with comma (,)
        :param str in_features: Search in given features. Values are separated with comma (,)
        :param str feature_values: Search by feature values. Values are separated with comma (,)
        :return: ProductListResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Synchronous call returns the parsed data only; with a callback the
        # *_with_http_info variant returns the request thread instead.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.search_products_with_http_info(search_query, **kwargs)
        else:
            (data) = self.search_products_with_http_info(search_query, **kwargs)
            return data

    def search_products_with_http_info(self, search_query, **kwargs):
        """
        Search product with a keyword

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.search_products_with_http_info(search_query, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str search_query: Keyword used to search the products (required)
        :param int page:
        :param int per_page:
        :param str sort_by: Sort by this attribute (id by default)
        :param str sort_direction: Sorting direction (asc by default)
        :param str in_categories: Search in given categories. Values are separated with comma (,)
        :param str in_features: Search in given features. Values are separated with comma (,)
        :param str feature_values: Search by feature values. Values are separated with comma (,)
        :return: ProductListResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint understands.
        all_params = ['search_query', 'page', 'per_page', 'sort_by', 'sort_direction', 'in_categories', 'in_features', 'feature_values']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method search_products" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'search_query' is set
        if ('search_query' not in params) or (params['search_query'] is None):
            raise ValueError("Missing the required parameter `search_query` when calling `search_products`")

        collection_formats = {}

        resource_path = '/products/search/{search_query}'.replace('{format}', 'json')
        path_params = {}
        if 'search_query' in params:
            path_params['search_query'] = params['search_query']

        # Optional paging / sorting / filtering arguments become query params.
        query_params = {}
        if 'page' in params:
            query_params['page'] = params['page']
        if 'per_page' in params:
            query_params['per_page'] = params['per_page']
        if 'sort_by' in params:
            query_params['sort_by'] = params['sort_by']
        if 'sort_direction' in params:
            query_params['sort_direction'] = params['sort_direction']
        if 'in_categories' in params:
            query_params['in_categories'] = params['in_categories']
        if 'in_features' in params:
            query_params['in_features'] = params['in_features']
        if 'feature_values' in params:
            query_params['feature_values'] = params['feature_values']

        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None

        # Authentication setting
        auth_settings = ['ApiClientId', 'ApiClientSecret']

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='ProductListResponse',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)

    def set_product_geolocation(self, product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
        """
        Handle geolocation for products by countries

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.set_product_geolocation(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int product_id: Product ID to fetch (required)
        :param int enabled: Enabled (required)
        :param str behavior_detected_countries: Behavior for detected countries (required)
        :param str behavior_non_detected_countries: Behavior for non-detected countries (required)
        :param str countries: IDs of the non-detected countries separated by comma
        :param int page:
        :param int per_page:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
"""
        # Synchronous call returns the parsed data only; with a callback the
        # *_with_http_info variant returns the request thread instead.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('callback'):
            return self.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
        else:
            (data) = self.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs)
            return data

    def set_product_geolocation_with_http_info(self, product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, **kwargs):
        """
        Handle geolocation for products by countries

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.set_product_geolocation_with_http_info(product_id, enabled, behavior_detected_countries, behavior_non_detected_countries, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param int product_id: Product ID to fetch (required)
        :param int enabled: Enabled (required)
        :param str behavior_detected_countries: Behavior for detected countries (required)
        :param str behavior_non_detected_countries: Behavior for non-detected countries (required)
        :param str countries: IDs of the non-detected countries separated by comma
        :param int page:
        :param int per_page:
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint understands.
        all_params = ['product_id', 'enabled', 'behavior_detected_countries', 'behavior_non_detected_countries', 'countries', 'page', 'per_page']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method set_product_geolocation" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'product_id' is set
        if ('product_id' not in params) or (params['product_id'] is None):
            raise ValueError("Missing the required parameter `product_id` when calling `set_product_geolocation`")
        # verify the required parameter 'enabled' is set
        if ('enabled' not in params) or (params['enabled'] is None):
            raise ValueError("Missing the required parameter `enabled` when calling `set_product_geolocation`")
        # verify the required parameter 'behavior_detected_countries' is set
        if ('behavior_detected_countries' not in params) or (params['behavior_detected_countries'] is None):
            raise ValueError("Missing the required parameter `behavior_detected_countries` when calling `set_product_geolocation`")
        # verify the required parameter 'behavior_non_detected_countries' is set
        if ('behavior_non_detected_countries' not in params) or (params['behavior_non_detected_countries'] is None):
            raise ValueError("Missing the required parameter `behavior_non_detected_countries` when calling `set_product_geolocation`")

        collection_formats = {}

        resource_path = '/products/{product_id}/geolocations'.replace('{format}', 'json')
        path_params = {}
        if 'product_id' in params:
            path_params['product_id'] = params['product_id']

        query_params = {}
        if 'page' in params:
            query_params['page'] = params['page']
        if 'per_page' in params:
            query_params['per_page'] = params['per_page']

        header_params = {}
        form_params = []
        local_var_files = {}
        # Geolocation settings are sent as form fields, not as a JSON body.
        if 'countries' in params:
            form_params.append(('countries', params['countries']))
            self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
        if 'enabled' in params:
            form_params.append(('enabled', params['enabled']))
            self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
        if 'behavior_detected_countries' in params:
            form_params.append(('behavior_detected_countries', params['behavior_detected_countries']))
            # NOTE(review): source is truncated here at a file-concatenation seam;
            # the argument list below is reproduced as found.
            self.api_client.set_default_header('Content-Type',
values:
            values = [value]
        # NOTE(review): the two lines above are the tail of a truncated
        # search-by-concept helper -- its `def` is outside this view.
        # Build one search term per (concept_id, value) pair and run the query.
        qb = SearchQueryBuilder()
        for concept_id, value in zip(concept_ids, values):
            term = OutputSearchTerm(concept_id=concept_id, value=value)
            qb.add_term(term)
        return self.search(qb, page, per_page, raw)

    def send_search_feedback(self, input_id, feedback_info=None):
        """ Send feedback for search

        Args:
            input_id: unique identifier for the input

        Returns:
            None
        """
        feedback_input = Image(image_id=input_id, feedback_info=feedback_info)
        res = self.api.send_search_feedback(feedback_input)
        return res

    def update(self, image, action='merge'):
        """ Update the information of an input/image

        Args:
            image: an Image object that has concepts, metadata, etc.
            action: one of ['merge', 'overwrite']
                'merge' is to append the info onto the existing info, for either
                concept or metadata
                'overwrite' is to overwrite the existing metadata and concepts
                with the new ones

        Returns:
            an Image object

        Examples:
            >>> new_img = Image(image_id="abc", concepts=['c1', 'c2'], not_concepts=['c3'],
            >>>                 metadata={'key':'val'})
            >>> app.inputs.update(new_img, action='overwrite')
        """
        res = self.api.patch_inputs(action=action, inputs=[image])
        one = res['inputs'][0]
        return self._to_obj(one)

    def bulk_update(self, images, action='merge'):
        """ Update the information of a list of inputs/images

        Args:
            images: a list of Image objects that have concepts, metadata, etc.
            action: one of ['merge', 'overwrite']
                'merge' is to append the info onto the existing info, for either
                concept or metadata
                'overwrite' is to overwrite the existing metadata and concepts
                with the new ones

        Returns:
            an Image object

        Examples:
            >>> new_img1 = Image(image_id="abc1", concepts=['c1', 'c2'], not_concepts=['c3'],
            >>>                  metadata={'key':'val'})
            >>> new_img2 = Image(image_id="abc2", concepts=['c1', 'c2'], not_concepts=['c3'],
            >>>                  metadata={'key':'val'})
            >>> app.inputs.update([new_img1, new_img2], action='overwrite')
        """
        ret = self.api.patch_inputs(action=action, inputs=images)
        objs = [self._to_obj(item) for item in ret['inputs']]
        return objs

    def delete_concepts(self, input_id, concepts):
        """ delete concepts from an input/image

        Args:
            input_id: unique ID of the input
            concepts: a list of concept names

        Returns:
            an Image object
        """
        # patching with action='remove' strips the listed concepts from the input
        res = self.update(Image(image_id=input_id, concepts=concepts), action='remove')
        return res

    def bulk_merge_concepts(self, input_ids, concept_lists):
        """ bulk merge concepts from a list of input ids

        Args:
            input_ids: a list of input IDs
            concept_lists: a list of concept lists, each one corresponding to a
                listed input ID and filled with concepts to be added to that input

        Returns:
            an Input object

        Examples:
            >>> app.inputs.bulk_merge_concepts('id', [[('cat',True), ('dog',False)]])
        """
        if len(input_ids) != len(concept_lists):
            raise UserError('Argument error. please check')

        inputs = []
        for input_id, concept_list in zip(input_ids, concept_lists):
            concepts = []
            not_concepts = []
            # a (concept_id, True) pair is a positive concept; anything else negative
            for concept_id, value in concept_list:
                if value is True:
                    concepts.append(concept_id)
                else:
                    not_concepts.append(concept_id)
            image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
            inputs.append(image)

        res = self.bulk_update(inputs, action='merge')
        return res

    def bulk_delete_concepts(self, input_ids, concept_lists):
        """ bulk delete concepts from a list of input ids

        Args:
            input_ids: a list of input IDs
            concept_lists: a list of concept lists, each one corresponding to a
                listed input ID and filled with concepts to be deleted from that input

        Returns:
            an Input object

        Examples:
            >>> app.inputs.bulk_delete_concepts(['id'], [['cat', 'dog']])
        """
        # the reason list comprehension is not used is it breaks the 100 chars width
        inputs = []
        for input_id, concepts in zip(input_ids, concept_lists):
            one_input = Image(image_id=input_id, concepts=concepts)
            inputs.append(one_input)

        res = self.bulk_update(inputs, action='remove')
        return res

    def merge_concepts(self, input_id, concepts, not_concepts, overwrite=False):
        """ Merge concepts for one input

        Args:
            input_id: the unique ID of the input
            concepts: the list of concepts
            not_concepts: the list of negative concepts
            overwrite: if True, this operation will replace the previous concepts.
                If False, it will append them.
Returns:
            an Input object

        Examples:
            >>> app.inputs.merge_concepts('id', ['cat', 'kitty'], ['dog'])
        """
        image = Image(image_id=input_id, concepts=concepts, not_concepts=not_concepts)
        if overwrite is True:
            action = 'overwrite'
        else:
            action = 'merge'
        res = self.update(image, action=action)
        return res

    def add_concepts(self, input_id, concepts, not_concepts):
        """ Add concepts for one input

        This is just an alias of `merge_concepts` for easier understanding
        when you try to add some new concepts to an image

        Args:
            input_id: the unique ID of the input
            concepts: the list of concepts
            not_concepts: the list of negative concepts

        Returns:
            an Input object

        Examples:
            >>> app.inputs.add_concepts('id', ['cat', 'kitty'], ['dog'])
        """
        return self.merge_concepts(input_id, concepts, not_concepts)

    def merge_metadata(self, input_id, metadata):
        """ merge metadata for the image

        This is to merge/update the metadata of the given image

        Args:
            input_id: the unique ID of the input
            metadata: the metadata dictionary

        Examples:
            >>> # merge the metadata
            >>> # metadata will be appended to the existing key/value pairs
            >>> app.inputs.merge_metadata('id', {'key1':'value1', 'key2':'value2'})
        """
        image = Image(image_id=input_id, metadata=metadata)
        action = 'merge'
        res = self.update(image, action=action)
        return res

    def _to_search_obj(self, one):
        """ convert the search candidate to input object """
        score = one['score']
        one_input = self._to_obj(one['input'])
        one_input.score = score
        return one_input

    def _to_obj(self, one):
        # Convert a raw API JSON record into an Image object.
        # get concepts: value == 1 marks a positive concept, otherwise negative
        concepts = []
        not_concepts = []
        if one['data'].get('concepts'):
            for concept in one['data']['concepts']:
                if concept['value'] == 1:
                    concepts.append(concept['name'])
                else:
                    not_concepts.append(concept['name'])
        # empty lists are normalized to None
        if not concepts:
            concepts = None
        if not not_concepts:
            not_concepts = None

        # get metadata
        metadata = one['data'].get('metadata', None)

        # get geo; geo stays None when the record carries no geo data
        geo = geo_json = one['data'].get('geo', None)
        if geo_json is not None:
            # NOTE(review): geo_schema is built but not referenced afterwards
            # in the visible code -- presumably intended for validation; confirm.
            geo_schema = {
                'additionalProperties': False,
                'type': 'object',
                'properties': {
                    'geo_point': {
                        'type': 'object',
                        'properties': {
                            'longitude': {'type': 'number'},
                            'latitude': {'type': 'number'}
                        }
                    }
                }
            }
            geo = Geo(GeoPoint(geo_json['geo_point']['longitude'],
                               geo_json['geo_point']['latitude']))

        # get regions (bounding boxes, plus face identity when present)
        regions = None
        regions_json = one['data'].get('regions', None)
        if regions_json:
            regions = [Region(
                region_id=r['id'],
                region_info=RegionInfo(bbox=BoundingBox(
                    top_row=r['region_info']['bounding_box']['top_row'],
                    left_col=r['region_info']['bounding_box']['left_col'],
                    bottom_row=r['region_info']['bounding_box']['bottom_row'],
                    right_col=r['region_info']['bounding_box']['right_col'])),
                face=Face(FaceIdentity([c for c in r['data']['face']['identity']['concepts']]))
                if 'data' in r else None)
                for r in regions_json]

        input_id = one['id']
        if one['data'].get('image'):
            # get allow_dup_url
            allow_dup_url = one['data']['image'].get('allow_duplicate_url', False)
            # The image payload is either url- or base64-based, optionally cropped.
            if one['data']['image'].get('url'):
                if one['data']['image'].get('crop'):
                    crop = one['data']['image']['crop']
                    one_input = Image(image_id=input_id, url=one['data']['image']['url'],
                                      concepts=concepts, not_concepts=not_concepts, crop=crop,
                                      metadata=metadata, geo=geo, regions=regions,
                                      allow_dup_url=allow_dup_url)
                else:
                    one_input = Image(image_id=input_id, url=one['data']['image']['url'],
                                      concepts=concepts, not_concepts=not_concepts,
                                      metadata=metadata, geo=geo, regions=regions,
                                      allow_dup_url=allow_dup_url)
            elif one['data']['image'].get('base64'):
                if one['data']['image'].get('crop'):
                    crop = one['data']['image']['crop']
                    one_input = Image(image_id=input_id, base64=one['data']['image']['base64'],
                                      concepts=concepts, not_concepts=not_concepts, crop=crop,
                                      metadata=metadata, geo=geo, regions=regions,
                                      allow_dup_url=allow_dup_url)
                else:
                    one_input = Image(image_id=input_id, base64=one['data']['image']['base64'],
                                      concepts=concepts, not_concepts=not_concepts,
                                      metadata=metadata, geo=geo, regions=regions,
                                      allow_dup_url=allow_dup_url)
        elif one['data'].get('video'):
            raise UserError('Not supported yet')
        else:
            # (tail of `_to_obj`, whose beginning precedes this chunk)
            # inputs that are neither image nor video are rejected outright
            raise UserError('Unknown input type')
        if one.get('status'):
            # attach the API-reported processing status to the rebuilt input
            one_input.status = ApiStatus(one['status'])
        return one_input

    def get_outputs(self, input_id):
        """Get the output predictions for a particular input.

        Args:
          input_id: the unique identifier of the input

        Returns:
          the input with the output predictions
        """
        return self.api.get_outputs(input_id)

    def remove_outputs_concepts(self, input_id, concept_ids):
        """Remove concepts from the outputs predictions.

        The concept ids must be present in your app.

        Args:
          input_id: the unique identifier of the input
          concept_ids: the list of concept ids to be removed

        Returns:
          the patched input in JSON object
        """
        return self.api.patch_outputs(input_id, action='remove', concept_ids=concept_ids)

    def merge_outputs_concepts(self, input_id, concept_ids):
        """Merge new concepts into the outputs predictions.

        The concept ids must be present in your app.

        Args:
          input_id: the unique identifier of the input
          concept_ids: the list of concept ids to be merged

        Returns:
          the patched input in JSON object
        """
        return self.api.patch_outputs(input_id, action='merge', concept_ids=concept_ids)


class Concepts(object):
    """Collection-style accessor for the concepts of an application.

    Thin wrapper around the low-level ``api`` client; each method delegates
    to one API call and converts raw JSON to Concept objects via ``_to_obj``
    (defined elsewhere in this file).
    """

    def __init__(self, api):
        # low-level API client used by every method below
        self.api = api

    def get_all(self):
        """Get all concepts associated with the application.

        Returns:
          all concepts in a generator function
        """
        page = 1
        per_page = 20
        # paginate until the API returns an empty 'concepts' list
        while True:
            res = self.api.get_concepts(page, per_page)
            if not res['concepts']:
                break
            for one in res['concepts']:
                yield self._to_obj(one)
            page += 1

    def get_by_page(self, page=1, per_page=20):
        """Get concepts with pagination.

        Args:
          page: page number
          per_page: number of concepts to retrieve per page

        Returns:
          a list of Concept objects

        Examples:
          >>> for concept in app.concepts.get_by_page(2, 10):
          >>>     print concept.concept_id
        """
        res = self.api.get_concepts(page, per_page)
        results = [self._to_obj(one) for one in res['concepts']]
        return results

    def get(self, concept_id):
        """ Get a concept by id Args: concept_id: concept ID, the unique identifier of the concept Returns: If
found, return the Concept object. Otherwise, return None Examples: >>> app.concepts.get('id') """ res = self.api.get_concept(concept_id) if res.get('concept'): concept = self._to_obj(res['concept']) else: concept = None return concept def search(self, term, lang=None): """ search concepts by concept name with wildcards Args: term: search term with wildcards allowed lang: language to search, if none is specified the default for the application will be used Returns: a generator function with all concepts pertaining to the search term Examples: >>> app.concepts.search('cat') >>> # search for Chinese label name >>> app.concepts.search(u'狗*', lang='zh') """ page = 1 per_page = 20 while True: res = self.api.search_concepts(term, page, per_page, lang) if not res['concepts']: break for one in res['concepts']: yield self._to_obj(one) page += 1 def update(self, concept_id, concept_name, action='overwrite'): """ Patch concept
<reponame>mbattistello/lambda_converters
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.10
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.

from sys import version_info

# Python 3: bind plain C functions as methods via the SWIG helper;
# Python 2: use the classic `new.instancemethod` for the same purpose.
if version_info >= (3,0,0):
    new_instancemethod = lambda func, inst, cls: _Geom2dAPI.SWIG_PyInstanceMethod_New(func)
else:
    from new import instancemethod as new_instancemethod

if version_info >= (2,6,0):
    def swig_import_helper():
        # Locate and load the compiled extension `_Geom2dAPI` that sits next
        # to this wrapper module; fall back to a regular import when
        # find_module cannot see it there.
        from os.path import dirname
        import imp
        fp = None
        try:
            fp, pathname, description = imp.find_module('_Geom2dAPI', [dirname(__file__)])
        except ImportError:
            import _Geom2dAPI
            return _Geom2dAPI
        if fp is not None:
            try:
                _mod = imp.load_module('_Geom2dAPI', fp, pathname, description)
            finally:
                # always close the file handle opened by find_module
                fp.close()
            return _mod
    _Geom2dAPI = swig_import_helper()
    del swig_import_helper
else:
    import _Geom2dAPI
del version_info

# Alias the builtin `property` for use by the generated proxy classes.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG-generated attribute setter: route writes through the class's
    # __swig_setmethods__ table; with static=1, reject unknown attributes.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)

def _swig_setattr(self,class_type,name,value):
    # Non-static variant: unknown attributes fall back to the instance dict.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)

def _swig_getattr(self,class_type,name):
    # SWIG-generated attribute getter mirroring _swig_setattr_nondynamic.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError(name)

def _swig_repr(self):
    # NOTE(review): bare `except:` is generated code; it deliberately
    # swallows any failure of the underlying C++ repr.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)

# Old/new-style class shim for very old Pythons.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0

def _swig_setattr_nondynamic_method(set):
    # Wrap a setter so that only existing attributes (or `this`/`thisown`)
    # may be assigned on the proxy instance.
    def set_attr(self,name,value):
        if (name == "thisown"): return self.this.own(value)
        if hasattr(self,name) or (name == "this"):
            set(self,name,value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr

class SwigPyIterator(object):
    """Abstract proxy over SWIG's C-level iterator; not constructible."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _Geom2dAPI.delete_SwigPyIterator
    def __iter__(self): return self

# Bind the C-level iterator operations onto the proxy class.
SwigPyIterator.value = new_instancemethod(_Geom2dAPI.SwigPyIterator_value,None,SwigPyIterator)
SwigPyIterator.incr = new_instancemethod(_Geom2dAPI.SwigPyIterator_incr,None,SwigPyIterator)
SwigPyIterator.decr = new_instancemethod(_Geom2dAPI.SwigPyIterator_decr,None,SwigPyIterator)
SwigPyIterator.distance = \
new_instancemethod(_Geom2dAPI.SwigPyIterator_distance,None,SwigPyIterator) SwigPyIterator.equal = new_instancemethod(_Geom2dAPI.SwigPyIterator_equal,None,SwigPyIterator) SwigPyIterator.copy = new_instancemethod(_Geom2dAPI.SwigPyIterator_copy,None,SwigPyIterator) SwigPyIterator.next = new_instancemethod(_Geom2dAPI.SwigPyIterator_next,None,SwigPyIterator) SwigPyIterator.__next__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___next__,None,SwigPyIterator) SwigPyIterator.previous = new_instancemethod(_Geom2dAPI.SwigPyIterator_previous,None,SwigPyIterator) SwigPyIterator.advance = new_instancemethod(_Geom2dAPI.SwigPyIterator_advance,None,SwigPyIterator) SwigPyIterator.__eq__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___eq__,None,SwigPyIterator) SwigPyIterator.__ne__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___ne__,None,SwigPyIterator) SwigPyIterator.__iadd__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___iadd__,None,SwigPyIterator) SwigPyIterator.__isub__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___isub__,None,SwigPyIterator) SwigPyIterator.__add__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___add__,None,SwigPyIterator) SwigPyIterator.__sub__ = new_instancemethod(_Geom2dAPI.SwigPyIterator___sub__,None,SwigPyIterator) SwigPyIterator_swigregister = _Geom2dAPI.SwigPyIterator_swigregister SwigPyIterator_swigregister(SwigPyIterator) import OCC.Geom2d import OCC.MMgt import OCC.Standard import OCC.gp import OCC.GeomAbs import OCC.TColgp import OCC.TCollection import OCC.TColStd import OCC.Quantity import OCC.Extrema import OCC.math import OCC.Adaptor2d import OCC.Adaptor3d import OCC.Geom import OCC.TopAbs import OCC.Geom2dInt import OCC.IntRes2d import OCC.IntCurve import OCC.Intf import OCC.Bnd import OCC.Approx import OCC.AppCont import OCC.AppParCurves def register_handle(handle, base_object): """ Inserts the handle into the base object to prevent memory corruption in certain cases """ try: if base_object.IsKind("Standard_Transient"): 
base_object.thisHandle = handle base_object.thisown = False except: pass class Geom2dAPI_ExtremaCurveCurve(object): thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ * Computes the extrema between - the portion of the curve C1 limited by the two points of parameter (U1min,U1max), and - the portion of the curve C2 limited by the two points of parameter (U2min,U2max). Warning Use the function NbExtrema to obtain the number of solutions. If this algorithm fails, NbExtrema returns 0. :param C1: :type C1: Handle_Geom2d_Curve & :param C2: :type C2: Handle_Geom2d_Curve & :param U1min: :type U1min: Quantity_Parameter :param U1max: :type U1max: Quantity_Parameter :param U2min: :type U2min: Quantity_Parameter :param U2max: :type U2max: Quantity_Parameter :rtype: None """ _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_swiginit(self,_Geom2dAPI.new_Geom2dAPI_ExtremaCurveCurve(*args)) def NbExtrema(self, *args): """ * Returns the number of extrema computed by this algorithm. Note: if this algorithm fails, NbExtrema returns 0. :rtype: int """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_NbExtrema(self, *args) def Points(self, *args): """ * Returns the points P1 on the first curve and P2 on the second curve, which are the ends of the extremum of index Index computed by this algorithm. Exceptions Standard_OutOfRange if Index is not in the range [ 1,NbExtrema ], where NbExtrema is the number of extrema computed by this algorithm. :param Index: :type Index: int :param P1: :type P1: gp_Pnt2d :param P2: :type P2: gp_Pnt2d :rtype: None """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Points(self, *args) def Parameters(self, *args): """ * Returns the parameters U1 of the point on the first curve and U2 of the point on the second curve, which are the ends of the extremum of index Index computed by this algorithm. 
Exceptions Standard_OutOfRange if Index is not in the range [ 1,NbExtrema ], where NbExtrema is the number of extrema computed by this algorithm. :param Index: :type Index: int :param U1: :type U1: Quantity_Parameter & :param U2: :type U2: Quantity_Parameter & :rtype: None """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Parameters(self, *args) def Distance(self, *args): """ * Computes the distance between the end points of the extremum of index Index computed by this algorithm. Exceptions Standard_OutOfRange if Index is not in the range [ 1,NbExtrema ], where NbExtrema is the number of extrema computed by this algorithm. :param Index: :type Index: int :rtype: Quantity_Length """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Distance(self, *args) def NearestPoints(self, *args): """ * Returns the points P1 on the first curve and P2 on the second curve, which are the ends of the shortest extremum computed by this algorithm. Exceptions StdFail_NotDone if this algorithm fails. :param P1: :type P1: gp_Pnt2d :param P2: :type P2: gp_Pnt2d :rtype: None """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_NearestPoints(self, *args) def LowerDistanceParameters(self, *args): """ * Returns the parameters U1 of the point on the first curve and U2 of the point on the second curve, which are the ends of the shortest extremum computed by this algorithm. Exceptions StdFail_NotDone if this algorithm fails. :param U1: :type U1: Quantity_Parameter & :param U2: :type U2: Quantity_Parameter & :rtype: None """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_LowerDistanceParameters(self, *args) def LowerDistance(self, *args): """ * Computes the distance between the end points of the shortest extremum computed by this algorithm. Exceptions - StdFail_NotDone if this algorithm fails. 
:rtype: Quantity_Length """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_LowerDistance(self, *args) def Extrema(self, *args): """ :rtype: Extrema_ExtCC2d """ return _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Extrema(self, *args) __swig_destroy__ = _Geom2dAPI.delete_Geom2dAPI_ExtremaCurveCurve Geom2dAPI_ExtremaCurveCurve.NbExtrema = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_NbExtrema,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.Points = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Points,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.Parameters = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Parameters,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.Distance = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Distance,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.NearestPoints = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_NearestPoints,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.LowerDistanceParameters = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_LowerDistanceParameters,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.LowerDistance = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_LowerDistance,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve.Extrema = new_instancemethod(_Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_Extrema,None,Geom2dAPI_ExtremaCurveCurve) Geom2dAPI_ExtremaCurveCurve_swigregister = _Geom2dAPI.Geom2dAPI_ExtremaCurveCurve_swigregister Geom2dAPI_ExtremaCurveCurve_swigregister(Geom2dAPI_ExtremaCurveCurve) class Geom2dAPI_InterCurveCurve(object): thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag') __repr__ = _swig_repr def __init__(self, *args): """ * Create an empty intersector. Use the function Init for further initialization of the intersection algorithm by curves or curve. 
:rtype: None * Creates an object and computes the intersections between the curves C1 and C2. :param C1: :type C1: Handle_Geom2d_Curve & :param C2: :type C2: Handle_Geom2d_Curve & :param Tol: default value is 1.0e-6 :type Tol: float :rtype: None * Creates an object and computes self-intersections of the curve C1. Tolerance value Tol, defaulted to 1.0e-6, defines the precision of computing the intersection points. In case of a tangential intersection, Tol also defines the size of intersection segments (limited portions of the curves) where the distance between all points from two curves (or a curve in case of self-intersection) is less than Tol. Warning Use functions NbPoints and NbSegments to obtain the number of solutions. If the algorithm finds no intersections NbPoints and NbSegments return 0. :param C1: :type C1: Handle_Geom2d_Curve & :param Tol: default value is 1.0e-6 :type Tol: float :rtype: None """ _Geom2dAPI.Geom2dAPI_InterCurveCurve_swiginit(self,_Geom2dAPI.new_Geom2dAPI_InterCurveCurve(*args)) def Init(self, *args): """ * Initializes an algorithm with the given arguments and computes the intersections between the curves C1. and C2. :param C1: :type C1: Handle_Geom2d_Curve & :param C2: :type C2: Handle_Geom2d_Curve & :param Tol: default value is 1.0e-6 :type Tol: float :rtype: None * Initializes an algorithm with the given arguments and computes the self-intersections of the curve C1. Tolerance value Tol, defaulted to 1.0e-6, defines the precision of computing the intersection points. In case of a tangential intersection, Tol also defines the size of intersection segments (limited portions of the curves) where the distance between all points from two curves (or a curve in case of self-intersection) is less than Tol. Warning Use functions NbPoints and NbSegments to obtain the number of solutions. If the algorithm finds no intersections NbPoints and NbSegments return 0. 
:param C1: :type C1: Handle_Geom2d_Curve & :param Tol: default value is 1.0e-6 :type Tol: float :rtype: None """ return _Geom2dAPI.Geom2dAPI_InterCurveCurve_Init(self, *args) def NbPoints(self, *args): """ * Returns the number of intersection-points in case of cross intersections. NbPoints returns 0 if no intersections were found. :rtype: int """ return _Geom2dAPI.Geom2dAPI_InterCurveCurve_NbPoints(self, *args) def Point(self, *args): """ * Returns the intersection point of index Index. Intersection points are
<gh_stars>0
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import tarfile
import warnings
import itertools
import csv
import json  # NOTE(review): duplicate of the `import json` above -- candidate for cleanup
from os import path as osp
from typing import Dict, Optional, Tuple
from collections import defaultdict

import numpy as np
import pickle as pkl
import requests
import torch
from torch.utils.data import Dataset

# The 50+ part-label vocabulary used to drive part-removal augmentation.
F50_CLASSES = ['bookshelves', 'lights', 'wall', 'nothing', 'storage', 'floor',
               'windows', 'porch', 'roof', 'bed', 'door', 'window', 'foundation',
               'torch', 'table', 'grass', 'ground', 'fence', 'flower', 'light',
               'deck', 'stairs', 'railing', 'ceiling', 'pillar', 'chest',
               'walkway', 'workbench', 'flowers', 'garden', 'bookshelf',
               'chimney', 'patio', 'box', 'chests', 'stone', 'ladder',
               'skylight', 'stove', 'support', 'furnace', 'steps', 'counter',
               'torches', 'column', 'bookcase', 'decor', 'furniture', 'step',
               'yard', 'dirt', 'balcony', 'bush', 'entryway', 'pink cubes']


class Craft3DDataset(Dataset):
    """Voxel-schematic dataset with part-removal and block-dropout augmentation."""

    # NOTE(review): mutable default arguments (list defaults) are shared across
    # calls -- harmless here because they are only read, but worth confirming.
    def __init__(
        self,
        data_dir: str,
        subset: str,
        remove=F50_CLASSES,
        block_dropout=[0, .1, .4]
    ):
        super().__init__()
        self.subset = subset
        self.data_dir = data_dir
        # raw training data
        fname = ""
        if subset == "train":
            fname = "training_data.pkl"
        elif subset == "valid":
            fname = "validation_data.pkl"
        fname = osp.join(self.data_dir, fname)
        # counts how many houses contain each part label (filled by _load_raw)
        self.unique_parts = defaultdict(lambda: 0)
        self.raw_items = self._load_raw(fname)
        # references to raw training data, with various augmentations specified
        self.items = self._create_items(self.raw_items, remove, block_dropout)

    def __getitem__(self, index):
        raw_idx, remove, dropout_p = self.items[index]
        schematic, inst_schem, instid2label = self.raw_items[raw_idx]
        # must call remove first because dropout is in place
        schematic = self._remove(schematic, remove, inst_schem, instid2label)
        schematic, _ = \
            self._dropout(schematic, dropout_p)
        return schematic, remove, dropout_p

    def __len__(self):
        return len(self.items)

    def _load_raw(self, fname):
        """Load pickled (schematic, instance-schematic, labels) triples and
        center each volume into a fixed 32^3 grid."""
        # largest raw dimension seen across the dataset (for diagnostics)
        self.max_dim = 0
        if not osp.isfile(fname):
            raise RuntimeError(f"Split file not found at: {fname}")
        with open(fname, "rb") as f:
            raw_items = pkl.load(f)
        standardized_items = []
        for item in raw_items:
            schematic = torch.from_numpy(item[0])
            for dim in schematic.shape:
                if dim > self.max_dim:
                    self.max_dim = dim
            schematic = self._standardize(schematic.permute(1, 2, 0))
            inst_schem = torch.from_numpy(item[1])
            inst_schem = self._standardize(inst_schem.permute(1, 2, 0))
            instid2label = item[2]
            standardized_items.append((schematic, inst_schem, instid2label))
            # track occurrence of part types across dataset
            for label in set(instid2label):
                self.unique_parts[label] += 1
        return standardized_items

    def _create_items(self, raw_items, parts_to_remove=["none"], dropout_ps=[0]):
        """Build (raw_index, part_to_remove, dropout_p) augmentation tuples.

        "none" is always included so every house appears at least unmodified.
        """
        items = []
        for i, raw_item in enumerate(raw_items):
            # only augment with parts that this house actually contains
            to_remove = set(raw_item[2]+["none"]) & set(parts_to_remove)
            for part in to_remove:
                for dropout_p in dropout_ps:
                    items.append((i, part, dropout_p))
        return items

    def _standardize(self, annotation, noise=0):
        """Center `annotation` in a 32x32x32 volume (y/z centered, x at 0).

        NOTE(review): assumes every dimension of `annotation` fits in 32 --
        confirm against the data preparation step.
        """
        standardized = torch.zeros(32, 32, 32)
        x, y, z = annotation.shape
        # centering with noise
        noise_y = min(16 - (y//2) - 1, noise)
        noise_z = min(16 - (z//2) - 1, noise)
        y_idx = max(0, 16 - (y//2)+noise_y)
        z_idx = max(0, 16 - (z//2)+noise_z)
        standardized[:x, y_idx:y_idx+y, z_idx:z_idx+z] = annotation
        return standardized

    def _remove(self, schematic, label_to_remove, inst_schem, instid2label):
        """Zero out every voxel belonging to instances labeled `label_to_remove`."""
        schematic = schematic.clone().detach() # schematic points to original tensor
        for i, label in enumerate(instid2label):
            if label == label_to_remove:
                schematic[inst_schem == i] = 0
        return schematic

    def _dropout(self, schematic, p):
        """Randomly zero voxels with probability `p` (in-place on `schematic`)."""
        mask = (torch.rand(schematic.shape) > p).float()
        schematic *= mask
        return schematic, mask

    def _print_statistics(self):
        print("unique parts: ", len(self.unique_parts))


class Craft3DDatasetAnno(Dataset):
    def __init__(
self, data_dir: str, subset: str, noise: list = [0], regress_types: bool = False, only_popular_parts: bool = False, part_augment: bool = False, remove: str = None ): super().__init__() self.data_dir = data_dir self.max_dim = 64 self.subset = subset self.regress_types = regress_types self.only_popular_parts = only_popular_parts self.part_augment = part_augment self.remove = remove if self.subset not in ("train", "val", "test"): raise ValueError(f"Unknown subset: {self.subset}") self.blockname2id = self.create_blockname2id() self.id2blockname = dict((reversed(b2id) for b2id in self.blockname2id.items())) self.block_type_remap = self.create_block_type_remap() self.blockid2clsid = self.read_blockid2clsid() #self.blockid2clsid = dict(zip(self.id2blockname.keys(),range(len(self.id2blockname)))) self.unique_parts = defaultdict(lambda: 0) self.unique_types = defaultdict(lambda: 0) self._load_dataset() def __len__(self) -> int: """ Get number of valid blocks """ return len(self.all_structures) def __getitem__( self, index: int ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: """ Get the index-th valid block""" schem, seg_schem, label, idxs = self.all_structures[index] annotation = schem if not self.regress_types: annotation = (annotation > 0).float() if self.remove: a2 = self.remove_substructure(schem, seg_schem, idxs) if not self.regress_types: a2 = (a2 > 0).float() a1, a2 = annotation.float(), a2 return a1, a2 return annotation def _load_dataset(self): if self.subset == "train": split_fname = "training_data.pkl" else: split_fname = "validation_data.pkl" splits_path = osp.join(self.data_dir, split_fname) if not osp.isfile(splits_path): raise RuntimeError(f"Split file not found at: {splits_path}") with open(splits_path, "rb") as f: houses = pkl.load(f) self.all_structures = [] for house in houses: schematic, part_schem = house[0], house[1] # map instances to part labels part_labels = defaultdict(list) for i, label in enumerate(house[2][1:]): 
part_labels[label].append(i+1) schematic = self._standardize(np.transpose(schematic, (1, 2, 0))) part_schem = self._standardize(np.transpose(part_schem, (1, 2, 0))) if self.remove: self.all_structures.append( (torch.from_numpy(schematic), torch.from_numpy(part_schem),\ "house", part_labels[self.remove])) else: self.all_structures.append( (torch.from_numpy(schematic), torch.from_numpy(part_schem),\ "house", [])) if self.part_augment: for label, idxs in part_labels.items(): self.all_structures.append( (schematic, part_schem, label, idxs)) def create_part_labels(self, part_schem, part_labels): """Modifies part_schem in-place to create a new annotation with type annotation, as oppoosed to part annotations.""" for _, idxs in part_labels.items(): type_idx = idxs[0] for idx in idxs: part_schem[part_schem==idx] = type_idx return part_schem def create_blockname2id(self): blockname2id = {} block_map = os.path.join(self.data_dir, 'blocks.csv') with open(block_map, 'r') as csvfile: reader = csv.DictReader(csvfile) for row in reader: if int(row['id2']) == 0: # only use canonical category blockname2id[row['name']] = int(row['id1']) return blockname2id def create_block_type_remap(self): block_remap_names = {} block_remap = os.path.join(self.data_dir, "block_type_map.txt") with open(block_remap, "r") as f: for line in f: new_name, old_names = line.split("\"")[1], line.split("\"")[3] block_remap_names[new_name] = old_names.split(",") block_remap = {} for new_name, old_names in block_remap_names.items(): old_ids = [self.blockname2id[n] for n in old_names] block_remap[self.blockname2id[new_name]] = old_ids return block_remap def read_blockid2clsid(self): fpath = os.path.join(self.data_dir, 'bid2clsid_500f_post_remap.txt') blockid2clsid = {} with open(fpath, "r") as f: for line in f: bid, clsid = line.split(',') blockid2clsid[int(bid)] = int(clsid) return blockid2clsid def create_blockid2clsid(self): idxs = self.blockname2id.values() remapped_idxs = self.block_type_remap.values() 
remapped_idxs = list(itertools.chain.from_iterable(remapped_idxs)) remapped_idxs = [idx for idx in remapped_idxs if not idx in self.block_type_remap.keys()] idxs = [idx for idx in idxs if not idx in remapped_idxs] idxs = sorted(idxs) return dict(zip(idxs, range(len(idxs)))) def select_substructure(self, schematic, part_schem, idxs): structure = torch.zeros_like(schematic) for idx in idxs: structure[part_schem == idx] = 1 structure = structure * schematic return structure def remove_substructure(self, schematic, part_schem, idxs): structure = torch.ones_like(schematic) structure = structure * schematic for idx in idxs: structure[part_schem == idx] = 0 return structure def _standardize(self, annotation, noise=0): """Centers a numpy array in a 64 x 64 x 64 cube.""" standardized = np.zeros((64, 64, 64)) x, y, z = annotation.shape # centering with noise noise_y = min(32 - (y//2) - 1, noise) noise_z = min(32 - (z//2) - 1, noise) y_idx = max(0, 32 - (y//2)+noise_y) z_idx = max(0, 32 - (z//2)+noise_z) standardized[:x, y_idx:y_idx+y, z_idx:z_idx+z] = annotation return standardized class Craft3DDatasetStale(Dataset): NUM_BLOCK_TYPES = 256 def __init__( self, data_dir: str, subset: str, noise: list = [0], use_block_type: bool = False ): super().__init__() self.data_dir = data_dir self.subset = subset self.max_dim = 64 self.noise = noise self.use_block_type = use_block_type if self.subset not in ("train", "val", "test"): raise ValueError(f"Unknown subset: {self.subset}") self._load_dataset_fnames() def __len__(self) -> int: """ Get number of valid blocks """ return len(self._all_houses) def __getitem__( self, index: int ) -> Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]: """ Get the index-th valid block Returns: A tuple of inputs and targets, where inputs is a dict of ``` { "local": float tensor of shape (C * H, D, D, D), "global": float tensor of shape (1, G, G, G), "center": int tensor of shape (3,), the coordinate of the last block } ``` where C is the number of 
block types, H is the history length, D is the local size, and G is the global size. targets is a dict of ``` { "coords": int tensor of shape (A,) "types": int tensor of shape (A,) } ``` where A is the number of next steps to be considered as targets. """ annotation, n = self._all_houses[index] if isinstance(annotation, str): annotation = self._load_annotation(annotation) annotation[:,:,:,1] = 0 # zero out second block label annotation = annotation.sum(axis=-1) if not self.use_block_type: annotation = annotation > 0 annotation = annotation.int() else: annotation = self._create_prior(annotation) x, y, z = annotation.shape return self._standardize(annotation, noise=n) def _load_dataset_fnames(self): splits_path = osp.join(self.data_dir, "splits.json") if not osp.isfile(splits_path): raise RuntimeError(f"Split file not found at: {splits_path}") with open(splits_path, "r") as f: splits = json.load(f) self._all_houses = [] max_len = 0 for filename in
# Estadística para Datos Univariados Datos univariados (o data univariada) son datos que se describen con una sola variable. Por ejemplo, las alturas de los compñaeros de clase son datos univariados. El propósito principal del análisis de datos univariados es la descripción de los datos. El análisis de datos univariados no considera relaciones entre distintas variables, como podría ser la relación entre la altura y el peso de los compañeros de clase. import math import random import numpy as np import pandas as pd import plotly.express as px ## Muestreo (Sampling) **Definiciones** - **Población:** el grupo entero del que queremos estudiar una característica. Por ejemplo, todos las mujeres de Chile, todos los hogares de la comuna de Providencia. - **Muestra (Sample):** el subgrupo de la población que se utiliza para inferir propiedades de la población. Por ejemplo, para estudiar alguna propiedad de las mujeres de Chile, utilizamos una muestra de mujeres que consiste en 10 mujeres de cada comuna de Chile. ### Técnicas de Muestreo - **Muestreo por Conveniencia (convenience sampling):** se seleccionan aquellos miembros de la población que son de más fácil acceso. Por ejemplo, para el ejemplo de las mujeres de Chile, utilizo como muestra a las mujeres de mi colegio. - **Muestro Aleatorio Simple (simple random sampling):** cada miembro de la población tiene la misma probabilidad de ser elegido. Por ejemplo, con un generador de números aleatorios genramos RUTs y elegimos los RUTs generados que correspondan a mujeres. - **Muestreo Sistemático (systematic sampling):** Se listan (ordenan) los miembros de la población y se eligen a partir de un número inicial y un intervalo fijo. - **Muestreo Estratificado (stratified sampling):** Se divide la población en subgrupos más pequeños (estratos). Los estratos se construyen basándose en características comunes de sus miembros. Luego, se elige una muestra aleatoria de cada estrato. 
- **Muestreo por Cuota (quota sampling):** Muy similar al muestro estratificado, pero el tamaño de la muestra de cada estrato depende de la proporción del estrato en la población total. ### Tipos de Datos - **Datos Discretos:** datos cuyos valore posibles pueden ser contados (incluso si en teoría el número de datos posibles es infinito). Por ejemplo, la talla de zapatos es un dato discreto ya que sólo existe un número finito de tallas posibles. - **Datos continuos:** datos cuyos valores posibles no pueden ser contados. Generalmente se representan con un número real (con decimales). Por ejemplo, la altura de cada individuo. La temepratura a una cierta hora del día en un lugar preespecificado de la ciudad. ## Presentación de los Datos Para datos discretos, la herramienta más usual de presentación son la tabla y gráfico de frecuencias. **Ejemplo:** Consideremos las notas de 32 alumnos en un test en el cual se puede obtener una nota entera del 0 al 10. Supongamos que los resultados son los siguientes: resultados = [0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 9, 10] Contamos cuantas ocurrencias de cada nota hay en la muestra: frecuencia = [(i, resultados.count(i)) for i in range(11)] df_frecuencia = pd.DataFrame(frecuencia, columns=['nota', 'frecuencia']) df_frecuencia Mostramos los datos de la tabla anterior en un gráfico de barras. fig = px.bar(df_frecuencia, x='nota', y=['frecuencia',], title=f'Frecuencia de Notas') fig.show() Para datos contínuos, en cambio, la herramienta más usual es un histograma. Un histograma también representa la frecuencia de ocurrencia de datos, pero, al tratarse de datos contínuos, se representa la frecuencia de ocurrencia de datos en un cierto intervalo. Veamos en ejemplo considerando una serie histórica de precios del USD en términos del CLP (USDCLP) de 10 años. 
df_usdclp = pd.read_excel('data/20210312_10Y_usdclp.xlsx') fig = px.line(df_usdclp, x='fecha', y=['valor',], title=f'Serie Histórica USDCLP') fig.show() Podemos ver como los valores están entre 450 y 870 aproximadamente. Vamos a dividir ese intervalo en subintervalos de 10 CLP y luego graficaremos (con un gráfico de barras) la frecuencia de precios observados en cada uno de esos subintervalos. fig = px.histogram( df_usdclp, x="valor", title='Histograma USDCLP - Frecuencia en Intervalos del 10 CLP') fig.show() ### Forma del Histograma Es importante describir la forma del histograma, la principal característica de un histograma es la presencia de sesgo (skew): df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,8), (7,9), (8,11), (9,10), (10,8)], columns=['intervalo', 'frecuencia']) fig = px.bar(df_sim, x='intervalo', y=['frecuencia',], title=f'Sesgo Negativo') fig.show() df_sim = pd.DataFrame([(0,8), (1,10), (2,11), (3,9), (4,8), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)], columns=['intervalo', 'frecuencia']) fig = px.bar(df_sim, x='intervalo', y=['frecuencia',], title=f'Sesgo Positivo') fig.show() df_sim = pd.DataFrame([(0,1), (1,2), (2,3), (3,4), (4,5), (5,6), (6,5), (7,4), (8,3), (9,2), (10,1)], columns=['intervalo', 'frecuencia']) fig = px.bar(df_sim, x='intervalo', y=['frecuencia',], title=f'Sin Sesgo') fig.show() ## Medidas de Tendencia Central Hasta ahora hemos visto como recopilar y presentar datos. El próximo paso es elegir un único valor que pueda representar la data de forma general. Una medida de tendencia central que nos indica donde está "la mitad" de los datos recopilados. Las medidas más comunes de tendencia central son: - moda - media - mediana ### Moda **Definición:** la moda es el valor que ocurre con más frecuencia en los datos. **Tips:** - Puede haber más de una *moda* si dos o más valores son los que ocurren con mayor frecuencia. 
- Si no hay ningún valor de la muestra que ocurra con mayor frecuencia (todos ocurren sólo una vez) entonces la muestra no tiene *moda*.

**Ejemplo:**

data = [4, 7, 3, 3, 1, 2, 7, 5, 7, 11]
contador = {elem: data.count(elem) for elem in set(data)}
highest_counter = [(k, v) for k, v in contador.items() if v == max(contador.values())]
print(f'La moda es: {highest_counter[0][0]}')

Cuando los datos se presentan en una tabla de frecuencias, la moda es el grupo que tiene la más alta frecuencia. En el gráfico de barras, es el grupo con la barra más alta.

df_frecuencia = pd.DataFrame.from_dict(contador, orient='index')
df_frecuencia.columns = ['frecuencia']
df_frecuencia

fig = px.bar(df_frecuencia, x=df_frecuencia.index, y=['frecuencia',], title=f'Gráfico de Barras Notas')
fig.show()

#### La Clase Modal

Cuando se busca la moda de datos que han sido agrupados, se debe determinar el **grupo** que tiene la mayor frecuencia. A este grupo se le llama la **clase modal**. Sin revisar toda la data, no se puede determinar cuál valor dentro de la clase modal es el que tiene la mayor frecuencia.

### Media

La media aritmética, también llamada promedio, es la medida más común de tendencia central. La media es simplemente la suma de todos los valores, dividida por el número total de datos. Usualmente se denota con $\mu$ o $\overline x$. De forma más matemática:

$$\overline x = \frac{\sum_{i=1}^N x_i}{N}$$

Al contrario de la moda, la media, usualmente, es un número que no pertenece a los datos. Por ejemplo, si tus notas son 6, 6, 7 y 7 la media será 6.5 que no coincide con ninguna de las notas obtenidas.

¿Cómo se obtiene la media de los datos a partir de la tabla de frecuencias?

**Respuesta:** en el caso anterior la media se obtiene con la siguiente fórmula.

$$\overline x =\frac{\sum_{i=1}^N f_i\cdot x_i}{\sum_{i=1}^N f_i}$$

donde $f_i$ es la frecuencia de la observación $x_i$.

### Mediana

La mediana es el dato que está justo en el medio cuando los datos se ordenan de forma ascendente.
Si el número de datos es par, entonces la mediana es la media de los dos datos que están en el medio. Esto implica que 50% de los datos están a la izquierda de la mediana y 50% de los datos están a la derecha de la mediana.

**Ejemplo:** Encontrar la mediana de 7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18.

datos = [7, 12, 1, 4, 2, 17, 9, 11, 16, 10, 18]
datos.sort()
print(f'Los datos ordenados son: {datos}')

Son 11 elementos; el del medio es entonces el sexto elemento, que corresponde al índice 5 al contar desde cero. Por lo tanto la mediana es:

print(f'mediana: {datos[5]}')

### Resumen

````{panels}
:column: col-4
:card: border-2

Moda
^^^
La **moda** es el valor que con más frecuencia ocurre en la muestra.

**Ventajas**

- Los valores extremos no afectan la moda.

**Desventajas**

- No utiliza todos los elementos del conjunto de datos.
- No es necesariamente única. Puede haber más de una **moda**. En estos casos su interpretación se hace difícil.
- La **moda** no está definida cuando ningún valor se repite.

---

Media
^^^
La media es la suma de todos los datos dividida por el número total de datos.

**Ventajas**

- Es la medida más popular
<filename>src/cuda4py/_impl/cudnn/_cffi.py """ Copyright (c) 2014, Samsung Electronics Co.,Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied, of Samsung Electronics Co.,Ltd.. """ """ cuda4py - CUDA cffi bindings and helper classes. URL: https://github.com/ajkxyz/cuda4py Original author: <NAME> <<EMAIL>> """ """ cuDNN cffi bindings. 
""" import cffi import cuda4py._cffi as cuffi from cuda4py._py import CU #: ffi parser ffi = None #: Loaded shared library lib = None #: Error codes CUDNN_STATUS_SUCCESS = 0 CUDNN_STATUS_NOT_INITIALIZED = 1 CUDNN_STATUS_ALLOC_FAILED = 2 CUDNN_STATUS_BAD_PARAM = 3 CUDNN_STATUS_INTERNAL_ERROR = 4 CUDNN_STATUS_INVALID_VALUE = 5 CUDNN_STATUS_ARCH_MISMATCH = 6 CUDNN_STATUS_MAPPING_ERROR = 7 CUDNN_STATUS_EXECUTION_FAILED = 8 CUDNN_STATUS_NOT_SUPPORTED = 9 CUDNN_STATUS_LICENSE_ERROR = 10 #: Error descriptions ERRORS = { CUDNN_STATUS_NOT_INITIALIZED: "CUDNN_STATUS_NOT_INITIALIZED", CUDNN_STATUS_ALLOC_FAILED: "CUDNN_STATUS_ALLOC_FAILED", CUDNN_STATUS_BAD_PARAM: "CUDNN_STATUS_BAD_PARAM", CUDNN_STATUS_INTERNAL_ERROR: "CUDNN_STATUS_INTERNAL_ERROR", CUDNN_STATUS_INVALID_VALUE: "CUDNN_STATUS_INVALID_VALUE", CUDNN_STATUS_ARCH_MISMATCH: "CUDNN_STATUS_ARCH_MISMATCH", CUDNN_STATUS_MAPPING_ERROR: "CUDNN_STATUS_MAPPING_ERROR", CUDNN_STATUS_EXECUTION_FAILED: "CUDNN_STATUS_EXECUTION_FAILED", CUDNN_STATUS_NOT_SUPPORTED: "CUDNN_STATUS_NOT_SUPPORTED", CUDNN_STATUS_LICENSE_ERROR: "CUDNN_STATUS_LICENSE_ERROR" } #: cudnnDataType_t CUDNN_DATA_FLOAT = 0 CUDNN_DATA_DOUBLE = 1 CUDNN_DATA_HALF = 2 #: cudnnTensorFormat_t CUDNN_TENSOR_NCHW = 0 CUDNN_TENSOR_NHWC = 1 #: cudnnConvolutionMode_t CUDNN_CONVOLUTION = 0 CUDNN_CROSS_CORRELATION = 1 #: cudnnConvolutionFwdPreference_t CUDNN_CONVOLUTION_FWD_NO_WORKSPACE = 0 CUDNN_CONVOLUTION_FWD_PREFER_FASTEST = 1 CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT = 2 #: cudnnConvolutionFwdAlgo_t CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM = 0 CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM = 1 CUDNN_CONVOLUTION_FWD_ALGO_GEMM = 2 CUDNN_CONVOLUTION_FWD_ALGO_DIRECT = 3 CUDNN_CONVOLUTION_FWD_ALGO_FFT = 4 CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING = 5 #: cudnnConvolutionBwdFilterPreference_t CUDNN_CONVOLUTION_BWD_FILTER_NO_WORKSPACE = 0 CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST = 1 CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT = 2 #: cudnnConvolutionBwdFilterAlgo_t 
CUDNN_CONVOLUTION_BWD_FILTER_ALGO_0 = 0 # non-deterministic CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1 = 1 CUDNN_CONVOLUTION_BWD_FILTER_ALGO_FFT = 2 CUDNN_CONVOLUTION_BWD_FILTER_ALGO_3 = 3 # non-deterministic with workspace #: cudnnConvolutionBwdDataPreference_t CUDNN_CONVOLUTION_BWD_DATA_NO_WORKSPACE = 0 CUDNN_CONVOLUTION_BWD_DATA_PREFER_FASTEST = 1 CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT = 2 #: cudnnConvolutionBwdDataAlgo_t CUDNN_CONVOLUTION_BWD_DATA_ALGO_0 = 0 # non-deterministic CUDNN_CONVOLUTION_BWD_DATA_ALGO_1 = 1 CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT = 2 CUDNN_CONVOLUTION_BWD_DATA_ALGO_FFT_TILING = 3 #: cudnnPoolingMode_t CUDNN_POOLING_MAX = 0 CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING = 1 CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING = 2 #: cudnnNanPropagation_t CUDNN_NOT_PROPAGATE_NAN = 0 CUDNN_PROPAGATE_NAN = 1 #: cudnnRNNMode_t CUDNN_RNN_RELU = 0 CUDNN_RNN_TANH = 1 CUDNN_LSTM = 2 CUDNN_GRU = 3 #: cudnnDirectionMode_t CUDNN_UNIDIRECTIONAL = 0 CUDNN_BIDIRECTIONAL = 1 #: cudnnRNNInputMode_t CUDNN_LINEAR_INPUT = 0 CUDNN_SKIP_INPUT = 1 #: cudnnSoftmaxAlgorithm_t CUDNN_SOFTMAX_FAST = 0 # does NOT max to avoid overflow CUDNN_SOFTMAX_ACCURATE = 1 # subtracts max to avoid overflow CUDNN_SOFTMAX_LOG = 2 #: cudnnSoftmaxMode_t CUDNN_SOFTMAX_MODE_INSTANCE = 0 # compute over all C, H, W for each N CUDNN_SOFTMAX_MODE_CHANNEL = 1 # compute over all C for each H, W, N #: Cached cudnn version cudnn_version = 0 def _initialize(backends): global lib if lib is not None: return # C function definitions # size_t instead of void* is used # for convinience with python calls and numpy arrays, # cffi automatically calls int() on objects also. 
src = """ typedef int cudnnStatus_t; typedef size_t cudnnHandle_t; typedef size_t cudnnTensorDescriptor_t; typedef size_t cudnnConvolutionDescriptor_t; typedef size_t cudnnFilterDescriptor_t; typedef size_t cudnnPoolingDescriptor_t; typedef int cudnnTensorFormat_t; typedef int cudnnDataType_t; typedef int cudnnConvolutionMode_t; typedef int cudnnConvolutionFwdPreference_t; typedef int cudnnConvolutionFwdAlgo_t; typedef int cudnnPoolingMode_t; typedef int cudnnSoftmaxAlgorithm_t; typedef int cudnnSoftmaxMode_t; size_t cudnnGetVersion(); cudnnStatus_t cudnnCreate(cudnnHandle_t *handle); cudnnStatus_t cudnnDestroy(cudnnHandle_t handle); cudnnStatus_t cudnnCreateTensorDescriptor( cudnnTensorDescriptor_t *tensorDesc); cudnnStatus_t cudnnDestroyTensorDescriptor( cudnnTensorDescriptor_t tensorDesc); cudnnStatus_t cudnnSetTensor4dDescriptor( cudnnTensorDescriptor_t tensorDesc, cudnnTensorFormat_t format, cudnnDataType_t dataType, int n, int c, int h, int w); cudnnStatus_t cudnnGetTensor4dDescriptor( const cudnnTensorDescriptor_t tensorDesc, cudnnDataType_t *dataType, int *n, int *c, int *h, int *w, int *nStride, int *cStride, int *hStride, int *wStride); cudnnStatus_t cudnnSetTensorNdDescriptor( cudnnTensorDescriptor_t tensorDesc, cudnnDataType_t dataType, int nbDims, const int *dimA, const int *strideA); cudnnStatus_t cudnnGetTensorNdDescriptor( const cudnnTensorDescriptor_t tensorDesc, int nbDimsRequested, cudnnDataType_t *dataType, int *nbDims, int *dimA, int *strideA); cudnnStatus_t cudnnCreateFilterDescriptor( cudnnFilterDescriptor_t *filterDesc); cudnnStatus_t cudnnDestroyFilterDescriptor( cudnnFilterDescriptor_t filterDesc); cudnnStatus_t cudnnCreateConvolutionDescriptor( cudnnConvolutionDescriptor_t *convDesc); cudnnStatus_t cudnnDestroyConvolutionDescriptor( cudnnConvolutionDescriptor_t convDesc); cudnnStatus_t cudnnSetConvolution2dDescriptor( cudnnConvolutionDescriptor_t convDesc, int pad_h, int pad_w, int u, int v, int upscalex, int upscaley, 
cudnnConvolutionMode_t mode); cudnnStatus_t cudnnGetConvolution2dForwardOutputDim( const cudnnConvolutionDescriptor_t convDesc, const cudnnTensorDescriptor_t inputTensorDesc, const cudnnFilterDescriptor_t filterDesc, int *n, int *c, int *h, int *w); cudnnStatus_t cudnnGetConvolutionForwardAlgorithm( cudnnHandle_t handle, const cudnnTensorDescriptor_t srcDesc, const cudnnFilterDescriptor_t filterDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnTensorDescriptor_t destDesc, cudnnConvolutionFwdPreference_t preference, size_t memoryLimitInbytes, cudnnConvolutionFwdAlgo_t *algo); cudnnStatus_t cudnnGetConvolutionForwardWorkspaceSize( cudnnHandle_t handle, const cudnnTensorDescriptor_t srcDesc, const cudnnFilterDescriptor_t filterDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnTensorDescriptor_t destDesc, cudnnConvolutionFwdAlgo_t algo, size_t *sizeInBytes); cudnnStatus_t cudnnConvolutionForward( cudnnHandle_t handle, const intptr_t alpha, const cudnnTensorDescriptor_t srcDesc, const intptr_t srcData, const cudnnFilterDescriptor_t filterDesc, const intptr_t filterData, const cudnnConvolutionDescriptor_t convDesc, cudnnConvolutionFwdAlgo_t algo, intptr_t workSpace, size_t workSpaceSizeInBytes, const intptr_t beta, const cudnnTensorDescriptor_t destDesc, intptr_t destData); cudnnStatus_t cudnnConvolutionBackwardBias( cudnnHandle_t handle, const intptr_t alpha, const cudnnTensorDescriptor_t srcDesc, const intptr_t srcData, const intptr_t beta, const cudnnTensorDescriptor_t destDesc, intptr_t destData); cudnnStatus_t cudnnTransformTensor( cudnnHandle_t handle, const intptr_t alpha, const cudnnTensorDescriptor_t srcDesc, const intptr_t srcData, const intptr_t beta, const cudnnTensorDescriptor_t destDesc, intptr_t destData); cudnnStatus_t cudnnCreatePoolingDescriptor( cudnnPoolingDescriptor_t *poolingDesc); cudnnStatus_t cudnnDestroyPoolingDescriptor( cudnnPoolingDescriptor_t poolingDesc); cudnnStatus_t cudnnGetPooling2dForwardOutputDim( const 
cudnnPoolingDescriptor_t poolingDesc, const cudnnTensorDescriptor_t inputTensorDesc, int *n, int *c, int *h, int *w); cudnnStatus_t cudnnPoolingForward( cudnnHandle_t handle, const cudnnPoolingDescriptor_t poolingDesc, const intptr_t alpha, const cudnnTensorDescriptor_t xDesc, const intptr_t x, const intptr_t beta, const cudnnTensorDescriptor_t yDesc, intptr_t y); cudnnStatus_t cudnnPoolingBackward( cudnnHandle_t handle, const cudnnPoolingDescriptor_t poolingDesc, const intptr_t alpha, const cudnnTensorDescriptor_t yDesc, const intptr_t y, const cudnnTensorDescriptor_t dyDesc, const intptr_t dy, const cudnnTensorDescriptor_t xDesc, const intptr_t x, const intptr_t beta, const cudnnTensorDescriptor_t dxDesc, intptr_t dx); cudnnStatus_t cudnnSoftmaxForward( cudnnHandle_t handle, cudnnSoftmaxAlgorithm_t algo, cudnnSoftmaxMode_t mode, const intptr_t alpha, const cudnnTensorDescriptor_t xDesc, const intptr_t x, const intptr_t beta, const cudnnTensorDescriptor_t yDesc, intptr_t y); cudnnStatus_t cudnnSoftmaxBackward( cudnnHandle_t handle, cudnnSoftmaxAlgorithm_t algo, cudnnSoftmaxMode_t mode, const intptr_t alpha, const cudnnTensorDescriptor_t yDesc, const intptr_t y, const cudnnTensorDescriptor_t dyDesc, const intptr_t dy, const intptr_t beta, const cudnnTensorDescriptor_t dxDesc, intptr_t dx); """ src2 = """ cudnnStatus_t cudnnConvolutionBackwardFilter( cudnnHandle_t handle, const intptr_t alpha, const cudnnTensorDescriptor_t srcDesc, const intptr_t srcData, const cudnnTensorDescriptor_t diffDesc, const intptr_t diffData, const cudnnConvolutionDescriptor_t convDesc, const intptr_t beta, const cudnnFilterDescriptor_t gradDesc, intptr_t gradData); cudnnStatus_t cudnnConvolutionBackwardData( cudnnHandle_t handle, const intptr_t alpha, const cudnnFilterDescriptor_t filterDesc, const intptr_t filterData, const cudnnTensorDescriptor_t diffDesc, const intptr_t diffData, const cudnnConvolutionDescriptor_t convDesc, const intptr_t beta, const cudnnTensorDescriptor_t gradDesc, 
intptr_t gradData); """ src4p = """ typedef int cudnnConvolutionBwdFilterAlgo_t; typedef int cudnnConvolutionBwdDataAlgo_t; typedef int cudnnConvolutionBwdFilterPreference_t; typedef int cudnnConvolutionBwdDataPreference_t; cudnnStatus_t cudnnGetConvolutionBackwardFilterAlgorithm( cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const cudnnTensorDescriptor_t dyDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnFilterDescriptor_t dwDesc, cudnnConvolutionBwdFilterPreference_t preference, size_t memoryLimitInBytes, cudnnConvolutionBwdFilterAlgo_t *algo); cudnnStatus_t cudnnGetConvolutionBackwardFilterWorkspaceSize( cudnnHandle_t handle, const cudnnTensorDescriptor_t xDesc, const cudnnTensorDescriptor_t dyDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnFilterDescriptor_t gradDesc, cudnnConvolutionBwdFilterAlgo_t algo, size_t *sizeInBytes); cudnnStatus_t cudnnConvolutionBackwardFilter( cudnnHandle_t handle, const intptr_t alpha, const cudnnTensorDescriptor_t srcDesc, const intptr_t srcData, const cudnnTensorDescriptor_t diffDesc, const intptr_t diffData, const cudnnConvolutionDescriptor_t convDesc, const cudnnConvolutionBwdFilterAlgo_t algo, intptr_t workSpace, size_t workSpaceSizeInBytes, const intptr_t beta, const cudnnFilterDescriptor_t gradDesc, intptr_t gradData); cudnnStatus_t cudnnGetConvolutionBackwardDataAlgorithm( cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const cudnnTensorDescriptor_t dyDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnTensorDescriptor_t dxDesc, cudnnConvolutionBwdDataPreference_t preference, size_t memoryLimitInBytes, cudnnConvolutionBwdDataAlgo_t *algo); cudnnStatus_t cudnnGetConvolutionBackwardDataWorkspaceSize( cudnnHandle_t handle, const cudnnFilterDescriptor_t wDesc, const cudnnTensorDescriptor_t dyDesc, const cudnnConvolutionDescriptor_t convDesc, const cudnnTensorDescriptor_t dxDesc, cudnnConvolutionBwdDataAlgo_t algo, size_t *sizeInBytes); cudnnStatus_t 
cudnnConvolutionBackwardData( cudnnHandle_t handle, const intptr_t alpha, const cudnnFilterDescriptor_t filterDesc, const intptr_t filterData, const cudnnTensorDescriptor_t diffDesc, const intptr_t diffData, const cudnnConvolutionDescriptor_t convDesc, const cudnnConvolutionBwdDataAlgo_t algo, intptr_t workSpace, size_t workSpaceSizeInBytes, const intptr_t beta, const cudnnTensorDescriptor_t gradDesc, intptr_t gradData); """ src24 = """ cudnnStatus_t cudnnSetFilter4dDescriptor( cudnnFilterDescriptor_t filterDesc, cudnnDataType_t dataType, int k, int c, int h, int w); cudnnStatus_t cudnnSetPooling2dDescriptor( cudnnPoolingDescriptor_t poolingDesc, cudnnPoolingMode_t mode, int windowHeight, int windowWidth, int verticalPadding, int horizontalPadding, int verticalStride, int horizontalStride); """ src5 = """ typedef size_t cudnnRNNDescriptor_t; typedef size_t cudnnDropoutDescriptor_t; typedef int cudnnNanPropagation_t; typedef int cudnnRNNInputMode_t; typedef int cudnnDirectionMode_t; typedef int cudnnRNNMode_t; cudnnStatus_t cudnnSetFilter4dDescriptor( cudnnFilterDescriptor_t filterDesc, cudnnDataType_t dataType, cudnnTensorFormat_t format, int k, int c, int h, int w); cudnnStatus_t cudnnGetFilter4dDescriptor( const cudnnFilterDescriptor_t filterDesc, cudnnDataType_t *dataType, cudnnTensorFormat_t *format, int *k, int *c, int *h, int *w); cudnnStatus_t cudnnSetFilterNdDescriptor( cudnnFilterDescriptor_t filterDesc, cudnnDataType_t dataType, cudnnTensorFormat_t format, int nbDims, const int *filterDimA); cudnnStatus_t cudnnGetFilterNdDescriptor( const cudnnFilterDescriptor_t filterDesc, int nbDimsRequested, cudnnDataType_t *dataType, cudnnTensorFormat_t *format, int *nbDims, int *filterDimA); cudnnStatus_t cudnnSetPooling2dDescriptor( cudnnPoolingDescriptor_t poolingDesc,
<gh_stars>10-100 import argparse import json import sys import tornado.httpserver import tornado.ioloop import tornado.web from tornado.web import url from tornado_swagger.setup import setup_swagger from tornado_swagger.model import register_swagger_model import database as db def load_config_file(file_name): with open(file_name, 'r') as file: return json.load(file) VERSION_NUMBER = "1.2.0" @register_swagger_model class SeriesModel: """ --- type: object description: Series object properties: id: type: integer description: Series id. name: type: string description: Series name. team: type: string description: Team assigned to series. From TestArchiver. builds: type: integer description: Number of builds in the series. last_build: type: integer description: Latest build number for the series. last_build_id: type: string description: Latest build id for the series. last_generated: type: string format: date-time description: When available, the timestamp when the output of the last build was generated. last_imported: type: string format: date-time description: Timestamp when the first results of the last build were archived. last_started: type: string format: date-time description: The last build starting time i.e. the first timestamp in the last build. last_status: type: string description: The last build status (for so far archived results). sorting_value: type: string format: date-time description: Timestamp used for sorting the series. Either last_started or last_imported """ @register_swagger_model class BuildModel: """ --- type: object description: Build object properties: build_number: type: integer description: build number in the series. build_id: type: string description: build identifier string in the series. name: type: string description: Series name. team: type: string description: Team assigned to series. 
test_runs: type: array description: list of ids of test runs that belong to the build items: type: integer description: test run id status: type: string description: Status of the build generation_time: type: string format: date-time description: When available, the timestamp when the test output of the build was generated. archiving_time: type: string format: date-time description: Timestamp when the first results of the build were archived. start_time: type: string format: date-time description: The build starting time i.e. the first timestamp in the build. """ @register_swagger_model class SimpleTestResultModel: """ --- type: object description: Result of a test case properties: id: type: integer description: Id of the test case name: type: string description: Name of the test case full_name: type: string description: Full name of the test case test_run_id: type: integer description: Id of the test run producing this result start_time: type: string format: date-time description: Timestamp for the test execution start status: type: string description: Final status of the test case setup_status: type: string description: Status of the test case setup phase, null if there was no setup execution_status: type: string description: Status of the test case execution phase, null if there was no execution teardown_status: type: string description: Status of the test case teardown phase, null if there was no teardown elapsed: type: string description: Total running time of the test case in millis setup_elapsed: type: string description: Running time of the test case setup phase in millis, null if there was no setup execution_elapsed: type: string description: Running time of the test case execution phase in millis, null if there was no execution teardown_elapsed: type: string description: Running time of the test case teardown phase in millis, null if there was no teardown fingerprint: type: string description: Fingerprint of the test case setup_fingerprint: type: string 
description: Fingerprint of the test case setup phase, refers to a keyword tree, null if there was no setup execution_fingerprint: type: string description: Fingerprint of the test case execution phase, refers to a keyword tree, null if there was no execution teardown_fingerprint: type: string description: Fingerprint of the test case teardown phase, refers to a keyword tree, null if there was no teardown """ @register_swagger_model class SimpleBuildResultModel: """ --- type: object description: Suite information and test results directly under it properties: id: type: integer description: Id of the suite name: type: string description: Name of the suite full_name: type: string description: Full name of the suite repository: type: string description: Repository of the suite tests: type: array description: List of test case results included in the suite items: $ref: '#/definitions/SimpleTestResultModel' """ @register_swagger_model class LogMessageModel: """ --- type: object description: Log message object properties: id: type: integer description: Id of the log message timestamp: type: string format: date-time description: Timestamp of the log message message: type: string description: Message logged. log_level: type: string description: Logging level of the message suite_id: type: integer description: Id of the suite that this message belongs to test_id: type: integer description: Id of the test case that this message belongs to. If null the loggin occurred in suite setup or teardown test_run_id: type: integer description: Id of the test run """ @register_swagger_model class KeywordModel: """ --- type: object description: Keyword or test step object properties: fingerprint: type: string description: Fingerprint (SHA1 hash of a execution tree) keyword: type: string description: Keyword/test step name. If null this represents a virtual test step library: type: string description: Library of the keyword/test step. 
If null this represents a virtual test step status: type: string description: Status of the keyword/test step arguments: type: array description: Arguments given to the keyword/test step items: type: string description: String representation of arguments given to the keyword/test step children: type: array description: Sub keywords/test steps called by invoked by this keyword items: $ref: '#/definitions/KeywordModel' """ @register_swagger_model class BuildKeywordAnalysisObjectModel: """ --- type: object description: Object representing keyword execution analysis data for keyword analysis table properties: library: type: string description: Library of the keyword/test step. If null this the keyword has no library keyword: type: string description: Keyword/test step name. If null this represents a virtual test step percent: type: number description: Percentage of total build execution time spent executing this keyword. min: type: integer description: Minimum execution time of the keyword within build in milli seconds avg: type: integer description: Average execution time of the keyword within build in milli seconds max: type: integer description: Maximum execution time of the keyword within build in milli seconds total: type: integer description: Total execution time of the keyword within build in milli seconds calls: type: integer description: Total number of times the keyword was called within build versions: type: integer description: Number of distinct versions/fingerprints for the keyword execution within build max_call_depth: type: integer description: Maximum number of keywords called before the keyword was called. 
""" class Application(tornado.web.Application): def __init__(self, database): handlers = [ url(r"/$", BaseDataHandler), url(r"/data/?$", BaseDataHandler), url(r"/data/team_names/?$", TeamNamesDataHandler), url(r"/data/teams/?$", TeamsDataHandler), url(r"/data/series/?$", SeriesDataHandler), url(r"/data/series/(?P<series>[0-9]+)/info/?$", SeriesInfoDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/?$", BuildsDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/info/?$", BuildInfoDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/simple_results/?$", BuildSimpleResultsDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/keyword_analysis/?$", KeywordAnalysisDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/suites/(?P<suite>[0-9]+)/?$", SuiteResultDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/suites/(?P<suite>[0-9]+)/info/?$", SuiteResultInfoDataHandler), url(r"/data/series/(?P<series>[0-9]+)/history/?$", HistoryDataHandler), url(r"/data/series/(?P<series>[0-9]+)/most_stable_tests/?$", MostStableTestsDataHandler), url(r"/data/series/(?P<series>[0-9]+)/status_counts/?$", SeriesStatusCountsDataHandler), url(r"/data/series/(?P<series>[0-9]+)/builds/(?P<build_number>[0-9]+)/metadata/?$", MetaDataHandler), url(r"/data/test_runs/(?P<test_run>[0-9]+)/suites/(?P<suite>[0-9]+)/log_messages?$", SuiteLogMessageDataHandler), url(r"/data/test_runs/(?P<test_run>[0-9]+)/test_cases/(?P<test>[0-9]+)/log_messages?$", TestCaseLogMessageDataHandler), url(r"/data/keyword_tree/(?P<fingerprint>[0-9a-fA-F]{40})/?$", KeywordTreeDataHandler), # For query testing purposes only url(r"/data/foo/?$", FooDataHandler) ] settings = dict(debug=True) self.database = database setup_swagger(handlers, swagger_url="/data/doc", description='Project repo at https://github.com/salabs/Epimetheus', api_version=VERSION_NUMBER, 
title='Epimetheus backend API') tornado.web.Application.__init__(self, handlers, **settings) class InvalidArgumentError(ValueError): """Exception for communicating an invalid user argument was suplied""" def free_connection(connections): if isinstance(connections, list): for conn in connections: conn.free() else: connections.free() @tornado.gen.coroutine def coroutine_query(querer, *args, **kwargs): rows, formatter = querer(*args, **kwargs) rows = yield rows results = formatter(rows) free_connection(rows) return results class BaseHandler(tornado.web.RequestHandler): # Default error handling is to return HTTP status 500 def write_error(self, status_code, **kwargs): self.send_error_response(500, 'Server error') @property def database(self): return self.application.database def data_received(self, chunk): pass def get_int_argument(self, name, default=None): value = self.get_argument(name, default) try: return None if value is None else int(value) except ValueError: self.send_error_response( 400, "Bad request. Argument '{}' should be an integer".format(name)) raise InvalidArgumentError() def get_restricted_argument(self, name, options): """The first option is considered the default option""" value = self.get_argument(name, options[0]) if value not in options: message = "Bad request. 
Argument '{}' should be one of {}".format( name, options) self.send_error_response(400, message) raise InvalidArgumentError() return value def send_error_response(self, status, message=''): self.set_header('Content-Type', 'application/json') self.set_status(status) self.write({'error': {'code': status, 'message': message}}) def send_bad_request_response(self): self.send_error_response(400, 'Bad request') def send_not_found_response(self): self.send_error_response(404, 'Not found') @tornado.gen.coroutine def keyword_tree(self, fingerprint): if not fingerprint: return None keyword_tree = yield coroutine_query(self.database.keyword_tree, fingerprint) if keyword_tree: keyword_tree['children'] = [] keyword_tree = yield self.child_trees(keyword_tree) return keyword_tree return None @tornado.gen.coroutine def child_trees(self, keyword_tree): if 'children' not in keyword_tree: keyword_tree['children'] = [] children = yield coroutine_query(self.database.subtrees, keyword_tree['fingerprint']) for child in
queue. mock_add_queue.mock_calls = [] response = self.testapp.get( '/tasks/crawl_list?%s' % '&'.join( ['%s=%s' % (i[0], i[1]) for i in params.iteritems()])) self.assertEqual(200, response.status_int) self.assertTweetDbContents(['12', '10', '8', '3'], '123') # Even though the original tweet '3' was not returned, we shouldn't have # enqueued more because only one tweet was crawled. calls = mock_add_queue.mock_calls self.assertEquals(0, len(calls)) @mock.patch.object(taskqueue, 'add') def testCrawlList_stopBackfillMaxRequests(self, mock_add_queue): # Crawl one tweet with a small ID. self.SetTimelineResponse(self.CreateTweet(3, ('alice', 2))) response = self.testapp.get('/tasks/crawl_list?list_id=123') self.assertEqual(200, response.status_int) # Crawl two tweets with a recent ID which is after the ID that # should be indexed. self.SetTimelineResponse([self.CreateTweet(12, ('alice', 2)), self.CreateTweet(10, ('alice', 2))]) response = self.testapp.get( '/tasks/crawl_list?list_id=123&total_requests_made=%d' % ( crawl_lists.MAX_REQUESTS - 1)) self.assertEqual(200, response.status_int) self.assertTweetDbContents(['12', '10', '3'], '123') # This did not enqueue a crawling request because the maximum number # of crawl requests has been made. calls = mock_add_queue.mock_calls self.assertEquals(0, len(calls)) @mock.patch.object(taskqueue, 'add') def testCrawlList_simulateCrawlFollowUpEnqueueAnother(self, mock_add_queue): # Crawl one tweet with a recent ID which is after the ID that # should be indexed. self.SetTimelineResponse(self.CreateTweet(10, ('alice', 2))) response = self.testapp.get('/tasks/crawl_list?list_id=123') self.assertEqual(200, response.status_int) self.assertTweetDbContents(['10'], '123') # Simulate crawling 1 more. 
twts = [self.CreateTweet(i, ('alice', 2)) for i in range(7, 5, -1)] self.SetTimelineResponse(twts) params = { 'list_id': '123', 'total_crawled': 2L, 'max_id': 9L, 'since_id': 3L, 'num_to_crawl': 2L, 'total_requests_made': 1, } response = self.testapp.get( '/tasks/crawl_list?%s' % '&'.join( ['%s=%s' % (i[0], i[1]) for i in params.iteritems()])) self.assertEqual(200, response.status_int) self.assertTweetDbContents(['10', '7', '6'], '123') # There are still more to crawl since the original tweet has not been # returned in the timeline yet. calls = mock_add_queue.mock_calls self.assertEquals(1, len(calls)) def testCrawlAllLists_noLists(self): response = self.testapp.get('/tasks/crawl_all_lists') self.assertEqual(200, response.status_int) self.assertEqual('No lists to crawl', response.body) @mock.patch.object(taskqueue, 'add') def testCrawlAllLists_someLists(self, mock_add_queue): self.SetJsonResponse('{"lists": [{"id_str": "1234"}, {"id_str": "87"}]}') self.testapp.get('/tasks/update_lists_rate_limited') response = self.testapp.get('/tasks/crawl_all_lists') self.assertEqual(200, response.status_int) self.assertTrue(response.body.find('1234') != -1) calls = mock_add_queue.mock_calls self.assertEquals(2, len(calls)) self.assertEquals(calls[0], mock.call( url='/tasks/crawl_list', method='GET', params={'list_id': '1234', 'fake_data': ''}, queue_name='list-statuses')) self.assertEquals(calls[1], mock.call( url='/tasks/crawl_list', method='GET', params={'list_id': '87', 'fake_data': ''}, queue_name='list-statuses')) def testCrawlAllUsers_noUsers(self): """Ensure crawl_all_users handles case when there are no users.""" response = self.testapp.get('/tasks/crawl_all_lists') self.assertEqual(200, response.status_int) self.assertEqual('No lists to crawl', response.body) @mock.patch.object(taskqueue, 'add') def testCrawlAllUsers_someUsers(self, mock_add_queue): """Ensure crawl_all_users handles case when there is one user.""" self.CreateUser(2, 'bob').put() response = 
self.testapp.get('/tasks/crawl_all_users') self.assertEqual(200, response.status_int) self.assertTrue(response.body.find('2') != -1) calls = mock_add_queue.mock_calls self.assertEquals(1, len(calls)) self.assertEquals(calls[0], mock.call( url='/tasks/crawl_users', method='POST', params={'user_id': '2'}, queue_name='lookup-users')) @mock.patch.object(taskqueue, 'add') def testCrawlAllUsers_manyUsers(self, mock_add_queue): """Ensure crawl_all_users handles case when there many users.""" offset = 1000 num_users = crawl_lists.MAX_USERS_PER_CRAWL_REQUEST + 1 for i in range(crawl_lists.MAX_USERS_PER_CRAWL_REQUEST + 1): self.CreateUser(i + offset, 'bob_%s' % i).put() response = self.testapp.get('/tasks/crawl_all_users') self.assertEqual(200, response.status_int) # Ensure it enqueued requests for all of them. logging.info('crawl_users response: %s', response.body) self.assertTrue(response.body.find('%s' % num_users) != -1) calls = mock_add_queue.mock_calls self.assertEquals(2, len(calls)) self.assertEquals(calls[0], mock.call( url='/tasks/crawl_users', method='POST', params={'user_id': ','.join( [str(i + offset) for i in range(num_users - 1)])}, queue_name='lookup-users')) self.assertEquals(calls[1], mock.call( url='/tasks/crawl_users', method='POST', params={'user_id': '%s' % (num_users + offset - 1)}, queue_name='lookup-users')) @mock.patch.object(taskqueue, 'add') def testCrawlUsers_newScreenName(self, mock_add_queue): """Ensure crawl_users updates screen_names if they have changed.""" # Create the canonical user. 
bob = self.CreateUser(2, 'bob') key = bob.key json_obj = json.loads(bob.ToJsonString()) json_obj.get('user', {})['screen_name'] = 'Bob' tweets.User.GetOrInsertFromJson(json_obj) self.assertUserDbContents(['2']) self.SetJsonResponse('[%s]' % bob.ToJsonString()) response = self.testapp.post('/tasks/crawl_users', params={ 'user_id': '2'}) self.assertEqual(200, response.status_int) self.assertUserDbContents(['2']) # Assert that the profile URL was updated in the db. users = tweets.User.query().fetch() self.assertEquals('bob', users[0].screen_name) @mock.patch.object(taskqueue, 'add') def testUpdateLists_cronEntryPoint(self, mock_add_queue): response = self.testapp.get('/tasks/update_lists') self.assertEqual(200, response.status_int) self.assertTrue(response.body.find('Enqueued') != -1) calls = mock_add_queue.mock_calls self.assertEquals(1, len(calls)) self.assertEquals(calls[0], mock.call( url='/tasks/update_lists_rate_limited', method='GET', queue_name='list-lists')) def testFindTeamsInTweet(self): """Verify that we can find the teams in a tweet.""" # Create a user and add it to the db. user = self.CreateUser(2, 'bob') user.put() crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) teams = crawl_lists_handler._FindTeamsInTweet(twt, {}) # Make sure we found 'bob' correctly. self.assertEquals(2, teams[0].twitter_id) self.assertEquals(crawl_lists.UNKNOWN_SR_ID, teams[1].score_reporter_id) def testFindTeamsInTweet_newUserThisCrawlCycle(self): """Verify a user can be found when it's not in the db but was crawled.""" # Create a user and add it to the db. user = self.CreateUser(2, 'bob') user_db = {'2': user} crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) teams = crawl_lists_handler._FindTeamsInTweet(twt, user_db) # Make sure we found 'bob' correctly. 
self.assertEquals(2, teams[0].twitter_id) self.assertEquals(crawl_lists.UNKNOWN_SR_ID, teams[1].score_reporter_id) def testFindTeamsInTweet_userMentionOfSecondTeam(self): """Verify a user can be found when it mentions another user.""" # Create a user and add it to the db. bob = self.CreateUser(2, 'bob') alice = self.CreateUser(3, 'alice') user_db = {'2': bob, '3': alice} crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) twt.entities.user_mentions = [tweets.UserMentionEntity( user_id='3', user_id_64=3)] teams = crawl_lists_handler._FindTeamsInTweet(twt, user_db) # Make sure we found 'bob' correctly. self.assertEquals(2, teams[0].twitter_id) self.assertEquals(3, teams[1].twitter_id) def testFindTeamsInTweet_userMentionOfSecondTeamWrongDivision(self): """Verify a user can be found when it mentions another user.""" # Create a user and add it to the db. bob = self.CreateUser(2, 'bob') bob.from_list = list_id_bimap.ListIdBiMap.USAU_COLLEGE_OPEN_LIST_ID alice = self.CreateUser(3, 'alice') alice.from_list = list_id_bimap.ListIdBiMap.USAU_COLLEGE_WOMENS_LIST_ID user_db = {'2': bob, '3': alice} crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) twt.entities.user_mentions = [tweets.UserMentionEntity( user_id='3', user_id_64=3)] teams = crawl_lists_handler._FindTeamsInTweet(twt, user_db) # Make sure we found 'bob' correctly. 
self.assertEquals(2, teams[0].twitter_id) self.assertEquals(crawl_lists.UNKNOWN_SR_ID, teams[1].score_reporter_id) def testFindTeamsInTweet_noExistingUser(self): """Handle the case gracefully if the user doesn't exist in db.""" crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) teams = crawl_lists_handler._FindTeamsInTweet(twt, {}) self.assertEquals(None, teams[0].twitter_id) self.assertEquals(None, teams[1].twitter_id) def testFindMostConsistentGame_noGamesInDb(self): """Verify that it doesn't find any consistent games if none exist.""" crawl_lists_handler = crawl_lists.CrawlListHandler() twt = self.CreateTweet(1, ('bob', 2)) teams = crawl_lists_handler._FindTeamsInTweet(twt, {}) scores = [0, 0] (score, game) = crawl_lists_handler._FindMostConsistentGame(twt, [], teams, Division.OPEN, AgeBracket.NO_RESTRICTION, League.USAU, scores) self.assertEquals(0.0, score) self.assertEquals(None, game) def testFindMostConsistentGame(self): """Verify that it finds consistent games if it exists.""" user = self.CreateUser(2, 'bob') crawl_lists_handler = crawl_lists.CrawlListHandler() now = datetime.utcnow() twt = self.CreateTweet(1, ('bob', 2), created_at=now) # The first team will be 'bob' teams = crawl_lists_handler._FindTeamsInTweet(twt, {'2': user}) source = GameSource(type=GameSourceType.TWITTER, home_score=5, away_score=7, update_date_time=now - timedelta(minutes=5)) # Create a game with 'bob' in that division, age_bracket, and league game = Game(id_str='new game', teams=teams, scores=[5, 7], division=Division.OPEN, age_bracket=AgeBracket.NO_RESTRICTION, league=League.USAU, created_at=now, last_modified_at=now, sources=[source]) # Score has to be a plausible update to the game. scores = [6, 7] (score, found_game) = crawl_lists_handler._FindMostConsistentGame(twt, [game], teams, Division.OPEN, AgeBracket.NO_RESTRICTION, League.USAU, scores) # Score should be high since the time of the Tweet is close to the game. 
self.assertTrue(score >= 0.9) self.assertEquals(game, found_game) def testFindMostConsistentGame_scoreReporter(self): """Verify that it finds consistent games if it exists.""" user = self.CreateUser(2, 'bob') crawl_lists_handler = crawl_lists.CrawlListHandler() now = datetime.utcnow() twt = self.CreateTweet(1, ('bob', 2), created_at=now) # The first team will be 'bob' teams = crawl_lists_handler._FindTeamsInTweet(twt, {'2': user}) # Simulate that this is a game crawled by score reporter weeks ago. score_crawl_time = now - timedelta(weeks=5) source = GameSource(type=GameSourceType.SCORE_REPORTER, home_score=0, away_score=0, update_date_time=score_crawl_time) # Create a game with 'bob' in that division, age_bracket, and league game = Game(id_str='new game', teams=teams, scores=[0, 0], division=Division.OPEN, age_bracket=AgeBracket.NO_RESTRICTION, start_time=now - timedelta(hours=1), league=League.USAU, created_at=score_crawl_time, last_modified_at=score_crawl_time, sources=[source]) # Score has to be a plausible update to the game. scores = [2, 3] (score, found_game) = crawl_lists_handler._FindMostConsistentGame(twt, [game], teams, Division.OPEN, AgeBracket.NO_RESTRICTION, League.USAU, scores) # Score should be high since the time of the Tweet is close to the game. 
self.assertTrue(score >= crawl_lists.GAME_CONSISTENCY_THRESHOLD) self.assertEquals(game, found_game) def testFindMostConsistentGame_noSourceScores(self): """Verify GameSources with no scores are handled.""" user = self.CreateUser(2, 'bob') crawl_lists_handler = crawl_lists.CrawlListHandler() now = datetime.utcnow() twt = self.CreateTweet(1, ('bob', 2), created_at=now) # The first team will be 'bob' teams = crawl_lists_handler._FindTeamsInTweet(twt, {'2': user}) source = GameSource(type=GameSourceType.TWITTER, update_date_time=now - timedelta(minutes=5)) # Create a game with 'bob' in that division, age_bracket, and league game = Game(id_str='new game', teams=teams, scores=[5, 7], division=Division.OPEN, age_bracket=AgeBracket.NO_RESTRICTION, league=League.USAU, created_at=now, last_modified_at=now, sources=[source]) scores = [6, 7] (score, found_game) = crawl_lists_handler._FindMostConsistentGame(twt, [game], teams, Division.OPEN, AgeBracket.NO_RESTRICTION, League.USAU, scores) self.assertEqual(0.0, score) self.assertEqual(None, found_game) def testFindMostConsistentGame_noMatchingGamesWithTeam(self): """Verify no consistent game is found if no games with that team exist.""" user = self.CreateUser(2, 'bob') user.put() user = self.CreateUser(3, 'alice') user.put() crawl_lists_handler = crawl_lists.CrawlListHandler() now = datetime.utcnow() # Create a tweet for a different user than the one in the game. twt = self.CreateTweet(1, ('alice', 3), created_at=now) # The first team will be 'alice', 2nd will be unknown. twt_teams = crawl_lists_handler._FindTeamsInTweet(twt, {}) twt = self.CreateTweet(2, ('bob', 2), created_at=now) # The first team will be 'bob', 2nd will be unknown. game_teams = crawl_lists_handler._FindTeamsInTweet(twt, {}) # Create
def get_fdendrogram( self ): return self.fdendrogram class Heatmap: datatype = 'heatmap' bbcyr = {'red': ( (0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.50, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ( (0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.50, 1.0, 1.0), (0.75, 1.0, 1.0), (1.0, 0.0, 1.0)), 'blue': ( (0.0, 0.0, 0.0), (0.25, 1.0, 1.0), (0.50, 1.0, 1.0), (0.75, 0.0, 0.0), (1.0, 0.0, 1.0))} bbcry = {'red': ( (0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.50, 0.0, 0.0), (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ( (0.0, 0.0, 0.0), (0.25, 0.0, 0.0), (0.50, 1.0, 1.0), (0.75, 0.0, 0.0), (1.0, 1.0, 1.0)), 'blue': ( (0.0, 0.0, 0.0), (0.25, 1.0, 1.0), (0.50, 1.0, 1.0), (0.75, 0.0, 0.0), (1.0, 0.0, 1.0))} bcry = {'red': ( (0.0, 0.0, 0.0), (0.33, 0.0, 0.0), (0.66, 1.0, 1.0), (1.0, 1.0, 1.0)), 'green': ( (0.0, 0.0, 0.0), (0.33, 1.0, 1.0), (0.66, 0.0, 0.0), (1.0, 1.0, 1.0)), 'blue': ( (0.0, 1.0, 1.0), (0.33, 1.0, 1.0), (0.66, 0.0, 0.0), (1.0, 0.0, 1.0))} my_colormaps = [ ('bbcyr',bbcyr), ('bbcry',bbcry), ('bcry',bcry)] dcols = ['#ca0000','#0087ff','#00ba1d','#cf00ff','#00dbe2','#ffaf00','#0017f4','#006012','#e175ff','#877878','#050505','#b5cf00','#ff8a8a','#aa6400','#50008a','#00ff58'] @staticmethod def input_parameters( parser ): hm_param = parser.add_argument_group('Heatmap options') arg = hm_param.add_argument arg( '--dpi', type=int, default=150, help = "Image resolution in dpi [default 150]") arg( '-l', '--log_scale', action='store_true', help = "Log scale" ) arg( '-s', '--sqrt_scale', action='store_true', help = "Square root scale" ) arg( '--no_slabels', action='store_true', help = "Do not show sample labels" ) arg( '--minv', type=float, default=None, help = "Minimum value to display in the color map [default None meaning automatic]" ) arg( '--maxv', type=float, default=None, help = "Maximum value to display in the color map [default None meaning automatic]" ) arg( '--no_flabels', action='store_true', help = "Do not show feature labels" ) arg( '--max_slabel_len', type=int, 
default=25, help = "Max number of chars to report for sample labels [default 15]" ) arg( '--max_flabel_len', type=int, default=25, help = "Max number of chars to report for feature labels [default 15]" ) arg( '--flabel_size', type=int, default=10, help = "Feature label font size [default 10]" ) arg( '--slabel_size', type=int, default=10, help = "Sample label font size [default 10]" ) arg( '--fdend_width', type=float, default=1.0, help = "Width of the feature dendrogram [default 1 meaning 100%% of default heatmap width]") arg( '--sdend_height', type=float, default=1.0, help = "Height of the sample dendrogram [default 1 meaning 100%% of default heatmap height]") arg( '--metadata_height', type=float, default=.05, help = "Height of the metadata panel [default 0.05 meaning 5%% of default heatmap height]") arg( '--metadata_separation', type=float, default=.01, help = "Distance between the metadata and data panels. [default 0.001 meaning 0.1%% of default heatmap height]") arg( '--image_size', type=float, default=8, help = "Size of the largest between width and eight size for the image in inches [default 8]") arg( '--cell_aspect_ratio', type=float, default=1.0, help = "Aspect ratio between width and height for the cells of the heatmap [default 1.0]") col_maps = ['Accent', 'Blues', 'BrBG', 'BuGn', 'BuPu', 'Dark2', 'GnBu', 'Greens', 'Greys', 'OrRd', 'Oranges', 'PRGn', 'Paired', 'Pastel1', 'Pastel2', 'PiYG', 'PuBu', 'PuBuGn', 'PuOr', 'PuRd', 'Purples', 'RdBu', 'RdGy', 'RdPu', 'RdYlBu', 'RdYlGn', 'Reds', 'Set1', 'Set2', 'Set3', 'Spectral', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd', 'afmhot', 'autumn', 'binary', 'bone', 'brg', 'bwr', 'cool', 'copper', 'flag', 'gist_earth', 'gist_gray', 'gist_heat', 'gist_ncar', 'gist_rainbow', 'gist_stern', 'gist_yarg', 'gnuplot', 'gnuplot2', 'gray', 'hot', 'hsv', 'jet', 'ocean', 'pink', 'prism', 'rainbow', 'seismic', 'spectral', 'spring', 'summer', 'terrain', 'winter'] + [n for n,c in Heatmap.my_colormaps] for n,c in Heatmap.my_colormaps: my_cmap = 
matplotlib.colors.LinearSegmentedColormap(n,c,256) pylab.register_cmap(name=n,cmap=my_cmap) arg( '-c','--colormap', type=str, choices = col_maps, default = 'bbcry' ) arg( '--bottom_c', type=str, default = None, help = "Color to use for cells below the minimum value of the scale [default None meaning bottom color of the scale]") arg( '--top_c', type=str, default = None, help = "Color to use for cells below the maximum value of the scale [default None meaning bottom color of the scale]") arg( '--nan_c', type=str, default = None, help = "Color to use for nan cells [default None]") """ arg( '--', type=str, default="average", help = "Linkage method for feature clustering [default average]") arg( '--slinkage', type=str, default="average", help = "Linkage method for sample clustering [default average]") """ def __init__( self, numpy_matrix, sdendrogram, fdendrogram, snames, fnames, fnames_meta, args = None ): self.numpy_matrix = numpy_matrix self.sdendrogram = sdendrogram self.fdendrogram = fdendrogram self.snames = snames self.fnames = fnames self.fnames_meta = fnames_meta self.ns,self.nf = self.numpy_matrix.shape self.args = args def make_legend( self, dmap, titles, out_fn ): figlegend = plt.figure(figsize=(1+3*len(titles),2), frameon = False) gs = gridspec.GridSpec( 1, len(dmap), wspace = 2.0 ) for i,(d,title) in enumerate(zip(dmap,titles)): legax = plt.subplot(gs[i],frameon = False) for k,v in sorted(d.items(),key=lambda x:x[1]): rect = Rectangle( [0.0, 0.0], 0.0, 0.0, facecolor = self.dcols[v%len(self.dcols)], label = k, edgecolor='b', lw = 0.0) legax.add_patch(rect) #remove_splines( legax ) legax.set_xticks([]) legax.set_yticks([]) legax.legend( loc = 2, frameon = False, title = title) """ ncol = legend_ncol, bbox_to_anchor=(1.01, 3.), borderpad = 0.0, labelspacing = 0.0, handlelength = 0.5, handletextpad = 0.3, borderaxespad = 0.0, columnspacing = 0.3, prop = {'size':fontsize}, frameon = False) """ if out_fn: figlegend.savefig(out_fn, bbox_inches='tight') def draw( 
self ): rat = float(self.ns)/self.nf rat *= self.args.cell_aspect_ratio x,y = (self.args.image_size,rat*self.args.image_size) if rat < 1 else (self.args.image_size/rat,self.args.image_size) fig = plt.figure( figsize=(x,y), facecolor = 'w' ) cm = pylab.get_cmap(self.args.colormap) bottom_col = [ cm._segmentdata['red'][0][1], cm._segmentdata['green'][0][1], cm._segmentdata['blue'][0][1] ] if self.args.bottom_c: bottom_col = self.args.bottom_c cm.set_under( bottom_col ) top_col = [ cm._segmentdata['red'][-1][1], cm._segmentdata['green'][-1][1], cm._segmentdata['blue'][-1][1] ] if self.args.top_c: top_col = self.args.top_c cm.set_over( top_col ) if self.args.nan_c: cm.set_bad( self.args.nan_c ) def make_ticklabels_invisible(ax): for tl in ax.get_xticklabels() + ax.get_yticklabels(): tl.set_visible(False) ax.set_xticks([]) ax.set_yticks([]) def remove_splines( ax ): for v in ['right','left','top','bottom']: ax.spines[v].set_color('none') def shrink_labels( labels, n ): shrink = lambda x: x[:n/2]+" [...] 
"+x[-n/2:] return [(shrink(str(l)) if len(str(l)) > n else l) for l in labels] #gs = gridspec.GridSpec( 4, 2, # width_ratios=[1.0-fr_ns,fr_ns], # height_ratios=[.03,0.03,1.0-fr_nf,fr_nf], # wspace = 0.0, hspace = 0.0 ) fr_ns = float(self.ns)/max([self.ns,self.nf]) fr_nf = float(self.nf)/max([self.ns,self.nf]) buf_space = 0.05 minv = min( [buf_space*8, 8*rat*buf_space] ) if minv < 0.05: buf_space /= minv/0.05 metadata_height = self.args.metadata_height if type(snames[0]) is tuple and len(snames[0]) > 1 else 0.000001 gs = gridspec.GridSpec( 6, 4, width_ratios=[ buf_space, buf_space*2, .08*self.args.fdend_width,0.9], height_ratios=[ buf_space, buf_space*2, .08*self.args.sdend_height, metadata_height, self.args.metadata_separation, 0.9], wspace = 0.0, hspace = 0.0 ) ax_hm = plt.subplot(gs[23], axisbg = bottom_col ) ax_metadata = plt.subplot(gs[15], axisbg = bottom_col ) ax_hm_y2 = ax_hm.twinx() norm_f = matplotlib.colors.Normalize if self.args.log_scale: norm_f = matplotlib.colors.LogNorm elif self.args.sqrt_scale: norm_f = SqrtNorm minv, maxv = 0.0, None maps, values, ndv = [], [], 0 if type(snames[0]) is tuple and len(snames[0]) > 1: metadata = zip(*[list(s[1:]) for s in snames]) for m in metadata: mmap = dict([(v[1],ndv+v[0]) for v in enumerate(list(set(m)))]) values.append([mmap[v] for v in m]) ndv += len(mmap) maps.append(mmap) dcols = [] mdmat = np.matrix(values) while len(dcols) < ndv: dcols += self.dcols cmap = matplotlib.colors.ListedColormap(dcols[:ndv]) bounds = [float(f)-0.5 for f in range(ndv+1)] imm = ax_metadata.imshow( mdmat, #origin='lower', interpolation = 'nearest', aspect='auto', extent = [0, self.nf, 0, self.ns], cmap=cmap, vmin=bounds[0], vmax=bounds[-1], ) remove_splines( ax_metadata ) ax_metadata_y2 = ax_metadata.twinx() ax_metadata_y2.set_ylim(0,len(self.fnames_meta)) ax_metadata.set_yticks([]) ax_metadata_y2.set_ylim(0,len(self.fnames_meta)) ax_metadata_y2.tick_params(length=0) ax_metadata_y2.set_yticks(np.arange(len(self.fnames_meta))+0.5) 
ax_metadata_y2.set_yticklabels(self.fnames_meta[::-1], va='center',size=self.args.flabel_size) else: ax_metadata.set_yticks([]) ax_metadata.set_xticks([]) im = ax_hm.imshow( self.numpy_matrix, #origin='lower', interpolation = 'nearest', aspect='auto', extent = [0, self.nf, 0, self.ns], cmap=cm, vmin=self.args.minv, vmax=self.args.maxv, norm = norm_f( vmin=minv if minv > 0.0 else None, vmax=maxv) ) #ax_hm.set_ylim([0,800]) ax_hm.set_xticks(np.arange(len(list(snames)))+0.5) if not self.args.no_slabels: snames_short = shrink_labels( list([s[0] for s in snames]) if type(snames[0]) is tuple else snames, self.args.max_slabel_len ) ax_hm.set_xticklabels(snames_short,rotation=90,va='top',ha='center',size=self.args.slabel_size) else: ax_hm.set_xticklabels([]) ax_hm_y2.set_ylim([0,self.ns]) ax_hm_y2.set_yticks(np.arange(len(fnames))+0.5) if not self.args.no_flabels: fnames_short = shrink_labels( fnames, self.args.max_flabel_len ) ax_hm_y2.set_yticklabels(fnames_short,va='center',size=self.args.flabel_size) else: ax_hm_y2.set_yticklabels( [] ) ax_hm.set_yticks([]) remove_splines( ax_hm ) ax_hm.tick_params(length=0) ax_hm_y2.tick_params(length=0) #ax_hm.set_xlim([0,self.ns]) ax_cm = plt.subplot(gs[3], axisbg = 'r', frameon = False) #fig.colorbar(im, ax_cm, orientation = 'horizontal', spacing = 'proportional', format = ticker.LogFormatterMathtext() ) fig.colorbar(im, ax_cm, orientation = 'horizontal', spacing='proportional' if self.args.sqrt_scale else 'uniform' ) # , format = ticker.LogFormatterMathtext() ) if not self.args.no_sclustering: ax_den_top = plt.subplot(gs[11], axisbg = 'r', frameon = False) sph._plot_dendrogram( self.sdendrogram['icoord'], self.sdendrogram['dcoord'], self.sdendrogram['ivl'], self.ns + 1, self.nf + 1, 1, 'top', no_labels=True,
0, 0, 0, 0], [1242, 4.796811, 0, 9999, -9999, 1.0, 100, 1, 27.074038, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1243, 15.340649, 0, 9999, -9999, 1.0, 100, 1, 83.079842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1244, 184.554903, 0, 9999, -9999, 1.0, 100, 1, 323.472536, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1245, 0.801193, 0, 9999, -9999, 1.0, 100, 1, 8.080896, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1246, 33.337139, 0, 9999, -9999, 1.0, 100, 1, 57.127825, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1247, 4.667163, 0, 9999, -9999, 1.0, 100, 1, 21.833396, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1248, 38.233335, 0, 9999, -9999, 1.0, 100, 1, 91.958275, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1249, 45.440915, 0, 9999, -9999, 1.0, 100, 1, 76.135177, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1250, 18.589578, 0, 9999, -9999, 1.0, 100, 1, 30.830519, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1251, 18.384975, 0, 9999, -9999, 1.0, 100, 1, 23.404345, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1252, 11.193203, 0, 9999, -9999, 1.0, 100, 1, 14.887727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1253, 19.743676, 0, 9999, -9999, 1.0, 100, 1, 64.502694, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1254, 24.304007, 0, 9999, -9999, 1.0, 100, 1, 82.278695, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1255, 0.857391, 0, 9999, -9999, 1.0, 100, 1, 3.818419, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1256, 3.772801, 0, 9999, -9999, 1.0, 100, 1, 15.091842, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1257, 28.887836, 0, 9999, -9999, 1.0, 100, 1, 88.95288, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1258, 91.174, 0, 9999, -9999, 1.0, 100, 1, 235.487329, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1259, 32.638531, 0, 9999, -9999, 1.0, 100, 1, 109.288719, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1260, 2.041782, 0, 9999, -9999, 1.0, 100, 1, 20.168717, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1261, 42.795691, 0, 9999, -9999, 1.0, 100, 1, 201.699555, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1262, 0.224763, 0, 9999, -9999, 1.0, 100, 1, 
0.524108, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1263, 0.179248, 0, 9999, -9999, 1.0, 100, 1, 0.352421, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1264, 58.061966, 0, 9999, -9999, 1.0, 100, 1, 82.035361, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1265, 4.031822, 0, 9999, -9999, 1.0, 100, 1, 6.654727, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1266, 73.02204, 0, 9999, -9999, 1.0, 100, 1, 119.710849, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1267, 30.733297, 0, 9999, -9999, 1.0, 100, 1, 39.469006, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1268, 0.17743, 0, 9999, -9999, 1.0, 100, 1, 3.4295, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1269, 0.119356, 0, 9999, -9999, 1.0, 100, 1, 5.105829, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1270, 5.449144, 0, 9999, -9999, 1.0, 100, 1, 38.950511, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1271, 8.689982, 0, 9999, -9999, 1.0, 100, 1, 47.371792, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1272, 0.18776, 0, 9999, -9999, 1.0, 100, 1, 1.23166, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1273, 0.185779, 0, 9999, -9999, 1.0, 100, 1, 2.169201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1274, 36.940444, 0, 9999, -9999, 1.0, 100, 1, 53.095629, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1275, 53.151271, 0, 9999, -9999, 1.0, 100, 1, 99.0753, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1276, 16.024696, 0, 9999, -9999, 1.0, 100, 1, 25.655641, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1277, 32.352771, 0, 9999, -9999, 1.0, 100, 1, 65.611252, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1278, 88.638367, 0, 9999, -9999, 1.0, 100, 1, 170.437781, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1279, 2.7e-05, 0, 9999, -9999, 1.0, 100, 1, 0.004344, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1280, 0.004308, 0, 9999, -9999, 1.0, 100, 1, 0.626494, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1281, 1.7e-05, 0, 9999, -9999, 1.0, 100, 1, 2.51246, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1282, 0.281158, 0, 9999, -9999, 1.0, 100, 1, 4.363037, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1283, 727.46412, 0, 9999, -9999, 1.0, 
100, 1, 1297.764428, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1284, 10.703236, 0, 9999, -9999, 1.0, 100, 1, 28.426322, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1285, 0.000803, 0, 9999, -9999, 1.0, 100, 1, 2.937048, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1286, 7.52325, 0, 9999, -9999, 1.0, 100, 1, 17.872201, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1287, 64.152642, 0, 9999, -9999, 1.0, 100, 1, 93.199628, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1288, 117.223654, 0, 9999, -9999, 1.0, 100, 1, 148.402692, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1289, 42.375679, 0, 9999, -9999, 1.0, 100, 1, 184.149235, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1290, 3.474021, 0, 9999, -9999, 1.0, 100, 1, 4.901974, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1291, 62.110286, 0, 9999, -9999, 1.0, 100, 1, 98.293351, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1292, 21.852994, 0, 9999, -9999, 1.0, 100, 1, 41.682074, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1293, 0.873574, 0, 9999, -9999, 1.0, 100, 1, 2.402107, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1294, 2.190087, 0, 9999, -9999, 1.0, 100, 1, 5.39743, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1295, 2.29334, 0, 9999, -9999, 1.0, 100, 1, 5.873666, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1296, 1.380348, 0, 9999, -9999, 1.0, 100, 1, 27.356489, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1297, 18.921538, 0, 9999, -9999, 1.0, 100, 1, 177.778742, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1298, 0.000336, 0, 9999, -9999, 1.0, 100, 1, 4.014603, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1299, 0.003865, 0, 9999, -9999, 1.0, 100, 1, 2.158207, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1300, 2.567936, 0, 9999, -9999, 1.0, 100, 1, 23.74405, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1301, 6.525254, 0, 9999, -9999, 1.0, 100, 1, 60.863304, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1302, 0.511075, 0, 9999, -9999, 1.0, 100, 1, 4.877299, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1303, 0.452906, 0, 9999, -9999, 1.0, 100, 1, 4.335516, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1304, 1.014138, 0, 
9999, -9999, 1.0, 100, 1, 9.594319, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1305, 0.001006, 0, 9999, -9999, 1.0, 100, 1, 0.004567, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1306, 0.34908, 0, 9999, -9999, 1.0, 100, 1, 1.827014, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1307, 0.009914, 0, 9999, -9999, 1.0, 100, 1, 0.29894, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1308, 1.178923, 0, 9999, -9999, 1.0, 100, 1, 3.278321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1309, 1.813403, 0, 9999, -9999, 1.0, 100, 1, 3.34909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1310, 0.892525, 0, 9999, -9999, 1.0, 100, 1, 1.64589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1311, 0.466682, 0, 9999, -9999, 1.0, 100, 1, 11.854004, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1312, 137.547375, 0, 9999, -9999, 1.0, 100, 1, 262.264924, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1313, 3.279576, 0, 9999, -9999, 1.0, 100, 1, 30.836748, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1314, 1.352249, 0, 9999, -9999, 1.0, 100, 1, 12.003987, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1315, 1.028715, 0, 9999, -9999, 1.0, 100, 1, 7.879027, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1316, 0.127149, 0, 9999, -9999, 1.0, 100, 1, 2.757497, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1317, 7.836832, 0, 9999, -9999, 1.0, 100, 1, 23.958574, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1318, 1.064595, 0, 9999, -9999, 1.0, 100, 1, 1.956332, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1319, 5.636343, 0, 9999, -9999, 1.0, 100, 1, 17.708276, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1320, 4.834162, 0, 9999, -9999, 1.0, 100, 1, 20.75859, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1321, 0.026233, 0, 9999, -9999, 1.0, 100, 1, 0.161123, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1322, 0.336975, 0, 9999, -9999, 1.0, 100, 1, 0.929763, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1323, 76.7013, 0, 9999, -9999, 1.0, 100, 1, 199.111909, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1324, 7.851174, 0, 9999, -9999, 1.0, 100, 1, 13.063258, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1325, 54.578065, 
0, 9999, -9999, 1.0, 100, 1, 90.497559, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1326, 14.217809, 0, 9999, -9999, 1.0, 100, 1, 56.928865, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1327, 14.395027, 0, 9999, -9999, 1.0, 100, 1, 50.796895, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1328, 5.136768, 0, 9999, -9999, 1.0, 100, 1, 16.063343, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1329, 138.76475, 0, 9999, -9999, 1.0, 100, 1, 218.675424, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1330, 6.227425, 0, 9999, -9999, 1.0, 100, 1, 30.131028, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1331, 0.055025, 0, 9999, -9999, 1.0, 100, 1, 0.289238, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1332, 4.795228, 0, 9999, -9999, 1.0, 100, 1, 26.293088, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1333, 17.185192, 0, 9999, -9999, 1.0, 100, 1, 45.650254, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1334, 0.005226, 0, 9999, -9999, 1.0, 100, 1, 1.215341, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1335, 0.399098, 0, 9999, -9999, 1.0, 100, 1, 3.306939, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1336, 3.402987, 0, 9999, -9999, 1.0, 100, 1, 29.773035, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1337, 45.578213, 0, 9999, -9999, 1.0, 100, 1, 121.31241, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1338, 0.204303, 0, 9999, -9999, 1.0, 100, 1, 0.832524, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1339, 2.739216, 0, 9999, -9999, 1.0, 100, 1, 10.086482, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1340, 19.957688, 0, 9999, -9999, 1.0, 100, 1, 70.098327, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1341, 74.14197, 0, 9999, -9999, 1.0, 100, 1, 205.513321, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1342, 0.01466, 0, 9999, -9999, 1.0, 100, 1, 0.734589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1344, 0.055392, 0, 9999, -9999, 1.0, 100, 1, 0.226057, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1345, 0.616609, 0, 9999, -9999, 1.0, 100, 1, 3.971188, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1346, 26.954107, 0, 9999, -9999, 1.0, 100, 1, 214.719215, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0], [1347, 67.310572, 0, 9999, -9999, 1.0, 100, 1, 414.115976, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1348, 7.177747, 0, 9999, -9999, 1.0, 100, 1, 22.707927, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1349, 15.268474, 0, 9999, -9999, 1.0, 100, 1, 42.352342, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1350, 0.007403, 0, 9999, -9999, 1.0, 100, 1, 0.094971, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1351, 2.4e-05, 0, 9999, -9999, 1.0, 100, 1, 0.015958, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1352, 0.004221, 0, 9999, -9999, 1.0, 100, 1, 0.83726, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1355, 0.919466, 0, 9999, -9999, 1.0, 100, 1, 1.688324, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1356, 9.681643, 0, 9999, -9999, 1.0, 100, 1, 73.486231, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1357, 7.266303, 0, 9999, -9999, 1.0, 100, 1, 56.459913, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1358, 0.134756, 0, 9999, -9999, 1.0, 100, 1, 0.247293, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1359, 46.718809, 0, 9999, -9999, 1.0, 100, 1, 70.633589, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1360, 2.755124, 0, 9999, -9999, 1.0, 100, 1, 17.135983, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1361, 14.705813, 0, 9999, -9999, 1.0, 100, 1, 63.207173, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1362, 13.507454, 0, 9999, -9999, 1.0, 100, 1, 79.107216, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1363, 0.003958, 0, 9999, -9999, 1.0, 100, 1, 0.036158, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1364, 0.005393, 0, 9999, -9999, 1.0, 100, 1, 0.061068, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1365, 6.6e-05, 0, 9999, -9999, 1.0, 100, 1, 0.000456, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1366, 0.490517, 0, 9999, -9999, 1.0, 100, 1, 1.229992, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1367, 5.467137, 0, 9999, -9999, 1.0, 100, 1, 43.863891, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1368, 0.007131, 0, 9999, -9999, 1.0, 100, 1, 3.298243, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1369, 4.60393, 0, 9999, -9999, 1.0, 100, 1, 7.968859, 0.0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0], [1370, 0.193666, 0, 9999, -9999, 1.0, 100, 1, 0.343308, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1371, 30.371868, 0, 9999, -9999, 1.0, 100, 1, 81.767208, 0.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1372, 150.796341, 0, 9999, -9999, 1.0, 100, 1, 192.966588, 0.0, 0, 0, 0, 0, 0, 0, 0,
<filename>linlp/algorithm/viterbiMat/prob_emit_place.py prob_emit = { 'E': { '店': -5.2632081894344687, '顿': -4.19849745244204, '基': -4.039432757812353, '略': -5.6198831333732011, '楼': -6.4671809937604046, '南': -3.6537702770003686, '花': -5.0808866326405138, '港': -4.67542152453235, '那': -6.4671809937604046, '华': -7.5657932824285146, '腾': -3.9022316362988678, '堂': -6.1794989213086238, '古': -3.3761385404020889, '关': -7.5657932824285146, '德': -4.9267359528132557, '普': -7.5657932824285146, '沟': -3.9822743439724042, '伯': -5.9563553699944141, '加': -5.6198831333732011, '拿': -4.2335887722533103, '逊': -6.4671809937604046, '安': -5.0008439249669774, '达': -4.3469174575603136, '尔': -5.4863517407486784, '岛': -4.4747508290701985, '瓦': -7.5657932824285146, '根': -4.0104452209391006, '罗': -6.8726461018685692, '河': -7.5657932824285146, '亚': -3.8281236641451462, '江': -2.005111651412987, '支': -5.4863517407486784, '娥': -6.8726461018685692, '之': -6.8726461018685692, '汉': -6.4671809937604046, '子': -5.3685687050922946, '明': -5.6198831333732011, '园': -4.4747508290701985, '俄': -2.9214023832871416, '西': -3.1963454299614931, '波': -6.1794989213086238, '拉': -2.8836620553042946, '圈': -7.5657932824285146, '庄': -2.1019614774029045, '口': -3.7371518859394195, '矶': -7.5657932824285146, '比': -3.8769138283145783, '夷': -5.7740338132004592, '色': -5.2632081894344687, '兰': -3.4069101990688426, '森': -6.1794989213086238, '科': -7.5657932824285146, '得': -4.1318060779433683, '滨': -3.8045931667349522, '克': -7.5657932824285146, '登': -5.9563553699944141, '里': -4.2699564164241854, '驿': -5.9563553699944141, '的': -4.8577430813263041, '丹': -7.5657932824285146, '嘉': -5.0008439249669774, '山': -2.8836620553042946, '峡': -7.5657932824285146, '特': -5.0808866326405138, '镇': -5.7740338132004592, '斯': -4.19849745244204, '坪': -7.5657932824285146, '冈': -5.3685687050922946, '萨': -6.4671809937604046, '哥': -4.0692857209620339, '荷': -5.3685687050922946, '勒': -4.3076967444070329, '坡': -6.1794989213086238, '赛': -5.6198831333732011, '善': 
-5.9563553699944141, }, 'Z': { '大创': -16.821768640316588, '末##末': -4.9476470922955233e-08, }, 'X': { '民主路': -9.5025994539210696, '孙河乡': -10.195746634481015, '认识到': -10.195746634481015, '最大': -10.195746634481015, '牵制': -10.195746634481015, '往': -5.8390378077914242, '采购': -8.8094522733611242, '挥师': -10.195746634481015, '掌控': -9.0971343458129059, '幸福': -8.8094522733611242, '新区': -9.0971343458129059, '培训': -10.195746634481015, '回': -6.640398572991602, '文化': -10.195746634481015, '过来': -10.195746634481015, '新': -8.8094522733611242, '项目': -9.5025994539210696, '还有': -9.5025994539210696, '秦': -9.5025994539210696, '至': -3.6304816644456546, '金凤凰': -10.195746634481015, '打击': -9.5025994539210696, '经': -7.4876964333788054, '重回': -10.195746634481015, '沱': -8.8094522733611242, '重视': -8.5863087220469154, '桃山': -10.195746634481015, '园': -8.8094522733611242, '渡过': -10.195746634481015, '围绕': -10.195746634481015, '移师': -10.195746634481015, '正在': -8.5863087220469154, '”': -7.7978513616826453, '+': -7.9985220571447959, '吃': -10.195746634481015, '拜访': -10.195746634481015, '宣示': -9.5025994539210696, '下游': -10.195746634481015, '架设': -10.195746634481015, '侨商': -10.195746634481015, '五羊': -7.7108399846930151, '比作': -10.195746634481015, '抗议': -9.5025994539210696, '宏府': -10.195746634481015, '同': -6.8999097684766868, '福永': -8.8094522733611242, '红城': -9.5025994539210696, '看过': -10.195746634481015, '湍': -10.195746634481015, '地标': -9.5025994539210696, '老人': -9.0971343458129059, '孙家': -10.195746634481015, '庐州': -10.195746634481015, '未##数': -6.5321849883513687, ':': -7.7978513616826453, '回龙观': -9.5025994539210696, '吞并': -8.1163050928011788, '走访': -10.195746634481015, '南皮': -10.195746634481015, '焦庄乡': -8.4039871652529605, '小伙': -10.195746634481015, '女子': -9.0971343458129059, '自': -10.195746634481015, ';': -9.0971343458129059, '对阵': -9.5025994539210696, '范': -9.5025994539210696, '首都': -5.1458906272314788, '横穿': -10.195746634481015, '除': -9.0971343458129059, '开往': -5.1143422694965528, '流域': 
-9.0971343458129059, '在线': -9.0971343458129059, '调任': -10.195746634481015, '哪些': -10.195746634481015, '平和': -10.195746634481015, '正是': -10.195746634481015, '主要': -10.195746634481015, '或者': -7.2513076553145748, '实施': -8.5863087220469154, '劲旅': -9.5025994539210696, '五大': -10.195746634481015, '泉水': -9.0971343458129059, '以': -10.195746634481015, '谈': -9.5025994539210696, '被': -8.1163050928011788, '进驻': -10.195746634481015, '重返': -10.195746634481015, '哈拉布拉乡加勒帕勒': -10.195746634481015, '江都区': -10.195746634481015, '开设': -10.195746634481015, '管庄乡': -10.195746634481015, '留给': -10.195746634481015, '有': -7.6307972770194787, '飞来': -9.5025994539210696, '交付': -10.195746634481015, '巡回赛': -10.195746634481015, '分为': -10.195746634481015, '荣登': -10.195746634481015, '毗邻': -8.1163050928011788, '雪龙号': -10.195746634481015, '杀害': -9.0971343458129059, '警方': -10.195746634481015, '乌溪镇': -10.195746634481015, '苦情': -10.195746634481015, '茂林': -9.5025994539210696, '河边': -9.5025994539210696, '场上': -10.195746634481015, '帮助': -9.5025994539210696, '太阳宫': -10.195746634481015, '归': -10.195746634481015, ';': -7.1047041811226999, '滨海湾': -9.5025994539210696, '人': -9.5025994539210696, '散兵': -9.5025994539210696, '至于': -10.195746634481015, '暨': -8.5863087220469154, '城东': -8.5863087220469154, '就': -9.0971343458129059, '包括': -10.195746634481015, '河粉': -9.5025994539210696, '援助': -8.5863087220469154, '地': -10.195746634481015, '同期': -9.5025994539210696, '小': -8.2498364854257016, '走到': -8.2498364854257016, '河': -9.5025994539210696, '抱怨': -10.195746634481015, '以来': -8.5863087220469154, '精美': -9.5025994539210696, '上': -8.4039871652529605, '产业': -10.195746634481015, '侵害': -9.0971343458129059, '海景房': -10.195746634481015, '生态': -10.195746634481015, '东岳': -10.195746634481015, '详解': -10.195746634481015, '加大': -10.195746634481015, '村口': -10.195746634481015, '走进': -9.5025994539210696, '窦店': -10.195746634481015, '承担': -10.195746634481015, '袭击': -10.195746634481015, '“': -6.1703949437458663, '巡演': -10.195746634481015, '进入': 
-7.89316154148697, '飞回': -9.5025994539210696, '介绍': -9.5025994539210696, '国家': -7.6307972770194787, '、': -0.54560414199221863, '虱目鱼': -10.195746634481015, ',': -4.1602652019562596, '隔川乡': -10.195746634481015, '进港': -9.5025994539210696, '?': -9.5025994539210696, '南方': -9.5025994539210696, '华林路': -8.8094522733611242, '龙岗': -10.195746634481015, '绛帐镇': -10.195746634481015, '法治': -9.5025994539210696, '鹦鹉': -10.195746634481015, '美军': -10.195746634481015, '侵略': -7.4876964333788054, '秀英': -10.195746634481015, '透过': -10.195746634481015, '入境': -9.5025994539210696, '素有': -10.195746634481015, '往返': -7.2000143609270246, '(': -5.5323075403689481, '按照': -9.5025994539210696, '维尔': -10.195746634481015, '迁至': -8.2498364854257016, '三沙市': -10.195746634481015, '远眺': -10.195746634481015, '老区': -10.195746634481015, '市长': -10.195746634481015, '推介': -10.195746634481015, '收获': -10.195746634481015, '代表团': -10.195746634481015, '对于': -7.9985220571447959, '未##时': -7.4876964333788054, '驶往': -8.8094522733611242, '的': -3.9380790465983768, '打': -9.5025994539210696, '接到': -7.4231579122412343, '石榴': -10.195746634481015, '高新区': -9.5025994539210696, '奔赴': -10.195746634481015, '举行': -7.89316154148697, '入围': -10.195746634481015, '赴': -8.4039871652529605, '更名': -9.5025994539210696, '八路': -9.5025994539210696, '跑到': -9.5025994539210696, '出': -10.195746634481015, '产品': -10.195746634481015, '划归': -9.5025994539210696, '小将': -10.195746634481015, '玉蝉镇': -9.5025994539210696, '赤': -10.195746634481015, '以及': -5.6739580574319755, '抵': -10.195746634481015, '东坝': -9.5025994539210696, '不敌': -9.5025994539210696, '紧靠': -10.195746634481015, '南下': -9.5025994539210696, '残害': -10.195746634481015, '大舞台': -10.195746634481015, '江北': -10.195746634481015, '冷淡': -10.195746634481015, '口岸': -10.195746634481015, '搜集': -9.5025994539210696, '元帅': -10.195746634481015, '加': -10.195746634481015, '东岸': -10.195746634481015, '及其': -10.195746634481015, '玉兰': -10.195746634481015, '落实': -10.195746634481015, '对话': -10.195746634481015, '主办': 
-9.5025994539210696, '向': -5.9472513924316566, '进口': -10.195746634481015, '移民': -10.195746634481015, '玉川镇': -10.195746634481015, '撤出': -10.195746634481015, '侵占': -10.195746634481015, '临海': -10.195746634481015, '窃取': -9.0971343458129059, '年': -10.195746634481015, '最高': -9.0971343458129059, '城西': -10.195746634481015, '奥林匹克': -10.195746634481015, '渔民': -10.195746634481015, '‘': -10.195746634481015, '比': -8.2498364854257016, '直达': -7.4231579122412343, '虎': -9.0971343458129059, '问题': -10.195746634481015, '(': -5.4952662686885994, '负责': -9.5025994539210696, '了解': -10.195746634481015, '参与': -9.0971343458129059, '实现': -10.195746634481015, '感谢': -10.195746634481015, '环抱': -10.195746634481015, '产业园': -10.195746634481015, '翁': -10.195746634481015, '=': -10.195746634481015, '整个': -9.0971343458129059, '没有': -9.0971343458129059, '跟着': -10.195746634481015, '周围': -10.195746634481015, '封锁': -9.5025994539210696, '城市': -7.89316154148697, '以南': -10.195746634481015, '作者': -10.195746634481015, '~': -10.195746634481015, '通': -9.5025994539210696, '交叉口': -9.5025994539210696, '太夫人': -10.195746634481015, '抛弃': -10.195746634481015, '慰问': -10.195746634481015, '面对': -7.89316154148697, '为民': -10.195746634481015, '老坝港': -9.0971343458129059, '之后': -10.195746634481015, '得到': -8.8094522733611242, '获得': -9.5025994539210696, '街': -9.5025994539210696, '≈': -10.195746634481015, '湾': -10.195746634481015, '鼎兴': -10.195746634481015, '遇上': -7.5566893048657571, '榛': -10.195746634481015, '一家': -9.5025994539210696, '中南部': -9.5025994539210696, '超过': -8.8094522733611242, '开放': -9.0971343458129059, '继': -10.195746634481015, '小南海': -9.5025994539210696, '还是': -7.4231579122412343, '阿坝州': -10.195746634481015, '沦为': -10.195746634481015, '呼吁': -9.5025994539210696, '地处': -7.1047041811226999, '隔': -10.195746634481015, '岛国': -10.195746634481015, '移至': -10.195746634481015, '带到': -9.0971343458129059, '开展': -10.195746634481015, '让': -10.195746634481015, '大围': -9.5025994539210696, '拍摄': -9.5025994539210696, '港沟': 
-8.5863087220469154, '转赴': -10.195746634481015, '布吉': -9.5025994539210696, '交给': -9.5025994539210696, '沿': -8.2498364854257016, '传入': -9.0971343458129059, '需要': -10.195746634481015, '宛平': -10.195746634481015, '飞': -5.4862164331686811, '贺岁': -10.195746634481015, '平房乡': -10.195746634481015, '赤化镇': -7.9985220571447959, '外省': -10.195746634481015, '省': -10.195746634481015, '处于': -9.5025994539210696, '灌口': -10.195746634481015, '前总统': -10.195746634481015, '仅': -9.5025994539210696, '安村镇': -10.195746634481015, '钢铁': -9.5025994539210696, '只有': -10.195746634481015, '除了': -9.0971343458129059, '建': -9.0971343458129059, '为': -6.9768708096128149, '承接': -10.195746634481015, '南': -7.9985220571447959, '天大': -10.195746634481015, '援建': -9.0971343458129059, '是': -4.770796616999613, '沃尔纳德斯基': -10.195746634481015, '来回': -10.195746634481015, '仓埠街': -10.195746634481015, '属于': -7.3053748765848505, '东北部': -8.4039871652529605, '美丽': -9.5025994539210696, '成为': -6.8284508044945413, '所在地': -10.195746634481015, '西部': -8.8094522733611242, '距': -7.4876964333788054, '棒约翰': -10.195746634481015, '托森哈': -10.195746634481015, '号称': -10.195746634481015, '飞往': -5.4862164331686811, '保护': -10.195746634481015, '弗洛里': -10.195746634481015, '双岗': -10.195746634481015, '未##团': -9.5025994539210696, '关于': -9.0971343458129059, '未##串': -7.7108399846930151, '以北': -10.195746634481015, '前往': -5.9910540150900493, '属': -10.195746634481015, '宏': -10.195746634481015, '与': -4.0013412433763431, '哈拉': -10.195746634481015, '到': -3.759596266111588, '否认': -10.195746634481015, '固安': -10.195746634481015, '作为': -6.640398572991602, '涉及': -9.0971343458129059, '诗意': -10.195746634481015, '效仿': -9.0971343458129059, '国脚': -10.195746634481015, '由于': -10.195746634481015, '共有': -10.195746634481015, '打造': -8.2498364854257016, '自贸区': -10.195746634481015, '钜桥镇': -10.195746634481015, '广埠屯': -10.195746634481015, '乃至': -6.7300107316812889, '-': -4.7752116352087297, '提出': -10.195746634481015, '重灾区': -10.195746634481015, '仅有': -10.195746634481015, 
'任': -10.195746634481015, '发起': -10.195746634481015, '走出': -10.195746634481015, '担心': -10.195746634481015, '升': -9.5025994539210696, '部分': -9.5025994539210696, '回到': -7.3625332904247998, '入': -9.0971343458129059, '离': -8.2498364854257016, '对接': -10.195746634481015, '灵山卫': -10.195746634481015, '直逼': -10.195746634481015, '建立': -8.8094522733611242, '对抗': -8.8094522733611242, '对': -4.8251086063533526, '市': -6.303926336370389, ')': -8.4039871652529605, '捷安特': -10.195746634481015, '宫古岛': -9.5025994539210696, '直飞': -7.7978513616826453, '最佳': -10.195746634481015, '接受': -8.8094522733611242, '媒体': -10.195746634481015, '各': -10.195746634481015, '最': -10.195746634481015, '扣留': -10.195746634481015, '/': -8.1163050928011788, '山区': -10.195746634481015, '所处': -8.5863087220469154, '县': -9.0971343458129059, '学习': -10.195746634481015, '华埠': -10.195746634481015, '刘': -10.195746634481015, '返回': -7.0176928041330697, '环': -9.0971343458129059, '琴江': -10.195746634481015, '女': -9.5025994539210696, '就是': -9.5025994539210696, '原': -9.5025994539210696, '较': -9.5025994539210696, '占领': -10.195746634481015, '组织': -10.195746634481015, '吉安乡': -9.5025994539210696, '划入': -8.5863087220469154, '使': -9.5025994539210696, '应对': -10.195746634481015, '圭': -10.195746634481015, '女画家': -9.5025994539210696, '召开': -9.0971343458129059, '下辖': -10.195746634481015, '致': -10.195746634481015, '至上': -9.5025994539210696, '都城': -9.5025994539210696, '夸大': -10.195746634481015, '出兵': -9.5025994539210696, '靠近': -9.5025994539210696, '开拓': -9.5025994539210696, '主城区': -10.195746634481015, '多': -10.195746634481015, '同意': -10.195746634481015, '交通部长': -9.5025994539210696, '东部': -8.2498364854257016, '城区': -8.2498364854257016, '送到': -10.195746634481015, '》': -9.0971343458129059, '选择': -9.5025994539210696, '→': -9.0971343458129059, '比不上': -9.5025994539210696, '侵犯': -10.195746634481015, '瑶': -10.195746634481015, '雷平': -10.195746634481015, '首届': -10.195746634481015, '曝光': -9.5025994539210696, '公园': -10.195746634481015, '巴州': 
-10.195746634481015, '请求': -9.5025994539210696, '对岸': -10.195746634481015, '河段': -9.5025994539210696, '认为': -8.8094522733611242, '大国': -10.195746634481015, '过境': -10.195746634481015, '拥有': -9.0971343458129059, '中部': -8.4039871652529605, '归还': -8.8094522733611242, '运回': -10.195746634481015, '来到': -6.3245456235731243, '麻扎': -10.195746634481015, '老': -9.5025994539210696, '斯': -8.2498364854257016, '介入': -9.5025994539210696, '格兰': -9.0971343458129059, '敌视': -9.5025994539210696, '迁葬': -10.195746634481015, '经营': -10.195746634481015, '登上': -9.5025994539210696, '清远': -10.195746634481015, '收复': -9.5025994539210696, '居': -8.8094522733611242, '邻近': -9.5025994539210696, '内': -8.8094522733611242, '联合': -9.5025994539210696, '前进街': -10.195746634481015, '利用': -9.0971343458129059, '驴友': -9.0971343458129059, '认定': -10.195746634481015, '山包': -10.195746634481015, '等到': -10.195746634481015, '方向': -9.5025994539210696, '《': -4.7151077111390247, '县城': -7.89316154148697, '庙会': -10.195746634481015, '汉': -9.0971343458129059, '去': -7.1047041811226999, '西北部': -9.5025994539210696, '反超': -7.3053748765848505, '茶马': -10.195746634481015, '溪美': -10.195746634481015, '英': -10.195746634481015, '隶属于': -9.5025994539210696, '金融': -10.195746634481015, '分行': -8.8094522733611242, '授权': -9.5025994539210696, '阳山': -10.195746634481015, '赶到': -7.2000143609270246, '唯一': -10.195746634481015, '更换': -10.195746634481015, '逃到': -8.8094522733611242, '享受': -10.195746634481015, '扎西': -9.5025994539210696, '到达': -8.2498364854257016, '市郊': -8.5863087220469154, '浙': -10.195746634481015, '宗西乡': -10.195746634481015, '铁路': -8.5863087220469154, '距离': -8.1163050928011788, '依托': -9.5025994539210696, '古': -10.195746634481015, '给': -8.4039871652529605, '国宝级': -8.4039871652529605, '境内': -8.5863087220469154, '北': -10.195746634481015, '滨河路': -9.5025994539210696, '古都': -10.195746634481015, '外': -8.5863087220469154, '敦促': -9.5025994539210696, '籍': -9.5025994539210696, '东海岸': -10.195746634481015, '芙': -10.195746634481015, '拓展': 
-9.5025994539210696, '对付': -10.195746634481015, '指责': -10.195746634481015, '车展': -8.4039871652529605, '划定': -8.8094522733611242, '划伤': -9.0971343458129059, '和': -2.4289060973955028, '寄往': -10.195746634481015, '出发': -10.195746634481015, '责成': -7.9985220571447959, '殖民': -10.195746634481015, '举办': -10.195746634481015, '金山': -10.195746634481015, '洋': -10.195746634481015, '送': -10.195746634481015, '首府': -7.1512241967575925, '城北': -10.195746634481015, '不愿': -9.5025994539210696, '梦': -8.5863087220469154, '挑战': -8.4039871652529605, '南路': -10.195746634481015, '总统府': -9.5025994539210696, '位于': -5.6631471413277596, '西': -8.8094522733611242, '东南部': -9.5025994539210696, '受': -10.195746634481015, '逼': -10.195746634481015, '文明': -10.195746634481015, '通向': -9.5025994539210696, '总理': -8.5863087220469154, '口': -7.4876964333788054, '运往': -9.5025994539210696, '希望': -9.5025994539210696, '成立': -9.5025994539210696, '取代': -9.5025994539210696, '西路': -10.195746634481015, '抵达': -8.2498364854257016, '出口': -10.195746634481015, '南西': -10.195746634481015, '这些': -10.195746634481015, '’': -10.195746634481015, '路': -8.4039871652529605, '发往': -6.640398572991602, '携': -10.195746634481015, '相信': -10.195746634481015, '仅次于': -10.195746634481015, '发展': -8.8094522733611242, '英军': -8.8094522733611242, '看': -8.5863087220469154, '海监船': -6.1884134492485448, '不属于': -8.5863087220469154, '蜀都': -10.195746634481015, '寄回': -10.195746634481015, '医疗': -9.0971343458129059, '强征': -10.195746634481015, '甚至': -7.5566893048657571, '参加': -7.5566893048657571, '纽带': -10.195746634481015, '大街': -9.5025994539210696, '航空': -10.195746634481015, '罔顾': -10.195746634481015, '赶回': -8.5863087220469154, '直至': -9.0971343458129059, '涵盖': -10.195746634481015, '反对派': -8.8094522733611242, '落地': -9.5025994539210696, '岩前': -10.195746634481015, '大': -8.4039871652529605, '流向': -10.195746634481015, '领导': -10.195746634481015, '智库': -10.195746634481015, '由': -9.5025994539210696, '航母': -8.2498364854257016, '跟': -10.195746634481015, '媳妇': 
-10.195746634481015, '或': -6.1884134492485448, '北段': -9.5025994539210696, '用': -10.195746634481015, '陈城镇': -10.195746634481015, '关注': -10.195746634481015, '团结': -10.195746634481015, '—': -4.8438885010049493, '高罗乡': -10.195746634481015, '线': -10.195746634481015, '河道': -10.195746634481015, '十里铺': -8.8094522733611242, '通过': -9.5025994539210696, '逮捕': -10.195746634481015, '代表': -10.195746634481015, '良乡': -9.0971343458129059, '驻': -4.6084979760807663, '发射': -10.195746634481015, '高枧': -10.195746634481015, '选手': -9.0971343458129059, '位列': -10.195746634481015, '国兴': -10.195746634481015, '战胜': -8.8094522733611242, '省级': -10.195746634481015, '三孝口': -10.195746634481015, '仿照': -9.5025994539210696, '区': -8.5863087220469154, '扬': -9.5025994539210696, '大妈': -8.8094522733611242, '采访': -9.5025994539210696, '途经': -9.5025994539210696, '尤': -10.195746634481015, '又是': -8.5863087220469154, '定位': -10.195746634481015, '拉入': -10.195746634481015, '入主': -10.195746634481015, '湖': -10.195746634481015, '伊滨区': -10.195746634481015, '直捣': -10.195746634481015, '瓦子': -8.8094522733611242, '后': -9.0971343458129059, '欢迎': -8.4039871652529605, '欠': -9.5025994539210696, '西南部': -8.1163050928011788, '出售': -10.195746634481015, '城': -10.195746634481015, '景观': -9.0971343458129059, '南部': -6.0686122494359243, '攻占': -10.195746634481015, '竹料镇': -10.195746634481015, '驶向': -9.5025994539210696, '内有': -8.8094522733611242, '女星': -10.195746634481015, '支持': -10.195746634481015, '发现': -8.5863087220469154, '首位': -9.5025994539210696, '玉': -10.195746634481015, '赶赴': -9.0971343458129059, '威远': -10.195746634481015, '入侵': -8.8094522733611242, '裔': -8.2498364854257016, '受到': -9.5025994539210696, '阻挠': -9.5025994539210696, '港口': -8.8094522733611242, '科考': -9.5025994539210696, '北麓': -10.195746634481015, '大雾': -10.195746634481015, '从': -6.8999097684766868, '抗衡': -10.195746634481015, '莫特尔': -8.8094522733611242, '城管': -10.195746634481015, '古刹': -9.0971343458129059, '讯': -10.195746634481015, '滨河': -8.5863087220469154, '腾飞': 
-10.195746634481015, '铁': -10.195746634481015, '明知': -10.195746634481015, '才溪镇': -10.195746634481015, '殖民地': -10.195746634481015, '置于': -10.195746634481015, '侨居': -10.195746634481015, '来': -6.7617594299958697, '主导': -9.5025994539210696, '醇化': -10.195746634481015, '妻子': -9.5025994539210696, '承认': -9.0971343458129059, '七里渠': -9.0971343458129059, '中继': -8.8094522733611242, '庐丰乡': -10.195746634481015, '驶入': -10.195746634481015, '某': -9.0971343458129059, '干涉': -9.5025994539210696, '称': -7.3053748765848505, '加盟': -10.195746634481015, '移居': -8.8094522733611242, '交口': -10.195746634481015, '这个': -8.8094522733611242, '参观': -9.5025994539210696, '航运': -10.195746634481015, '给予': -9.5025994539210696, '西关': -10.195746634481015, '动': -8.4039871652529605, '州': -8.2498364854257016, '重点': -9.0971343458129059, '搬到': -8.5863087220469154, '退回': -10.195746634481015, '国父': -9.5025994539210696, '打败': -9.0971343458129059, '北岸': -10.195746634481015, '调研': -10.195746634481015, '门头沟': -7.9985220571447959, '合并': -10.195746634481015, '争': -9.5025994539210696, '新兴': -10.195746634481015, '面': -10.195746634481015, '梅林': -10.195746634481015, '面向': -9.5025994539210696, '南溪区': -7.89316154148697, ',': -8.8094522733611242, '及': -4.8678704656914347, '击败': -9.0971343458129059, '现有': -9.0971343458129059, '超越': -10.195746634481015, '检查': -10.195746634481015, '出席': -8.5863087220469154, '人大代表': -8.5863087220469154, '日': -10.195746634481015, '加强': -9.5025994539210696, '冰上': -10.195746634481015, '-': -7.2000143609270246, '清剿': -10.195746634481015, '设立': -10.195746634481015, '加上': -10.195746634481015, '撞击': -6.1884134492485448, '前川': -10.195746634481015, '将': -7.4231579122412343, '位居': -10.195746634481015, '金': -9.5025994539210696, '叫做': -10.195746634481015, '巨人': -8.5863087220469154, '建设': -8.4039871652529605, '竹竿': -10.195746634481015, '根据': -10.195746634481015, '无缘': -10.195746634481015, '滨湖': -9.0971343458129059, '游客': -8.5863087220469154, '并非': -10.195746634481015, '说明': -10.195746634481015, '针对': 
-8.5863087220469154, '垄': -9.5025994539210696, '省城': -10.195746634481015, '里': -10.195746634481015, '飞赴': -10.195746634481015, '市区': -7.6307972770194787, '这': -10.195746634481015, '赶来': -8.2498364854257016, '新大洲': -10.195746634481015, '北部': -8.2498364854257016, '西郊': -10.195746634481015, ')': -7.9985220571447959, '家家': -10.195746634481015, '草根': -10.195746634481015, '加入': -10.195746634481015, '省会': -7.9985220571447959, '要求': -8.5863087220469154, '撤销': -10.195746634481015, '绿色': -10.195746634481015, '沿线': -9.5025994539210696, '带来': -10.195746634481015, '自治县': -10.195746634481015, '东雷乡': -8.8094522733611242, '搅动': -10.195746634481015, '直抵': -9.0971343458129059, '纸坊街': -9.0971343458129059, '赌城': -8.4039871652529605, '先于': -9.5025994539210696, '北院': -10.195746634481015, '飞抵': -7.1047041811226999, '城建': -9.0971343458129059, '经停': -8.8094522733611242, '豪门': -10.195746634481015, '迁入': -10.195746634481015, '定鼎': -10.195746634481015, '未##人': -6.4580770161976471, '方面': -9.5025994539210696, '分局': -10.195746634481015, '海域': -8.8094522733611242, '等': -6.1703949437458663, '张湾街': -10.195746634481015, '地区': -7.4231579122412343, '已有': -9.0971343458129059, '鲤鱼洲': -10.195746634481015, '迁往': -10.195746634481015, '回老家': -9.0971343458129059, '传到': -10.195746634481015, '穿越': -10.195746634481015, '返': -9.0971343458129059, '东': -7.4876964333788054, '连接': -10.195746634481015, '尤其': -10.195746634481015, '反对': -9.5025994539210696, '殖民统治': -8.8094522733611242, '所属': -10.195746634481015, '通往': -7.7108399846930151, '开赴': -10.195746634481015, '紧邻': -9.5025994539210696, '把': -8.2498364854257016, '履行': -10.195746634481015, '送往': -10.195746634481015, '割让': -9.5025994539210696, '酸梨': -10.195746634481015, '视窗': -5.1985343607169003, '全国': -10.195746634481015, '顺': -10.195746634481015, '成': -10.195746634481015, '兰店乡': -10.195746634481015, '建成': -7.89316154148697, '海岸': -10.195746634481015, '赶往': -7.9985220571447959, '正义': -10.195746634481015, '在': -4.6944884239362885, '好人': -10.195746634481015, '甘孜州': 
-10.195746634481015, '省内': -10.195746634481015, }, 'S': { '始##始': 0.0, }, 'A': { '一家': -9.1953061836693966, '这批': -12.330800399598546, '只见': -11.637653219038601, '您': -12.330800399598546, '专习': -12.330800399598546, '往': -6.9417286697820453, '派驻': -10.721362487164445, '民主路': -11.637653219038601, '次等': -12.330800399598546, '只不过': -11.637653219038601, '该行': -10.944506038478655, '墨水': -12.330800399598546, '奋进': -12.330800399598546, '回': -5.9608994167703191, '其中': -6.4092219799547303, '倾情': -12.330800399598546, '团': -11.637653219038601, '飞离': -11.232188110930437, '漷县': -12.330800399598546, '不远处': -12.330800399598546, '武进': -12.330800399598546, '字': -11.637653219038601, '专': -12.330800399598546, '重视': -9.335068126044554, '开启': -10.133575822262326, '想来': -10.721362487164445, '皑皑': -12.330800399598546, '版': -10.028215306604499, '看涨': -12.330800399598546, '顽皮': -12.330800399598546, '侵袭': -9.4404286417023808, '”': -5.3638332609845625, '谈谈': -10.133575822262326, '搬出': -11.637653219038601, '搬动': -12.330800399598546, '抗议': -9.9329051268001756, '驾驭': -12.330800399598546, '续': -12.330800399598546, '枣园': -10.539040930370492, '科': -10.539040930370492, '老人': -12.330800399598546, '未##数': -4.3013675590173035, '分': -11.637653219038601, '微观': -11.232188110930437, '提督': -12.330800399598546, '巡航': -10.25135885791871, '拐过': -12.330800399598546, '查询': -9.1527465692506009, '村堡': -12.330800399598546, '堂': -12.330800399598546, '横穿': -10.133575822262326, '冲突': -11.232188110930437, '身边': -11.232188110930437, '激辩': -11.637653219038601, '百强县': -12.330800399598546, '争雄': -12.330800399598546, '驻地': -11.637653219038601, '模仿': -10.384890250543233, '祖籍': -8.0133122860622361, '此时': -10.944506038478655, '手绘': -11.637653219038601, '繁荣': -12.330800399598546, '巩固': -9.7658510421370099, '托管': -11.637653219038601, '烧': -9.6917430699832874, '我县': -10.944506038478655, '盼': -11.637653219038601, '谈': -10.25135885791871, '穿透': -12.330800399598546, '携': -10.384890250543233, '判决': -11.637653219038601, '获奖者': 
-12.330800399598546, '大货车': -12.330800399598546, '尚品': -12.330800399598546, '主持': -9.9329051268001756, '邨': -12.330800399598546, '开设': -10.944506038478655, '行游': -12.330800399598546, '精': -11.637653219038601, '获知': -12.330800399598546, '带动': -8.9635045696120716, '街头': -12.330800399598546, '纸杯': -12.330800399598546, '伸向': -11.232188110930437, '寺': -11.232188110930437, '更': -11.232188110930437, '锁定': -11.232188110930437, '荣登': -10.25135885791871, '驻兵': -11.637653219038601, '购买': -8.6419209454846104, '轻慢': -12.330800399598546, '百万个': -12.330800399598546, '寓所': -11.637653219038601, '约——': -12.330800399598546, '华滋': -12.330800399598546, '改装': -11.637653219038601, '瘫痪': -10.133575822262326, '气化': -10.133575822262326, '总部': -12.330800399598546, '创新': -9.7658510421370099, '舞': -12.330800399598546, '接过': -11.232188110930437, '上述': -10.384890250543233, '现': -8.8342928381320664, '遭受': -9.6227501984963357, '入股': -11.232188110930437, '归': -11.232188110930437, '支队': -12.330800399598546, '供应商': -12.330800399598546, '跟团游': -12.330800399598546, '开卷': -12.330800399598546, '法甲': -11.637653219038601, '无': -10.539040930370492, '路段': -12.330800399598546, '有利于': -8.8342928381320664, '跳出': -9.9329051268001756, '此项': -10.944506038478655, '买家': -11.637653219038601, '军乐': -12.330800399598546, '孤立': -10.944506038478655, '忆': -9.386361420432106, '重演': -10.721362487164445, '聚': -8.9296030179363903, '每当': -10.25135885791871, '救助': -11.232188110930437, '撞到': -12.330800399598546, '副食品': -12.330800399598546, '提起': -9.5582116773587646, '放掉': -11.637653219038601, '翻看': -12.330800399598546, '看重': -11.232188110930437, '此片': -12.330800399598546, '更何况': -11.637653219038601, '琥珀': -10.944506038478655, '渲染': -9.6917430699832874, '可能': -11.637653219038601, '馈赠': -12.330800399598546, '谋': -11.637653219038601, '威慑': -10.539040930370492, '大学': -12.330800399598546, '有效': -12.330800399598546, '承担': -10.384890250543233, '袭击': -9.1953061836693966, '强化': -9.335068126044554, '飞临': -10.384890250543233, '不住': 
-11.637653219038601, '馆长': -12.330800399598546, '围绕': -7.911959791801948, '强大': -8.4806527978884869, '汇林': -11.637653219038601, '国家': -9.386361420432106, '丰田': -12.330800399598546, '共建': -9.9329051268001756, '邻居': -12.330800399598546, '坝': -10.721362487164445, '布告': -12.330800399598546, '现居': -9.386361420432106, '是不是': -10.25135885791871, '治理': -10.944506038478655, '安': -10.384890250543233, '揭开': -10.944506038478655, '拘于': -10.944506038478655, '趟': -9.5582116773587646, '简单': -12.330800399598546, '寮': -12.330800399598546, '进来': -12.330800399598546, '集中': -10.944506038478655, '自己': -10.25135885791871, '首家': -12.330800399598546, '随后': -10.028215306604499, '中国人': -12.330800399598546, '悲愤': -12.330800399598546, '透过': -10.539040930370492, '交出': -12.330800399598546, '得到': -8.2703573890521263, '往返': -8.3605084860464238, '处置': -10.944506038478655, '歪曲': -11.637653219038601, '转入': -10.721362487164445, '定': -10.721362487164445, '会议': -8.3234672143660742, '远眺': -10.721362487164445, '举办地': -10.384890250543233, '【': -7.1267937125217502, '环保部': -11.637653219038601, '组成': -12.330800399598546, '风靡': -9.335068126044554, '坚定': -12.330800399598546, '屠杀': -11.637653219038601, '取得': -10.028215306604499, '汇聚': -11.232188110930437, '大路': -12.330800399598546, '宣告': -10.944506038478655, '福': -8.9635045696120716, '供给': -11.232188110930437, '抱': -11.637653219038601, '中央社': -12.330800399598546, '英语': -12.330800399598546, '兼顾': -11.637653219038601, '投身于': -10.384890250543233, '减持': -12.330800399598546, '乙烯': -11.637653219038601, '本身': -12.330800399598546, '台湾岛': -12.330800399598546, '客运站': -12.330800399598546, '追捧': -12.330800399598546, '仔': -12.330800399598546, '园区': -12.330800399598546, '契合': -11.232188110930437, '致函': -10.384890250543233, '秦岭—让': -12.330800399598546, '玉兰': -10.539040930370492, '初一': -12.330800399598546, '了': -4.008406286487376, '南岸': -11.637653219038601, '吃透': -10.944506038478655, '扩展到': -11.232188110930437, '食堂': -12.330800399598546, '误闯': -12.330800399598546, '原由': 
-11.637653219038601, '牵动': -11.232188110930437, '天佑': -10.944506038478655, '比': -7.0526857403680285, '升格': -12.330800399598546, '玻': -11.637653219038601, '抓捕': -12.330800399598546, '怎么': -11.637653219038601, '珠江口': -12.330800399598546, '抢滩': -12.330800399598546, '民革': -10.539040930370492, '该县': -8.5466107656802848, '推行': -10.944506038478655, '噶丹': -11.232188110930437, '凝': -12.330800399598546, '民主党': -12.330800399598546, '外环线': -12.330800399598546, '空': -12.330800399598546, '投资国': -11.232188110930437, '留意': -11.637653219038601, '缓解': -8.6419209454846104, '遣使': -11.637653219038601, '战': -10.944506038478655, '逃往': -9.0349635335942171, '埋藏': -12.330800399598546, '提到': -8.6172283328942374, '年轻': -10.539040930370492, '攻入': -10.721362487164445, '封锁': -7.4709879952368734, '戍守': -12.330800399598546, '遇难': -12.330800399598546, '品': -12.330800399598546, '涨': -12.330800399598546, '习惯': -11.637653219038601, '主宰': -11.232188110930437, '抛弃': -11.637653219038601, '显': -10.721362487164445, '出具': -12.330800399598546, '响亮': -11.637653219038601, '世界级': -12.330800399598546, '翔': -10.721362487164445, '大清': -10.133575822262326, '赎命': -12.330800399598546, '遇上': -10.944506038478655, '出航': -12.330800399598546, '属国': -12.330800399598546, '成就': -10.539040930370492, '客车': -10.721362487164445, '接近': -9.335068126044554, '民生': -11.232188110930437, '也': -12.330800399598546, '干': -12.330800399598546, '农夫山泉': -12.330800399598546, '处': -10.028215306604499, '刷新': -10.25135885791871, '演艺': -10.944506038478655, '卖掉': -12.330800399598546, '预订': -12.330800399598546, '盟友': -10.539040930370492, '澳头': -12.330800399598546, '海': -8.9296030179363903, '占用': -9.9329051268001756, '研究': -8.4595993886906555, '~': -12.330800399598546, '陈列': -12.330800399598546, '瑞': -11.232188110930437, '发布': -9.2862779618751237, '转道': -10.721362487164445, '炫': -10.25135885791871, '笔下': -12.330800399598546, '下穿': -10.25135885791871, '东侧': -12.330800399598546, '传言': -10.944506038478655, '表示': -8.5466107656802848, '德迈': 
-12.330800399598546, '繁殖地': -12.330800399598546, '害怕': -11.637653219038601, '最高': -12.330800399598546, '航空母舰': -9.2862779618751237, '回馈': -11.232188110930437, '追问': -12.330800399598546, '刊出': -11.232188110930437, '馒头': -11.637653219038601, '称之为': -12.330800399598546, '媲美': -10.25135885791871, '如同': -10.721362487164445, '连同': -10.539040930370492, '运用于': -11.232188110930437, '酷似': -8.8650644967988192, '政府': -10.721362487164445, '待': -12.330800399598546, '成为': -5.4555683123219687, '所在地': -9.1119245747303452, '换挡': -12.330800399598546, '咱们': -10.539040930370492, '歌唱': -10.133575822262326, '聚居地': -11.637653219038601, '建国': -12.330800399598546, '如果': -7.0126804057543293, '振兴': -10.384890250543233, '东郊': -12.330800399598546, '升': -11.232188110930437, '轮': -12.330800399598546, '搭建': -10.944506038478655, '驶离': -10.028215306604499, '多于': -11.637653219038601, '属': -8.1719173162388739, '乌龙': -12.330800399598546, '窃听': -12.330800399598546, '国民党': -10.133575822262326, '主帅': -12.330800399598546, '福州港': -12.330800399598546, '赛季': -10.944506038478655, '没': -12.330800399598546, '贯彻': -10.944506038478655, '放眼': -10.944506038478655, '不见': -12.330800399598546, '地点': -10.25135885791871, '扎': -10.721362487164445, '要素': -12.330800399598546, '贸易公司': -12.330800399598546, '翔安': -9.8458937498105463, '农家乐': -12.330800399598546, '庆贺': -11.637653219038601, '钦点': -12.330800399598546, '庆': -12.330800399598546, '靠': -10.384890250543233, '突出': -9.386361420432106, '屏幕': -12.330800399598546, '为此': -10.25135885791871, '转出': -12.330800399598546, '污染': -11.232188110930437, '半山': -11.232188110930437, '联邦': -12.330800399598546, '带给': -9.8458937498105463, '共聚': -12.330800399598546, '送进': -11.232188110930437, '公路': -9.8458937498105463, '提速': -12.330800399598546, '对': -4.3597146460929386, '山城': -12.330800399598546, '记录': -9.6917430699832874, '有名': -12.330800399598546, '调处': -12.330800399598546, '想': -10.721362487164445, '回去': -12.330800399598546, '确立': -10.25135885791871, '所处': -10.944506038478655, '相关': 
-10.539040930370492, '麦当劳': -11.637653219038601, '去哪儿': -12.330800399598546, '海棠湾': -11.232188110930437, '地铁': -10.721362487164445, '华埠': -12.330800399598546, '返回': -6.7817243147033262, '水上': -11.637653219038601, '节气': -12.330800399598546, '下半月': -12.330800399598546, '於': -10.944506038478655, '鲁': -12.330800399598546, '落入': -10.539040930370492, '齐全': -12.330800399598546, '高铁': -10.384890250543233, '救出':
""" Globus Contents Manager """ import os import json import mimetypes import tempfile import time from traitlets import (Unicode, Int) import globus_sdk from fair_research_login import NativeClient from tornado.web import HTTPError from datetime import datetime from nbformat import from_dict, reads try: #PY3 from base64 import encodebytes, decodebytes except ImportError: #PY2 from base64 import encodestring as encodebytes, decodestring as decodebytes from globuscontents.ipycompat import ( ContentsManager, HasTraits ) from globuscontents.utils import ( base_model, convert_to_datetime, DUMMY_CREATED_DATE, NBFORMAT_VERSION ) TUTORIAL_ENDPOINT1 = "ddb59aef-6d04-11e5-ba46-22000b92c6ec" TUT1_BASE_PATH = "/~" DEFAULT_CLIENT_ID = '7414f0b4-7d05-4bb6-bb00-076fa3f17cf5' DEFAULT_APP_NAME = 'Globus Jupyter Contents Manager' DEFAULT_SCOPES = 'urn:globus:auth:scope:transfer.api.globus.org:all' class GlobusContentsManager(ContentsManager, HasTraits): """ Custom Contents Manager with Globus functionality. """ def __init__(self, *args, **kwargs): super(GlobusContentsManager, self).__init__(*args, **kwargs) # TODO: Make this check for tokens in the environment (i.e., JupyterHub) # Then load via Native App. Figure out login. client = NativeClient(client_id=self.client_id, app_name=self.app_name) tokens = client.load_tokens() transfer_access_token = tokens['transfer.api.globus.org']['access_token'] # then use that token to create an AccessTokenAuthorizer transfer_auth = globus_sdk.AccessTokenAuthorizer(transfer_access_token) # finally, use the authorizer to create a TransferClient object self.transfer_client = globus_sdk.TransferClient(authorizer=transfer_auth) self.transfer_client.endpoint_autoactivate(self.globus_remote_endpoint) # TODO: How to handle caching dir? Needs to be writable. 
On laptops, # tmp dirs may not be accessible by GCP #self._cache_dir = tempfile.TemporaryDirectory() self._cache_dir = '/Users/rpwagner/tmp/jupyter_contents_cache' client_id = Unicode(help="""The Globus Native App client ID to use.""").tag(config=True) def _client_id_default(self): return DEFAULT_CLIENT_ID app_name = Unicode(help="""The Globus Native App name to use.""").tag(config=True) def _app_name_default(self): return DEFAULT_APP_NAME scopes = Unicode(help="""The Globus Auth scopes to use.""").tag(config=True) def _scopes_default(self): return DEFAULTS_SCOPES globus_remote_endpoint = Unicode(help="""The remote endpoint to serve data from.""").tag(config=True) def _globus_remote_endpoint_default(self): return TUTORIAL_ENDPOINT1 globus_remote_endpoint_basepath = Unicode(help="""The absolute path on the remote endpoint to become the root of the Jupyter file system.""").tag(config=True) def _globus_remote_endpoint_basepath_default(self): return TUT1_BASE_PATH globus_local_endpoint = Unicode(help="""Local Globus endpoint for caching files.""").tag(config=True) def _globus_local_endpoint_default(self): return '' globus_cache_wait = Int(help="""How long to wait for a caching transfer to finish in seconds.""").tag(config=True) def _globus_cache_wait_default(self): return 60 globus_cache_wait_poll = Int(help="""How frequently to poll (in seconds) a transfer status when caching.""").tag(config=True) def _globus_cache_wait_poll_default(self): return 10 #https://app.globus.org/file-manager?origin_id= def get(self, path, content=True, type=None, format=None): """ Takes a path for an entity and returns its model Parameters ---------- path : str the API path that describes the relative path for the target content : bool Whether to include the contents in the reply type : str, optional The requested type - 'file', 'notebook', or 'directory'. Will raise HTTPError 400 if the content doesn't match. format : str, optional The requested format for file contents. 
'text' or 'base64'. Ignored if this returns a notebook or directory model. Returns ------- model : dict the contents model. If content=True, returns the contents of the file or directory as well. """ # TODO: CACHING! Layer goes here. self.log.debug('Globus Contents get path = {} type = {}'.format(path, type)) if type == 'directory': model = self._get_dir(path, content=content) elif type == 'notebook' or (type is None and path.endswith('.ipynb')): model = self._get_notebook(path, content=content) elif type == 'file': model = self._get_file(path, content=content, format=format) else: raise HTTPError(400, u'%s no type specified' % path, reason='bad type') return model def dir_exists(self, path): self.log.debug('Globus Contents dir_exists path = {}'.format(path)) ep_path = os.path.join(self.globus_remote_endpoint_basepath, path.lstrip('/')) if ep_path[-1] != '/': ep_path = '{}/'.format(ep_path) resp = self.transfer_client.operation_ls(self.globus_remote_endpoint, path=ep_path, show_hidden=False) # if value returned is not an exception or None then no directory exists at the given path if isinstance(resp, globus_sdk.exc.TransferAPIError) or resp is None: return False # if no exception was returned then the given path does point to a directory return True def file_exists(self, file_path): # TODO: Implement return True def rename_file(self, old_path, new_path): # Rename a file or directory # if possible rename local file/directory try: super().rename_file(old_path, new_path) except FileNotFoundError: print("File/directory not found on local storage") # get the id of the endpoint that the file/directory exists on # endpoint_id = input("Enter the Endpoint ID") # get the transfer client #transfer_client = self.fs.get_transfer_client() # auth = self.nc.get_authorizers()['transfer.api.globus.org'] # transfer_client = globus_sdk.TransferClient(authorizer=auth) try: # Make sure that endpoint is activated transfer_client.endpoint_autoactivate(self.globus_remote_endpoint) # rename 
the file/directory on the endpoint transfer_client.operation_rename(self.globus_remote_endpoint, old_path, new_path) except globus_sdk.exc.TransferAPIError: print("Error occurred when trying to rename file/directory") def delete_file(self, path): # Delete the file or directory at the given path try: # if possible, delete local file/directory super().delete_file(path) except: pass try: # get the id of the endpoint that the file/directory exists on # endpoint_id = input("Enter the Endpoint ID") # get the transfer client auth = self.nc.get_authorizers()['transfer.api.globus.org'] transfer_client = globus_sdk.TransferClient(authorizer=auth) ddata = globus_sdk.DeleteData(transfer_client, self.globus_remote_endpoint, recursive=True) # Recursively delete path contents (because of recursive flag set above) ddata.add_item(path) # Make sure that endpoint is activated transfer_client.endpoint_autoactivate(self.globus_remote_endpoint) submit_result = transfer_client.submit_delete(ddata) print("Task ID:", submit_result["task_id"]) except: pass return def is_hidden(self, path): return False def save(self, model, path): """ Save a file or directory model to a path. 
""" # try: # # try to save file/directory model locally # super().save(model, path) # except: # pass if "type" not in model: raise HTTPError(400, "No model type provided") if "content" not in model and model["type"] != "directory": raise HTTPError(400, "No file content provided") if model["type"] not in ("file", "directory", "notebook"): raise HTTPError(400, "Unhandled contents type: %s" % model["type"]) try: if model["type"] == "notebook": validation_message = self._save_notebook(model, path) elif model["type"] == "file": validation_message = self._save_file(model, path) else: validation_message = self._save_directory(path) except Exception as exc: err_message = "Unexpected error while saving file: %s %s" % (path, exc) raise HTTPError(500, err_message) model = self.get(path, type=model["type"], content=False) if validation_message is not None: model["message"] = validation_message return model def _save_notebook(self, model, path): nb_contents = from_dict(model["content"]) self.check_and_sign(nb_contents, path) file_contents = json.dumps(model["content"]) #self.fs.writenotebook(path, file_contents) self.validate_notebook_model(model) return model.get("message") def _save_file(self, model, path): file_contents = model["content"] file_format = model.get('format') self.fs.write(path, file_contents, file_format) return "" def _save_directory(self, path): """ Creates a new directory using the specified path. 
""" self.fs.mkdir(path) return "" def _model_from_ls_item(self, path, ls_item): model = base_model(os.path.join(path, ls_item['name'])) model['size'] = ls_item['size'] model['last_modified'] = ls_item['last_modified'] if ls_item['type'] == 'dir': model['type'] = 'directory' else: model['type'] = 'file' if ls_item['name'].endswith('.ipynb'): model['type'] = 'notebook' model['format'] = 'json' else: model['mimetype'] = mimetypes.guess_type(ls_item['name'])[0] or 'text/plain' model['format'] = 'text' if not model['mimetype'].startswith('text'): model['format'] = 'base64' return model def _get_dir(self, path, content=True): self.log.debug('Globus Contents path path = {}'.format(path)) ep_path = os.path.join(self.globus_remote_endpoint_basepath, path.lstrip('/')) if ep_path[-1] != '/': ep_path = '{}/'.format(ep_path) resp = self.transfer_client.operation_ls(self.globus_remote_endpoint, path=ep_path, show_hidden=False) if isinstance(resp, globus_sdk.exc.TransferAPIError) or resp is None: raise HTTPError(400, '{} is not a directory'.format(path), reason='bad type') model = base_model(path) model['type'] = 'directory' model['size'] = None if content: model["format"] = "json" model["content"] = [self._model_from_ls_item(path, item) for item in resp] self.log.debug('Globus Contents _dir_model path = {} model = {}'.format(path, str(model))) return model def _get_notebook(self, path, content=True, format=None): # TODO: Needs checkpoints # This should become the general "get file" layer. # Turn off notifications. 
self.log.debug('Globus Contents get notebook path = {}'.format(path)) remote_notebook_path = os.path.join(self.globus_remote_endpoint_basepath, path.lstrip('/')) local_notebook_path = os.path.join(self._cache_dir, path.lstrip('/')) self.log.debug('remote notebook path = {}'.format(remote_notebook_path)) self.log.debug('local notebook path = {}'.format(local_notebook_path)) nb_dir = os.path.dirname(local_notebook_path) label = "Jupyter ContentsManager caching" # TransferData() automatically gets a submission_id for once-and-only-once submission tdata = globus_sdk.TransferData(self.transfer_client, self.globus_remote_endpoint, self.globus_local_endpoint, notify_on_succeeded=False, label=label) tdata.add_item(remote_notebook_path, local_notebook_path) # Ensure endpoints are activated self.transfer_client.endpoint_autoactivate(self.globus_remote_endpoint) self.transfer_client.endpoint_autoactivate(self.globus_local_endpoint) submit_result = self.transfer_client.submit_transfer(tdata) task_id = submit_result['task_id'] if self.transfer_client.task_wait(task_id, timeout=self.globus_cache_wait, polling_interval=self.globus_cache_wait_poll): task = self.transfer_client.get_task(task_id) status = task["status"] if status != "SUCCEEDED": self.transfer_client.cancel_task(task_id) HTTPError(502, 'Unable to cache {}'.format(path), reason='bad transfer') else: self.transfer_client.cancel_task(task_id) HTTPError(408, 'Too long getting {}'.format(path), reason='slow caching') model = base_model(path) model['type'] = 'notebook' file_content = open(local_notebook_path, 'r').read() nb_content = reads(file_content, as_version=NBFORMAT_VERSION) self.mark_trusted_cells(nb_content, path) model["format"] = "json" model["content"] = nb_content self.validate_notebook_model(model) return model def _read_local_file(self, local_path, format): """Read a non-notebook file. local_path: The path to be read. format: If 'text', the contents will be decoded as UTF-8. 
If 'base64', the raw bytes contents will be encoded as base64. If not specified, try to decode as UTF-8, and fall back to base64 """ with open(local_path, 'rb') as f: bcontent = f.read() if format is None or format == 'text': # Try to interpret as unicode if format is unknown or if unicode # was explicitly requested. try: return bcontent.decode('utf8'), 'text' except UnicodeError: if format == 'text': raise HTTPError( 400, "%s is not UTF-8 encoded" % local_path, reason='bad format', )
points... if out is True: val = ((f.normal).normalized() * inset_amount) else: val = -((f.normal).normalized() * inset_amount) p6 = angle_rotation(p, p + val, vec1, radians(90)) else: # if the corner is an actual corner val = ((f.normal).normalized() * h) if out is True: # this -(p - (vec2.normalized() * adj))) is just the freaking axis afaik... p6 = angle_rotation( p, p + val, -(p - (vec2.normalized() * adj)), -radians(90) ) else: p6 = angle_rotation( p, p - val, ((p - (vec1.normalized() * adj)) - (p - (vec2.normalized() * adj))), -radians(90) ) orientation_vertex_list.append(p6) new_inner_face = [] orientation_vertex_list_length = len(orientation_vertex_list) ovll = orientation_vertex_list_length for j in range(ovll): q = orientation_vertex_list[j] q1 = orientation_vertex_list[(j - 1) % ovll] q2 = orientation_vertex_list[(j + 1) % ovll] # again, these are just vectors between somewhat displaced corner vertices vec1_ = q - q1 vec2_ = q - q2 ang_ = vec1_.angle(vec2_) # the angle between them if round(degrees(ang_)) == 180 or round(degrees(ang_)) == 0.0: # again... if it's really a line... v = bme.verts.new(q) new_inner_face.append(v) dict_0[j].append(v) else: # s.a. 
if radius is False: h_ = distance * (1 / cos(ang_ * 0.5)) d = distance elif radius is True: h_ = distance / sin(ang_ * 0.5) d = distance / tan(ang_ * 0.5) # max(d) is vec1_.magnitude * 0.5 # or vec2_.magnitude * 0.5 respectively # only functional difference v if d > vec1_.magnitude * 0.5: d = vec1_.magnitude * 0.5 if d > vec2_.magnitude * 0.5: d = vec2_.magnitude * 0.5 # only functional difference ^ q3 = q - (vec1_.normalized() * d) q4 = q - (vec2_.normalized() * d) # these are new verts somewhat offset from the corners rp_ = q - ((q - ((q3 + q4) * 0.5)).normalized() * h_) # reference point inside the curvature axis_ = vec1_.cross(vec2_) # this should really be just the face normal vec3_ = rp_ - q3 vec4_ = rp_ - q4 rot_ang = vec3_.angle(vec4_) cornerverts = [] for o in range(number_of_sides + 1): # this calculates the actual new vertices q5 = angle_rotation(rp_, q4, axis_, rot_ang * o / number_of_sides) v = bme.verts.new(q5) # creates new bmesh vertices from it bme.verts.index_update() dict_0[j].append(v) cornerverts.append(v) cornerverts.reverse() new_inner_face.extend(cornerverts) if out is False: f = bme.faces.new(new_inner_face) f.select_set(True) elif out is True and kp is True: f = bme.faces.new(new_inner_face) f.select_set(True) n2_ = len(dict_0) # these are the new side faces, those that don't depend on cornertype for o in range(n2_): list_a = dict_0[o] list_b = dict_0[(o + 1) % n2_] bme.faces.new([list_a[0], list_b[0], list_b[-1], list_a[1]]) bme.faces.index_update() # cornertype 1 - ngon faces if type_enum == 'opt0': for k in dict_0: if len(dict_0[k]) > 2: bme.faces.new(dict_0[k]) bme.faces.index_update() # cornertype 2 - triangulated faces if type_enum == 'opt1': for k_ in dict_0: q_ = dict_0[k_][0] dict_0[k_].pop(0) n3_ = len(dict_0[k_]) for kk in range(n3_ - 1): bme.faces.new([dict_0[k_][kk], dict_0[k_][(kk + 1) % n3_], q_]) bme.faces.index_update() del_ = [bme.faces.remove(f) for f in list_del] if del_: del del_ # Operator class 
MESH_OT_face_inset_fillet(Operator): bl_idname = "mesh.face_inset_fillet" bl_label = "Face Inset Fillet" bl_description = ("Inset selected and Fillet (make round) the corners \n" "of the newly created Faces") bl_options = {"REGISTER", "UNDO"} # inset amount inset_amount : bpy.props.FloatProperty( name="Inset amount", description="Define the size of the Inset relative to the selection", default=0.04, min=0, max=100.0, step=1, precision=3 ) # number of sides number_of_sides : bpy.props.IntProperty( name="Number of sides", description="Define the roundness of the corners by specifying\n" "the subdivision count", default=4, min=1, max=100, step=1 ) distance : bpy.props.FloatProperty( name="", description="Use distance or radius for corners' size calculation", default=0.04, min=0.00001, max=100.0, step=1, precision=3 ) out : bpy.props.BoolProperty( name="Outside", description="Inset the Faces outwards in relation to the selection\n" "Note: depending on the geometry, can give unsatisfactory results", default=False ) radius : bpy.props.BoolProperty( name="Radius", description="Use radius for corners' size calculation", default=False ) type_enum : bpy.props.EnumProperty( items=[('opt0', "N-gon", "N-gon corners - Keep the corner Faces uncut"), ('opt1', "Triangle", "Triangulate corners")], name="Corner Type", default="opt0" ) kp : bpy.props.BoolProperty( name="Keep faces", description="Do not delete the inside Faces\n" "Only available if the Out option is checked", default=False ) def draw(self, context): layout = self.layout layout.label(text="Corner Type:") row = layout.row() row.prop(self, "type_enum", text="") row = layout.row(align=True) row.prop(self, "out") if self.out is True: row.prop(self, "kp") row = layout.row() row.prop(self, "inset_amount") row = layout.row() row.prop(self, "number_of_sides") row = layout.row() row.prop(self, "radius") row = layout.row() dist_rad = "Radius" if self.radius else "Distance" row.prop(self, "distance", text=dist_rad) def 
execute(self, context): # this really just prepares everything for the main function inset_amount = self.inset_amount number_of_sides = self.number_of_sides distance = self.distance out = self.out radius = self.radius type_enum = self.type_enum kp = self.kp edit_mode_out() ob_act = context.active_object bme = bmesh.new() bme.from_mesh(ob_act.data) # this face_index_list = [f.index for f in bme.faces if f.select and f.is_valid] if len(face_index_list) == 0: self.report({'WARNING'}, "No suitable Face selection found. Operation cancelled") edit_mode_in() return {'CANCELLED'} elif len(face_index_list) != 0: face_inset_fillet(bme, face_index_list, inset_amount, distance, number_of_sides, out, radius, type_enum, kp) bme.to_mesh(ob_act.data) edit_mode_in() return {'FINISHED'} # ********** Edit Multiselect ********** class VIEW3D_MT_Edit_MultiMET(Menu): bl_label = "Multi Select" def draw(self, context): layout = self.layout layout.operator_context = 'INVOKE_REGION_WIN' layout.operator("multiedit.allselect", text="All Select Modes", icon='RESTRICT_SELECT_OFF') # Select Tools class VIEW3D_MT_Select_Vert(Menu): bl_label = "Select Vert" def draw(self, context): layout = self.layout layout.operator_context = 'INVOKE_REGION_WIN' layout.operator("multiedit.vertexselect", text="Vertex Select Mode", icon='VERTEXSEL') layout.operator("multiedit.vertedgeselect", text="Vert & Edge Select", icon='EDGESEL') layout.operator("multiedit.vertfaceselect", text="Vert & Face Select", icon='FACESEL') class VIEW3D_MT_Select_Edge(Menu): bl_label = "Select Edge" def draw(self, context): layout = self.layout layout.operator_context = 'INVOKE_REGION_WIN' layout.operator("multiedit.edgeselect", text="Edge Select Mode", icon='EDGESEL') layout.operator("multiedit.vertedgeselect", text="Edge & Vert Select", icon='VERTEXSEL') layout.operator("multiedit.edgefaceselect", text="Edge & Face Select", icon='FACESEL') class VIEW3D_MT_Select_Face(Menu): bl_label = "Select Face" def draw(self, context): layout = 
self.layout layout.operator_context = 'INVOKE_REGION_WIN' layout.operator("multiedit.faceselect", text="Face Select Mode", icon='FACESEL') layout.operator("multiedit.vertfaceselect", text="Face & Vert Select", icon='VERTEXSEL') layout.operator("multiedit.edgefaceselect", text="Face & Edge Select", icon='EDGESEL') # multiple edit select modes. class VIEW3D_OT_multieditvertex(Operator): bl_idname = "multiedit.vertexselect" bl_label = "Vertex Mode" bl_description = "Vert Select Mode On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') if bpy.ops.mesh.select_mode != "EDGE, FACE": bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') return {'FINISHED'} class VIEW3D_OT_multieditedge(Operator): bl_idname = "multiedit.edgeselect" bl_label = "Edge Mode" bl_description = "Edge Select Mode On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE') if bpy.ops.mesh.select_mode != "VERT, FACE": bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE') return {'FINISHED'} class VIEW3D_OT_multieditface(Operator): bl_idname = "multiedit.faceselect" bl_label = "Multiedit Face" bl_description = "Face Select Mode On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='FACE') if bpy.ops.mesh.select_mode != "VERT, EDGE": bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='FACE') return {'FINISHED'} class VIEW3D_OT_multieditvertedge(Operator): bl_idname = "multiedit.vertedgeselect" bl_label = "Multiedit Face" bl_description = "Vert & Edge Select Modes On" bl_options = {'REGISTER', 
'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') if bpy.ops.mesh.select_mode != "VERT, EDGE, FACE": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') bpy.ops.mesh.select_mode(use_extend=True, use_expand=False, type='EDGE') return {'FINISHED'} class VIEW3D_OT_multieditvertface(Operator): bl_idname = "multiedit.vertfaceselect" bl_label = "Multiedit Face" bl_description = "Vert & Face Select Modes On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') if bpy.ops.mesh.select_mode != "VERT, EDGE, FACE": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') bpy.ops.mesh.select_mode(use_extend=True, use_expand=False, type='FACE') return {'FINISHED'} class VIEW3D_OT_multieditedgeface(Operator): bl_idname = "multiedit.edgefaceselect" bl_label = "Mode Face Edge" bl_description = "Edge & Face Select Modes On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE') if bpy.ops.mesh.select_mode != "VERT, EDGE, FACE": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='EDGE') bpy.ops.mesh.select_mode(use_extend=True, use_expand=False, type='FACE') return {'FINISHED'} class VIEW3D_OT_multieditall(Operator): bl_idname = "multiedit.allselect" bl_label = "All Edit Select Modes" bl_description = "Vert & Edge & Face Select Modes On" bl_options = {'REGISTER', 'UNDO'} def execute(self, context): if context.object.mode != "EDIT": bpy.ops.object.mode_set(mode="EDIT") 
bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') if bpy.ops.mesh.select_mode != "VERT, EDGE, FACE": bpy.ops.object.mode_set(mode="EDIT") bpy.ops.mesh.select_mode(use_extend=False, use_expand=False, type='VERT') bpy.ops.mesh.select_mode(use_extend=True, use_expand=False, type='EDGE') bpy.ops.mesh.select_mode(use_extend=True, use_expand=False, type='FACE') return {'FINISHED'} # ######################################## # ##### GUI and registration ############# # ######################################## # menu containing all tools class VIEW3D_MT_edit_mesh_tools(Menu): bl_label = "Mesh Tools" def draw(self, context): layout = self.layout layout.operator("mesh.remove_doubles") layout.operator("mesh.dissolve_limited") layout.operator("mesh.flip_normals") props = layout.operator("mesh.quads_convert_to_tris") props.quad_method = props.ngon_method = 'BEAUTY' layout.operator("mesh.tris_convert_to_quads") layout.operator('mesh.vertex_chamfer', text="Vertex Chamfer") layout.operator("mesh.bevel", text="Bevel Vertices").affect = 'VERTICES' layout.operator('mesh.offset_edges', text="Offset Edges") layout.operator('mesh.fillet_plus', text="Fillet Edges") layout.operator("mesh.face_inset_fillet", text="Face Inset
self.tanhFM[NN1:NN] = RW.tanhF self.omegaM[NN1:NN] = RW.omega self.kiM[NN1:NN] = RW.ki self.aiM[NN1:NN] = RW.ai self.kDirM[NN1:NN,:] =RW.kDir[:,:] self.phiM[NN1:NN] = RW.phi for ij in range(3): self.vDir_c[ij] = self.vDir[ij] self.vDir_ = self.vDir_c for ij in range(self.Nall): for kk in range(3): self.kDir_cM[3*ij+kk] = self.kDirM[ij,kk] self.waveDir_cM[3*ij+kk] = old_div(self.kDirM[ij,kk], self.kiM[ij]) self.omega_cM[ij] = self.omegaM[ij] self.ki_cM[ij] =self.kiM[ij] self.tanh_cM[ij] = self.tanhFM[ij] self.ai_cM[ij] = self.aiM[ij] self.phi_cM[ij] = self.phiM[ij] self.kDirM_ = self.kDir_cM self.omegaM_ = self.omega_cM self.kiM_ =self.ki_cM self.aiM_ = self.ai_cM self.tanhM_ = self.tanh_cM self.phiM_ = self.phi_cM self.waveDirM_ = self.waveDir_cM def _cpp_eta(self, x, t): return __cpp_etaRandom(x,t,self.kDirM_, self.omegaM_,self.phiM_,self.aiM_, self.Nall,self.fast) def eta(self, x, t): """Calculates free surface elevation (RandomWaves class) Parameters ---------- x : numpy.ndarray Position vector t : float Time variable Returns -------- float Free-surface elevation as a float """ cython.declare(xx=cython.double[3]) xx[0] = x[0] xx[1] = x[1] xx[2] = x[2] return self._cpp_eta(xx,t) def _cpp_u(self, U, x, t): __cpp_uDir(U, x,t,self.kDirM_, self.kiM_, self.omegaM_,self.phiM_,self.aiM_,self.mwl,self.depth, self.Nall, self.waveDirM_, self.vDir_, self.tanhM_, self.gAbs, self.fast) def u(self, x, t): """Calculates wave velocity vector (RandomWaves class) Parameters ---------- x : numpy.ndarray Position vector t : float Time variable Returns -------- numpy.ndarray Velocity vector as 1D array """ cython.declare(xx=cython.double[3]) cython.declare(cppU=cython.double[3]) for ii in range(3): xx[ii] = x[ii] cppU[ii] = 0. 
        # --- Tail of the previous class's u() method (its `def` lies above this
        # chunk): copy the C-computed velocity components into a fresh numpy
        # vector and return it. ---
        U = np.zeros(3,)
        self._cpp_u(cppU,xx,t)
        U[0] = cppU[0]
        U[1] = cppU[1]
        U[2] = cppU[2]
        return U


class DirectionalWaves(object):
    """
    This class is used for generating directional random waves
    using linear reconstruction of components from a wave spectrum

    Parameters
    ----------
    M : int
        Number of directional components (per side of the lead direction;
        the total number of directional bins is 2*M+1)
    Tp : float
        Peak wave period
    Hs : float
        Significant wave height
    mwl : float
        Still water level
    depth : float
        Water depth
    waveDir0 : numpy.ndarray
        Leading wave direction vector
    g : Numpy array
        Gravitational acceleration vector
    N : int
        Number of frequency components
    bandFactor : float
        Spectral band factor. fmax = bandFactor/Tp, fmin = 1/(bandFactor*Tp)
    spectName : string
        Name of spectral distribution
    spreadName : string
        Name of spreading distribution
    spectral_params : dict
        Dictionary of arguments specific to the spectral distribution
        (see RandomWaves class)
    spread_params : dict
        Dictionary of arguments specific to the spreading distribution
        Example for Cos-2s = {"s": 10}
        Example for Mitsuyashu-type = {"fp": 1/Tp, "smax": 10}
    phi : numpy.ndarray
        Component phases (if set to None, phases are picked at random)
    phiSymm : bool
        Switch for enabling a symmetric phase allocation across directional
        components
    fast : bool
        Switch for enabling optimised functions
    """

    def __cinit__(self,
                  M,                     # number of directional bins on each side of the lead direction
                  Tp,                    # peak wave period
                  Hs,                    # significant wave height
                  mwl,                   # mean water level [m]
                  depth,                 # water depth [m]
                  waveDir0,              # lead direction vector
                  g,                     # gravitational acceleration vector
                  N,                     # number of frequency components
                  bandFactor,            # spectral band factor
                  spectName,             # unknown names raise an error and list the available spectra
                  spreadName,            # unknown names raise an error and list the available spreading functions
                  spectral_params = None,  # e.g. JONSWAP: {"gamma": 3.3, "TMA": True, "depth": depth}
                  spread_params = None,
                  phi=None,              # phi must be an (2*M+1)*N numpy array
                  phiSymm = False,       # When true, phi[-pi/2,0] is symmetric to phi[0,pi/2]
                  fast = True
                  ):
        self.fast = fast
        # Resolve the spreading function by name from the registered ones.
        validSpread = [cos2s,mitsuyasu]
        spread_fun = loadExistingFunction(spreadName, validSpread)
        self.Mtot = 2*M+1            # total number of directional bins
        self.N = N
        self.Nall = self.Mtot*self.N  # total number of wave components
        self.waveDir0 = setDirVector(waveDir0)
        self.vDir = setVertDir(g)
        # Guard the component count: the C-level arrays are (presumably) of
        # fixed capacity 100000 -- TODO confirm against the .pxd declarations.
        if(self.Nall > 100000):
            logEvent("ERROR! Wavetools.py: Maximum (number of frequencies) x (No of spectra) for DirectionalWaves is 100000 ",level=0)
        # Loading Random waves to get the frequency array, the wavelengths and
        # the frequency spectrum.
        RW = RandomWaves(Tp,
                         Hs,
                         mwl,
                         depth,
                         self.waveDir0,
                         g,
                         N,
                         bandFactor,
                         spectName,
                         spectral_params,
                         phi = None)
        # Directional waves propagate usually in a plane -90 to 90 deg with
        # respect to the direction vector, normal to the gravity direction.
        # Rotating the waveDir0 vector around the g vector to produce the
        # directional space.
        from .SpatialTools import rotation3D
        thetas = np.linspace(old_div(-M_PI,2), old_div(M_PI,2), 2*M+1)
        dth = (thetas[1] - thetas[0])
        self.waveDirs = np.zeros((2*M+1,3),)   # unit direction per bin
        self.phiDirs = np.zeros((2*M+1,N),)    # phase per (direction, frequency)
        self.aiDirs = np.zeros((2*M+1,N),)     # amplitude per (direction, frequency)
        self.gAbs = sqrt(g[0]*g[0]+g[1]*g[1]+g[2]*g[2])
        temp_array = np.zeros((1,3),)
        temp_array[0,:] = waveDir0
        directions = list(range(0,self.Mtot))

        # Initialising wave directions: rotate the lead direction around the
        # vertical axis for each directional bin.
        for rr in directions:
            theta = thetas[rr]
            self.waveDirs[rr,:] = rotation3D(temp_array,theta,self.vDir)[0,:]
            self.waveDirs[rr,:] = setDirVector( self.waveDirs[rr,:])

        # Initialising phasing: random unless caller supplies a full
        # (2*M+1, N) phase array.
        if phi is None:
            self.phiDirs = 2.0*M_PI*np.random.rand(self.Mtot,RW.fi.shape[0])
        elif np.shape(phi) == (2*M+1,RW.fi.shape[0]):
            self.phiDirs = phi
        else:
            logEvent("ERROR! Wavetools.py: phi in DirectionalWaves class must be given either as None or as a list with 2*M + 1 numpy arrays with length N")
            sys.exit(1)

        if (phiSymm):
            for i in range(0,M):
                # NOTE(review): `self.M` is never assigned anywhere in this
                # class (only the local parameter `M` exists), so this branch
                # raises AttributeError whenever phiSymm=True -- confirm and
                # fix upstream (likely intended: self.phiDirs[M - 1 - i,:]).
                self.phiDirs[M+1+i,:] = self.phiDirs[self.M - 1 - i,:]

        theta_m = reduceToIntervals(thetas,dth)
        if (spread_params is None):
            Si_Sp = spread_fun(theta_m,RW.fim)
        else:
            try:
                Si_Sp = spread_fun(theta_m,RW.fim, **spread_params)
            # NOTE(review): bare except hides the real failure mode (bad
            # keyword vs. genuine numerical error); narrowing it to
            # TypeError would make debugging easier.
            except:
                logEvent('ERROR! Wavetools.py: Additional spread parameters are not valid for the %s spectrum' %spectName)
                sys.exit(1)

        # Setting amplitudes
        # Normalising the spreading function
        freq = list(range(0,N))
        # Normalising integral over all frequencies
        for ii in freq:
            Si_Sp[:,ii] = normIntegral(Si_Sp[:,ii],theta_m)
            Si_Sp[:,ii] *= RW.Si_Jm[ii]
        # Creating amplitudes spectrum
        self.aiDirs[:] = np.sqrt(2.*returnRectangles3D(Si_Sp,theta_m,RW.fim))
        self.mwl = mwl
        self.depth = depth
        # Wavenumber vector per (frequency, direction) pair.
        self.kDirs = np.zeros((self.N, self.Mtot, 3),"d")
        for nn in range(self.N):
            for mm in range(self.Mtot):
                self.kDirs[nn,mm,:] = RW.ki[nn]*self.waveDirs[mm,:]
        # Flatten everything into the C-level arrays (presumably cdef'd in an
        # accompanying .pxd, not visible in this chunk) consumed by the
        # optimised __cpp_* kernels.  Flat component index: ij = mm*N + nn.
        for ij in range(3):
            self.vDir_c[ij] = self.vDir[ij]
        self.vDir_ = self.vDir_c
        for mm in range(self.Mtot):
            for nn in range(self.N):
                ij = mm * self.N + nn
                self.ai_c[ij] = self.aiDirs[mm,nn]
                self.phi_c[ij] = self.phiDirs[mm,nn]
                self.omega_c[ij] = RW.omega[nn]
                self.ki_c[ij] = RW.ki[nn]
                self.tanh_c[ij] = RW.tanhF[nn]
                for kk in range(3):
                    self.kDir_c[3*ij+kk] = self.kDirs[nn,mm,kk]
                    self.waveDir_c[3*ij+kk] = self.waveDirs[mm,kk]
        self.kDir_ = self.kDir_c
        self.omega_ = self.omega_c
        self.ki_ = self.ki_c
        self.ai_ = self.ai_c
        self.tanh_ = self.tanh_c
        self.phi_ = self.phi_c
        self.waveDir_ = self.waveDir_c

    def _cpp_eta(self, x, t):
        # Thin wrapper over the optimised C-level free-surface kernel.
        return __cpp_etaRandom(x,t,self.kDir_, self.omega_,self.phi_,self.ai_, self.Nall, self.fast)

    def eta(self, x, t):
        """Calculates free surface elevation (DirectionalWaves class)

        Parameters
        ----------
        x : numpy.ndarray
            Position vector
        t : float
            Time variable

        Returns
        --------
        float
            Free-surface elevation as a float
        """
        # Copy the position into a C array before calling the C kernel.
        cython.declare(xx=cython.double[3])
        xx[0] = x[0]
        xx[1] = x[1]
        xx[2] = x[2]
        return self._cpp_eta(xx,t)

    def _cpp_u(self, U, x, t):
        # Thin wrapper over the optimised C-level velocity kernel; fills U in place.
        __cpp_uDir(U, x,t,self.kDir_, self.ki_, self.omega_,self.phi_,self.ai_,self.mwl,self.depth, self.Nall, self.waveDir_, self.vDir_, self.tanh_, self.gAbs, self.fast)

    def u(self, x, t):
        """Calculates wave velocity vector (DirectionalWaves class)

        Parameters
        ----------
        x : numpy.ndarray
            Position vector
        t : float
            Time variable

        Returns
        --------
        numpy.ndarray
            Velocity vector as 1D array
        """
        # Stage position and a zeroed output buffer as C arrays, run the C
        # kernel, then copy the result into a numpy vector for the caller.
        cython.declare(xx=cython.double[3])
        cython.declare(cppU=cython.double[3])
        for ii in range(3):
            xx[ii] = x[ii]
            cppU[ii] = 0.
        U = np.zeros(3,)
        self._cpp_u(cppU,xx,t)
        U[0] = cppU[0]
        U[1] = cppU[1]
        U[2] = cppU[2]
        return U


class TimeSeries(object):
    """This class is used for generating waves from an arbitrary free-surface
    elevation time series

    Parameters
    ----------
    timeSeriesFile : string
        Time series file name (csv or txt)
    skiprows : int
        Number of header rows in time series file
    timeSeriesPosition : numpy.ndarray
        Coordinates of the gauge / signal location
    depth : float
        Water depth
    N : int
        Number of frequency components
    mwl : float
        Still water level
    waveDir : numpy.ndarray
        Leading wave direction vector
    g : Numpy array
        Gravitational acceleration vector
    cutoffTotal : float
        Cut off fraction, applied both at the leading and tailing parts of
        the series
    rec_direct : bool
        Switch for activating direct decomposition
    window_params : dict
        Dictionary of parameters for window method
        e.g. window_params = {"Nwaves":15, "Tm": Tp/1.1, "Window":"costap"}
        (minimum parameters required)
        e.g. window_params = {"Nwaves":15, "Tm": Tp/1.1, "Window":"costap",
        "Overlap":0.5, "Cutoff":0.2} (full range of parameters)
    arrayData : bool
        Switch for passing the time series as an array (False by default)
    seriesArray : numpy.ndarray
        Free surface elevation time series given in an array format
        (None by default)
    Lgen : numpy.ndarray
        Presumably the length of the generation zone -- TODO confirm against
        callers; undocumented in the original.
    fast : bool
        Switch for enabling optimised functions
    """

    def __init__(self,
                 timeSeriesFile,   # e.g. = "Timeseries.txt"
                 skiprows,
                 timeSeriesPosition,
                 depth,
                 N,                # number of frequency bins
                 mwl,              # mean water level
                 waveDir,
                 g,
                 cutoffTotal = 0.01,
                 rec_direct = True,
                 window_params = None,  # if rec_direct = False then window_params = {"Nwaves":Nwaves,"Tm":Tm,"Window":wind_filt,"Overlap":overlap,"Cutoff":cutoff}
                 arrayData = False,
                 seriesArray = None,
                 Lgen = np.array([0.,0.,0]),
                 fast = True
                 ):
        self.fast = fast
        self.rec_direct = rec_direct
        # Setting the depth
        self.depth = depth
        # Number of wave components
        self.N = N
        self.tanhF = np.zeros(N,"d")
        Nwaves = None
        # Position of timeSeriesFile
        if(len(timeSeriesPosition)==3):
            self.x0
            # NOTE: chunk truncated here -- the remainder of __init__
            # continues beyond this view.
+= offset_norm_criterion(pt_offset[pt_valid_index], gt_offset[pt_valid_index]) '''point offset dir loss''' offset_dir_loss += compute_offset_dir_loss( pt_offset, gt_offset, instance_labels, ignore_label=cfg.ignore_label) loss_out['offset_norm_loss'] = (offset_norm_loss, (instance_labels != cfg.ignore_label).sum()) loss_out['offset_dir_loss'] = (offset_dir_loss, (instance_labels != cfg.ignore_label).sum()) loss += cfg.loss_weights['point_offset_norm'] * offset_norm_loss + \ cfg.loss_weights['point_offset_dir'] * offset_dir_loss '''center related loss''' if 'center_preds' in loss_inp.keys(): center_heatmaps = [] center_semantic_labels = [] center_offset_norm_loss = torch.zeros(1).cuda() center_offset_dir_loss = torch.zeros(1).cuda() center_preds, point_coords, sampled_indexes, instance_centers, instance_sizes, batch_offsets = loss_inp['center_preds'] center_semantic_preds, _, point_semantic_labels = loss_inp['center_semantic_preds'] center_offset_preds, _, point_coords, point_instance_info, instance_labels = loss_inp['center_offset_preds'] for batch_index in range(1, len(batch_offsets)): point_coord = point_coords[batch_offsets[batch_index - 1]:batch_offsets[batch_index]] instance_center = instance_centers[instance_centers[:, 0] == (batch_index - 1), 1:] instance_size = instance_sizes[instance_centers[:, 0] == (batch_index - 1), 1:] ### produce center probability of sampled points sampled_index = sampled_indexes[sampled_indexes[:, 0] == batch_index, 1] center_heatmap = generate_adaptive_heatmap( point_coord[sampled_index, :].double().cpu(), instance_center.cpu(), instance_size.cpu(), min_IoU=cfg.min_IoU )['heatmap'] center_heatmaps.append(center_heatmap.cuda()) point_semantic_label = point_semantic_labels[batch_offsets[batch_index - 1]:batch_offsets[batch_index]] center_semantic_labels.append(point_semantic_label[sampled_index]) '''center offset loss''' center_instance_info = point_instance_info[batch_offsets[batch_index - 1]:batch_offsets[batch_index]] 
center_instance_info = center_instance_info[sampled_index] center_coord = point_coords[batch_offsets[batch_index - 1]:batch_offsets[batch_index]] center_coord = center_coord[sampled_index] center_offset_preds = center_offset_preds.view(-1, 3) center_offset_pred = center_offset_preds[sampled_indexes[:, 0] == batch_index, :] instance_label = instance_labels[batch_offsets[batch_index - 1]:batch_offsets[batch_index]] instance_label = instance_label[sampled_index] gt_offsets = center_instance_info[:, 0:3] - center_coord # (8196, 3) center_diff = center_offset_pred - gt_offsets # (N, 3) center_dist = torch.sum(torch.abs(center_diff), dim=-1) # (N) valid = (instance_label != cfg.ignore_label).float() center_offset_norm_loss += torch.sum(center_dist * valid) / (torch.sum(valid) + 1e-6) gt_offsets_norm = torch.norm(gt_offsets, p=2, dim=1) # (N), float gt_offsets_ = gt_offsets / (gt_offsets_norm.unsqueeze(-1) + 1e-8) center_offsets_norm = torch.norm(center_offset_pred, p=2, dim=1) center_offsets_ = center_offset_pred / (center_offsets_norm.unsqueeze(-1) + 1e-8) direction_diff = - (gt_offsets_ * center_offsets_).sum(-1) # (N) center_offset_dir_loss += torch.sum(direction_diff * valid) / (torch.sum(valid) + 1e-6) '''center loss''' center_heatmaps = torch.cat(center_heatmaps, dim=0).to(torch.float32) center_loss = center_criterion(center_preds.view(-1), center_heatmaps) '''center semantic loss''' center_semantic_labels = torch.cat(center_semantic_labels, dim=0) center_semantic_loss = center_semantic_criterion( center_semantic_preds.view(-1, 20), center_semantic_labels ) center_offset_norm_loss = center_offset_norm_loss / cfg.batch_size center_offset_dir_loss = center_offset_dir_loss / cfg.batch_size loss_out['center_probs_loss'] = (center_loss, sampled_indexes.shape[0]) loss_out['center_semantic_loss'] = (center_semantic_loss, sampled_indexes.shape[0]) loss_out['center_offset_norm_loss'] = (center_offset_norm_loss, sampled_indexes.shape[0]) loss_out['center_offset_dir_loss'] = 
(center_offset_dir_loss, sampled_indexes.shape[0]) loss += cfg.loss_weights['center_prob'] * center_loss + \ cfg.loss_weights['center_semantic'] * center_semantic_loss + \ cfg.loss_weights['center_offset_norm_loss'] * center_offset_norm_loss + \ cfg.loss_weights['center_offset_dir_loss'] * center_offset_dir_loss if (epoch > cfg.prepare_epochs) and ('proposal_scores' in loss_inp.keys()): '''score loss''' scores, proposals_idx, proposals_offset, instance_pointnum = loss_inp['proposal_scores'] # scores: (nProposal, 1), float32 # proposals_idx: (sumNPoint, 2), int, cpu, dim 0 for cluster_id, dim 1 for corresponding point idxs in N # proposals_offset: (nProposal + 1), int, cpu # instance_pointnum: (total_nInst), int ious = pointgroup_ops.get_iou(proposals_idx[:, 1].cuda(), proposals_offset.cuda(), instance_labels, instance_pointnum) # (nProposal, nInstance), float gt_ious, gt_instance_idxs = ious.max(1) # (nProposal) float, long gt_scores = get_segmented_scores(gt_ious, cfg.fg_thresh, cfg.bg_thresh) score_loss = score_criterion(torch.sigmoid(scores.view(-1)), gt_scores) score_loss = score_loss.mean() loss_out['score_loss'] = (score_loss, gt_ious.shape[0]) loss += cfg.loss_weights['score'] * score_loss if 'proposal_confidences' in loss_inp.keys(): proposals_confidence_preds, proposals_idx_shifts, proposals_offset_shifts, instance_pointnum = loss_inp['proposal_confidences'] # scores: (nProposal, 1), float32 # proposals_idx: (sumNPoint, 2), int, cpu, dim 0 for cluster_id, dim 1 for corresponding point idxs in N # proposals_offset: (nProposal + 1), int, cpu # instance_pointnum: (total_nInst), int proposal_confidence_loss = torch.zeros(1).cuda() for proposal_index in range(len(proposals_confidence_preds)): ious = pointgroup_ops.get_iou( proposals_idx_shifts[proposal_index][:, 1].cuda(), proposals_offset_shifts[proposal_index].cuda(), instance_labels, instance_pointnum ) # (nProposal, nInstance), float gt_ious, gt_instance_idxs = ious.max(1) # (nProposal) float, long 
gt_scores = get_segmented_scores(gt_ious, cfg.fg_thresh, cfg.bg_thresh) conf_loss = confidence_criterion( torch.sigmoid(proposals_confidence_preds[proposal_index].view(-1)), gt_scores) proposal_confidence_loss += conf_loss.mean() loss_out['proposal_confidence_loss'] = (proposal_confidence_loss, gt_ious.shape[0]) loss += cfg.loss_weights['proposal_confidence_loss'] * proposal_confidence_loss ### three different feature term losses mentioned in OccuSeg if cfg.feature_variance_loss['activate'] and ('feature_variance_loss' in loss_inp.keys()): point_features, instance_labels = loss_inp['feature_variance_loss'] valid_instance_index = (instance_labels != cfg.ignore_label) instance_features = scatter_mean( point_features[valid_instance_index], instance_labels[valid_instance_index], dim=0 ) feature_variance_loss = scatter_mean( torch.relu( torch.norm( instance_features[instance_labels[valid_instance_index]] - \ point_features[valid_instance_index], p=2, dim=1 ) - cfg.feature_variance_loss['variance_threshold']) ** 2, instance_labels[valid_instance_index], dim=0 ).mean() loss_out['feature_variance_loss'] = (feature_variance_loss, instance_labels.shape[0]) loss += cfg.loss_weights['feature_variance_loss'] * feature_variance_loss if cfg.feature_distance_loss['activate'] and ('feature_distance_loss' in loss_inp.keys()): point_features, instance_labels = loss_inp['feature_distance_loss'] valid_instance_index = (instance_labels != cfg.ignore_label) instance_features = scatter_mean( point_features[valid_instance_index], instance_labels[valid_instance_index], dim=0 ) instance_dist_mat = torch.norm( instance_features.unsqueeze(dim=0) - instance_features.unsqueeze(dim=1), dim=2) instance_dist_mat = torch.relu( (2 * cfg.feature_distance_loss['distance_threshold'] - instance_dist_mat) ** 2) instance_dist_mat[range(len(instance_dist_mat)), range(len(instance_dist_mat))] = 0 feature_distance_loss = instance_dist_mat.sum() / ( instance_dist_mat.shape[0] * (instance_dist_mat.shape[0] - 
1)) loss_out['feature_distance_loss'] = (feature_distance_loss, instance_labels.shape[0]) loss += cfg.loss_weights['feature_distance_loss'] * feature_distance_loss if cfg.feature_instance_regression_loss['activate'] and ('feature_instance_regression_loss' in loss_inp.keys()): point_features, instance_labels = loss_inp['feature_instance_regression_loss'] valid_instance_index = (instance_labels != cfg.ignore_label) instance_features = scatter_mean( point_features[valid_instance_index], instance_labels[valid_instance_index], dim=0 ) feature_instance_regression_loss = torch.mean(torch.norm(instance_features, p=2, dim=1), dim=0) loss_out['feature_instance_regression_loss'] = (feature_instance_regression_loss, instance_labels.shape[0]) loss += cfg.loss_weights['feature_instance_regression_loss'] * feature_instance_regression_loss ### occupancy loss to predict the voxel number for each voxel if ('occupancy' in cfg.model_mode.split('_')) and ('voxel_occupancy_loss' in loss_inp.keys()): point_occupancy_preds, voxel_instance_labels, voxel_occupancy_labels = loss_inp['voxel_occupancy_loss'] voxel_occupancy_loss = torch.zeros(1).cuda() for point_occupancy_pred in point_occupancy_preds: valid_voxel_index = voxel_instance_labels != -100 point_occupancy_prediction = point_occupancy_pred[valid_voxel_index].squeeze(dim=1) voxel_instance_labels = voxel_instance_labels[valid_voxel_index] voxel_occupancy_labels = voxel_occupancy_labels[valid_voxel_index] voxel_occupancy_loss += scatter_mean( torch.abs(point_occupancy_prediction - torch.log(voxel_occupancy_labels.float())), voxel_instance_labels ).mean() voxel_occupancy_loss = voxel_occupancy_loss / len(point_occupancy_preds) loss_out['voxel_occupancy_loss'] = (voxel_occupancy_loss, point_occupancy_preds[0].shape[0]) loss += cfg.loss_weights['voxel_occupancy_loss'] * voxel_occupancy_loss if ('local_point_semantic' in loss_inp.keys()) and (cfg.local_proposal['scatter_mean_target'] == False): # local proposal point semantic loss 
calculate local_point_semantic_scores, local_proposals_idx, labels = loss_inp['local_point_semantic'] local_point_semantic_loss = torch.zeros(1).cuda() for local_point_semantic_score in local_point_semantic_scores: local_point_semantic_loss += semantic_criterion(local_point_semantic_score, labels[local_proposals_idx[:, 1].long()]) # local proposal point offset losses local_point_offset_preds, local_proposals_idx, coords, instance_info, instance_labels = loss_inp['local_point_offset'] local_point_offset_norm_loss = torch.zeros(1).cuda() local_point_offset_dir_loss = torch.zeros(1).cuda() for local_point_offset_pred in local_point_offset_preds: gt_offsets = instance_info[:, 0:3] - coords # (N, 3) gt_offsets = gt_offsets[local_proposals_idx[:, 1].long(), :] pt_diff = local_point_offset_pred - gt_offsets # (N, 3) pt_dist = torch.sum(torch.abs(pt_diff), dim=-1) # (N) valid = (instance_labels[local_proposals_idx[:, 1].long()] != cfg.ignore_label).float() local_point_offset_norm_loss += torch.sum(pt_dist * valid) / (torch.sum(valid) + 1e-6) gt_offsets_norm = torch.norm(gt_offsets, p=2, dim=1) # (N), float gt_offsets_ = gt_offsets / (gt_offsets_norm.unsqueeze(-1) + 1e-8) local_point_offsets_norm = torch.norm(local_point_offset_pred, p=2, dim=1) local_point_offsets_ = local_point_offset_pred / (local_point_offsets_norm.unsqueeze(-1) + 1e-8) direction_diff = - (gt_offsets_ * local_point_offsets_).sum(-1) # (N) local_point_offset_dir_loss += torch.sum(direction_diff * valid) / (torch.sum(valid) + 1e-6) loss_out['local_point_semantic_loss'] = (local_point_semantic_loss, local_proposals_idx.shape[0]) loss_out['local_point_offset_norm_loss'] = (local_point_offset_norm_loss, valid.sum()) loss_out['local_point_offset_dir_loss'] = (local_point_offset_dir_loss, valid.sum()) loss += cfg.loss_weights['local_point_semantic_loss'] * local_point_semantic_loss + \ cfg.loss_weights['local_point_offset_norm'] * local_point_offset_norm_loss + \ cfg.loss_weights['local_point_offset_dir'] * 
local_point_offset_dir_loss if ('point_reconstructed_coords' in loss_inp.keys()) and (cfg.point_xyz_reconstruction_loss['activate']): point_reconstructed_coords, coords_float = loss_inp['point_reconstructed_coords'] point_xyz_reconstruction_loss = point_reconstruction_criterion(point_reconstructed_coords, coords_float) loss_out['point_xyz_reconstruction_loss'] = (point_xyz_reconstruction_loss, coords_float.shape[0]) loss += cfg.loss_weights['point_xyz_reconstruction_loss'] * point_xyz_reconstruction_loss if ('instance_id_preds' in loss_inp.keys()) and (cfg.instance_classifier['activate']): instance_id_preds, instance_labels = loss_inp['instance_id_preds'] point_instance_id_loss = point_instance_id_criterion(instance_id_preds, instance_labels) loss_out['point_instance_id_loss'] = (point_instance_id_loss, instance_labels.shape[0]) loss += cfg.loss_weights['point_instance_id_loss'] * point_instance_id_loss if ('local_point_feature_discriminative_loss' in loss_inp.keys()) and (cfg.local_proposal['local_point_feature_discriminative_loss']): proposals_point_features, proposals_idx, instance_labels = loss_inp['local_point_feature_discriminative_loss'] local_feature_variance_loss = torch.zeros(1).cuda() local_feature_distance_loss = torch.zeros(1).cuda() local_feature_instance_regression_loss = torch.zeros(1).cuda() for proposal_idx in proposals_idx[:, 0].unique(): proposal_valid_idx = (proposals_idx[:, 0] == proposal_idx) proposals_point_feature = proposals_point_features[proposal_valid_idx] proposal_valid_idx = proposals_idx[proposal_valid_idx, 1].long() instance_label = instance_labels[proposal_valid_idx] ## variance_loss valid_instance_index = (instance_label != cfg.ignore_label) instance_features = scatter_mean( proposals_point_feature[valid_instance_index], instance_label[valid_instance_index], dim=0 ) local_feature_variance_loss += scatter_mean( torch.relu( torch.norm( instance_features[instance_label[valid_instance_index]] - \ 
proposals_point_feature[valid_instance_index], p=2, dim=1 ) - cfg.feature_variance_loss['variance_threshold']) ** 2, instance_label[valid_instance_index], dim=0 ).mean() ## distance_loss instance_features = instance_features[instance_features != torch.zeros(32).cuda()] if instance_features.ndimension() == 1: instance_features = instance_features.unsqueeze(dim=0) instance_dist_mat = torch.norm( instance_features.unsqueeze(dim=0) - instance_features.unsqueeze(dim=1), dim=2) instance_dist_mat = torch.relu( (2 * cfg.feature_distance_loss['distance_threshold'] - instance_dist_mat) ** 2) instance_dist_mat[range(len(instance_dist_mat)), range(len(instance_dist_mat))] = 0 local_feature_distance_loss += instance_dist_mat.sum() / max( (instance_dist_mat.shape[0] * (instance_dist_mat.shape[0] - 1)), 1) ## instance_regression_loss local_feature_instance_regression_loss += torch.mean(torch.norm(instance_features, p=2, dim=1), dim=0) local_feature_variance_loss = local_feature_variance_loss / len(proposals_idx[:, 0].unique()) local_feature_distance_loss = local_feature_distance_loss / len(proposals_idx[:, 0].unique()) local_feature_instance_regression_loss = local_feature_instance_regression_loss / len(proposals_idx[:, 0].unique()) loss_out['local_feature_variance_loss'] = (local_feature_variance_loss, len(proposals_idx)) loss_out['local_feature_distance_loss'] = (local_feature_distance_loss, len(proposals_idx)) loss_out['local_feature_instance_regression_loss'] = (local_feature_instance_regression_loss, len(proposals_idx)) loss += cfg.loss_weights['local_feature_variance_loss'] * local_feature_variance_loss + \ cfg.loss_weights['local_feature_distance_loss'] * local_feature_distance_loss + \ cfg.loss_weights['local_feature_instance_regression_loss'] * local_feature_instance_regression_loss if ('voxel_center_loss' in loss_inp.keys()) and (cfg.voxel_center_prediction['activate']): voxel_center_preds, voxel_center_probs_labels = loss_inp['voxel_center_loss'] voxel_center_loss = 
center_criterion(voxel_center_preds.view(-1), voxel_center_probs_labels) loss_out['voxel_center_loss'] = (voxel_center_loss, voxel_center_probs_labels.shape[0]) voxel_center_offset_preds, voxel_center_offset_labels, voxel_center_instance_labels = loss_inp['voxel_center_offset_loss'] voxel_diff = voxel_center_offset_preds - voxel_center_offset_labels # (N, 3) voxel_dist = torch.sum(torch.abs(voxel_diff), dim=-1) # (N) valid = (voxel_center_instance_labels != cfg.ignore_label).float() voxel_center_offset_norm_loss = torch.sum(voxel_dist * valid) / (torch.sum(valid) + 1e-6) voxel_gt_offsets_norm = torch.norm(voxel_center_offset_labels, p=2, dim=1) # (N), float voxel_gt_offsets_ = voxel_center_offset_labels / (voxel_gt_offsets_norm.unsqueeze(-1) + 1e-8) voxel_offsets_norm = torch.norm(voxel_center_offset_preds, p=2, dim=1) voxel_offsets_ = voxel_center_offset_preds / (voxel_offsets_norm.unsqueeze(-1) + 1e-8) voxel_direction_diff = - (voxel_gt_offsets_ * voxel_offsets_).sum(-1) # (N) voxel_center_offset_dir_loss = torch.sum(voxel_direction_diff * valid) / (torch.sum(valid) + 1e-6) loss_out['voxel_center_offset_norm_loss'] = (voxel_center_offset_norm_loss, valid.shape[0]) loss_out['voxel_center_offset_dir_loss'] = (voxel_center_offset_dir_loss, valid.shape[0]) voxel_center_semantic_preds, voxel_center_semantic_labels = loss_inp['voxel_center_semantic_loss'] voxel_center_semantic_loss = center_semantic_criterion(voxel_center_semantic_preds, voxel_center_semantic_labels) loss_out['voxel_center_semantic_loss'] = (voxel_center_semantic_loss, voxel_center_semantic_labels.shape[0]) loss += cfg.loss_weights['voxel_center_loss'] * voxel_center_loss + \ cfg.loss_weights['voxel_center_offset_norm_loss'] * voxel_center_offset_norm_loss + \ cfg.loss_weights['voxel_center_offset_dir_loss'] * voxel_center_offset_dir_loss + \ cfg.loss_weights['voxel_center_semantic_loss'] * voxel_center_semantic_loss return loss, loss_out, infos def get_segmented_scores(scores, fg_thresh=1.0, 
bg_thresh=0.0): ''' :param scores: (N), float, 0~1 :return: segmented_scores: (N), float 0~1, >fg_thresh: 1, <bg_thresh: 0, mid: linear '''
# coding: utf-8

"""
InfluxDB OSS API Service.

The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint.   # noqa: E501

OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""


from __future__ import absolute_import

import re  # noqa: F401

# python 2 and python 3 compatibility library
import six


class ReplicationsService(object):
    """NOTE: This class is auto generated by OpenAPI Generator.

    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):  # noqa: E501,D401,D403
        """ReplicationsService - a operation defined in OpenAPI."""
        if api_client is None:
            raise ValueError("Invalid value for `api_client`, must be defined.")
        self.api_client = api_client

    def delete_replication_by_id(self, replication_id, **kwargs):  # noqa: E501,D401,D403
        """Delete a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_replication_by_id(replication_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str replication_id: (required)
        :param str zap_trace_span: OpenTracing span context
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Convenience facade: always request the payload only, and delegate
        # to the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_replication_by_id_with_http_info(replication_id, **kwargs)  # noqa: E501
        else:
            (data) = self.delete_replication_by_id_with_http_info(replication_id, **kwargs)  # noqa: E501
            return data

    def delete_replication_by_id_with_http_info(self, replication_id, **kwargs):  # noqa: E501,D401,D403
        """Delete a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_replication_by_id_with_http_info(replication_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str replication_id: (required)
        :param str zap_trace_span: OpenTracing span context
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        local_var_params = locals()

        all_params = ['replication_id', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')

        # Reject keyword arguments this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_replication_by_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'replication_id' is set
        if ('replication_id' not in local_var_params or
                local_var_params['replication_id'] is None):
            raise ValueError("Missing the required parameter `replication_id` when calling `delete_replication_by_id`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'replication_id' in local_var_params:
            path_params['replicationID'] = local_var_params['replication_id']  # noqa: E501

        query_params = []

        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']

        return self.api_client.call_api(
            '/api/v2/replications/{replicationID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)

    def get_replication_by_id(self, replication_id, **kwargs):  # noqa: E501,D401,D403
        """Retrieve a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_replication_by_id(replication_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str replication_id: (required)
        :param str zap_trace_span: OpenTracing span context
        :return: Replication
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Convenience facade: always request the payload only, and delegate
        # to the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_replication_by_id_with_http_info(replication_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_replication_by_id_with_http_info(replication_id, **kwargs)  # noqa: E501
            return data

    def get_replication_by_id_with_http_info(self, replication_id, **kwargs):  # noqa: E501,D401,D403
        """Retrieve a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_replication_by_id_with_http_info(replication_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str replication_id: (required)
        :param str zap_trace_span: OpenTracing span context
        :return: Replication
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        local_var_params = locals()

        all_params = ['replication_id', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')

        # Reject keyword arguments this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_replication_by_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'replication_id' is set
        if ('replication_id' not in local_var_params or
                local_var_params['replication_id'] is None):
            raise ValueError("Missing the required parameter `replication_id` when calling `get_replication_by_id`")  # noqa: E501

        collection_formats = {}

        path_params = {}
        if 'replication_id' in local_var_params:
            path_params['replicationID'] = local_var_params['replication_id']  # noqa: E501

        query_params = []

        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']

        return self.api_client.call_api(
            '/api/v2/replications/{replicationID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Replication',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)

    def get_replications(self, org_id, **kwargs):  # noqa: E501,D401,D403
        """List all replications.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_replications(org_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str org_id: The organization ID. (required)
        :param str zap_trace_span: OpenTracing span context
        :param str name:
        :param str remote_id:
        :param str local_bucket_id:
        :return: Replications
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Convenience facade: always request the payload only, and delegate
        # to the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_replications_with_http_info(org_id, **kwargs)  # noqa: E501
        else:
            (data) = self.get_replications_with_http_info(org_id, **kwargs)  # noqa: E501
            return data

    def get_replications_with_http_info(self, org_id, **kwargs):  # noqa: E501,D401,D403
        """List all replications.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_replications_with_http_info(org_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str org_id: The organization ID. (required)
        :param str zap_trace_span: OpenTracing span context
        :param str name:
        :param str remote_id:
        :param str local_bucket_id:
        :return: Replications
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        local_var_params = locals()

        all_params = ['org_id', 'zap_trace_span', 'name', 'remote_id', 'local_bucket_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')

        # Reject keyword arguments this endpoint does not understand.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_replications" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'org_id' is set
        if ('org_id' not in local_var_params or
                local_var_params['org_id'] is None):
            raise ValueError("Missing the required parameter `org_id` when calling `get_replications`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []
        if 'org_id' in local_var_params:
            query_params.append(('orgID', local_var_params['org_id']))  # noqa: E501
        if 'name' in local_var_params:
            query_params.append(('name', local_var_params['name']))  # noqa: E501
        if 'remote_id' in local_var_params:
            query_params.append(('remoteID', local_var_params['remote_id']))  # noqa: E501
        if 'local_bucket_id' in local_var_params:
            query_params.append(('localBucketID', local_var_params['local_bucket_id']))  # noqa: E501

        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = []  # noqa: E501

        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']

        return self.api_client.call_api(
            '/api/v2/replications', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Replications',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)

    def patch_replication_by_id(self, replication_id, replication_update_request, **kwargs):  # noqa: E501,D401,D403
        """Update a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_replication_by_id(replication_id, replication_update_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str replication_id: (required)
        :param ReplicationUpdateRequest replication_update_request: (required)
        :param str zap_trace_span: OpenTracing span context
        :param bool validate: If true, validate the updated information, but don't save it.
        :return: Replication
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Convenience facade: always request the payload only, and delegate
        # to the *_with_http_info variant.
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.patch_replication_by_id_with_http_info(replication_id, replication_update_request, **kwargs)  # noqa: E501
        else:
            (data) = self.patch_replication_by_id_with_http_info(replication_id, replication_update_request, **kwargs)  # noqa: E501
            return data

    def patch_replication_by_id_with_http_info(self, replication_id, replication_update_request, **kwargs):  # noqa: E501,D401,D403
        """Update a replication.

        This method makes a synchronous HTTP request by default.
        To make an asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_replication_by_id_with_http_info(replication_id, replication_update_request, async_req=True)
        >>> result = thread.get()

        :param async_req bool
#!/usr/bin/env python from mpl_toolkits.basemap import Basemap, cm # requires netcdf4-python (netcdf4-python.googlecode.com) from netCDF4 import Dataset as NetCDFFile import numpy as np import matplotlib.pyplot as plt from datetime import datetime as dt from datetime import timedelta as td import sys from scipy import stats from itertools import tee, izip #from numpy.polynomial.polynomial import polyfit def pairwise(iterable): "s -> (s0,s1), (s1,s2), (s2, s3), ..." a, b = tee(iterable) next(b, None) return izip(a, b) def load_regiondata(filename,states=True): """Reads a csv file which reports the country (and, optionally, the state) on a grid""" cs = open(filename,"r") #Empty lists to populate cs_lat = [] cs_lon = [] cs_country = [] if states: cs_state = [] for line in cs: line = line.strip() columns = line.split(",") cs_lat.append(float(columns[0])) cs_lon.append(float(columns[1])) cs_country.append( columns[2] ) if states: cs_state.append( columns[3] ) cs.close() #we can close the file here if states: return(cs_lat,cs_lon,cs_country,cs_state) else: return(cs_lat,cs_lon,cs_country) def region_matcher_fast(cs_lat,cs_lon,cs_country,cs_state,lat,lon,states=True,region="india"): """A fast version of region_matcher that only works for country_state.csv""" num_grid = len(lat) if num_grid == 1: single = True else: single = False #np arrays (cs_lat,cs_lon,lat,lon) = (np.array(cs_lat),np.array(cs_lon),np.array([lat]),np.array([lon])) #get i and j indexes if region=="india": j = np.around(np.divide((lat-2.),0.25)) j = np.maximum(j,np.zeros_like(j)) j = np.minimum(j,np.zeros_like(j)+144) i = np.around(np.divide((lon-65.),0.3125)) i = np.maximum(i,np.zeros_like(i)) i = np.minimum(i,np.zeros_like(i)+112) ind = np.add(113*j,i) elif region=="indonesia": j = np.around(np.divide((lat-(-12.)),0.25)) j = np.maximum(j,np.zeros_like(j)) j = np.minimum(j,np.zeros_like(j)+77) i = np.around(np.divide((lon-93.9375),0.3125)) i = np.maximum(i,np.zeros_like(i)) i = 
np.minimum(i,np.zeros_like(i)+154) ind = np.add(155*j,i) #print len(j) #print len(ind) #print np.min(ind) #print np.max(ind) #print len(cs_country) #print "got indexes" country_out = [] if states: state_out = [] for line in range(0,num_grid): if line % 1000 == 0: #print "Assigning %i of %i" %(line,num_grid) pass try: country_out.append(cs_country[int(ind[line])]) if states: state_out.append(cs_state[int(ind[line])]) except IndexError: print("line = %i" %line) print("ind[line] = %g" %ind[line]) raise ValueError if single: if states: return(country_out[0],state_out[0]) else: return(country_out[0]) else: if states: return(country_out,state_out) else: return(country_out) def frange(x, y, jump): while x <= y: yield x x += jump force_nest = True domain = [2.,38.,65.,100] #dev_from_av = False do_plot = True do_scatter = True do_prodloss = False cycle_type = "monthly" start_date = dt(2014,2,1) end_date = dt(2014,12,30) save_pre = '/geos/u28/scripts/GEOS-Chem_columns/for_GM/X1_newBC_' if cycle_type in ["daily","day","d"]: season_list = [start_date + td(days=i) for i in range((end_date-start_date).days+1)] #print season_list #sys.exit() elif cycle_type in ["monthly","month","m"]: season_list = [start_date + td(days=i) for i in range((end_date-start_date).days+1) if (start_date + td(days=i)).day == 1] elif cycle_type in ["year","y","all","a"]: season_list = ["year"] max_o3 = 1.0e18 do_country_mask = True country_statefile = "country_state.csv" #diffs = [["GC_O3_wAK_wpri",'sat_o3_wBC']] diffs = [] #ratios = [['GC_O3_GL',"GC_O3"]] ratios = [] #{quickname: [NC_fieldname, full_name,lower_plotbound,upper_plot_bound] fields_dict = { #'GC_O3_wAK' :['GC_O3 with OMI AKs' ,'GEOS-Chem, with OMI AKs, tropospheric ozone column (no prior)' ,0, max_o3], 'GC_O3_wAK_wpri':['GEOS O3 with OMI AKs inc prior','GEOS-Chem, with OMI AKs, tropspheric ozone column (with prior)',0.0e18, max_o3], #'GC_O3_wAK_SUBpri':['GEOS O3 with OMI AKs SUB prior','GEOS-Chem, with OMI AKs, tropspheric ozone column 
(with prior subtracted)',0, max_o3], #'MACC_O3' :['MACC O3' ,'MACC tropospheric ozone column' ,0, max_o3], #'MACC_O3_wAK' :['MACC O3 with OMI AKs' ,'MACC, with OMI AKs, tropospheric ozone column' ,0, max_o3], #'prior' :['prior' ,'Prior tropospheric O3 column' ,-0.5e18, 0.5e18], #'sat_o3' :['OMI O3' ,'OMI tropospheric ozone column' ,0, max_o3], #'sat_o3_wBC' :['OMI O3 with bias correction' ,'OMI tropospheric ozone column, with bias correction' ,0.0e18, max_o3], #'sat_o3_wBC_old' :['OMI O3 with OLD bias correction' ,'OMI tropospheric ozone column, with OLD bias correction' ,0.0e18, max_o3], #'GC_CO' :['GC_CO' ,'GEOS-Chem tropospheric CO column' ,0, 1.5e18], #'GC_CO_GL' :['GC_CO_GL' ,'GEOS-Chem ground-level CO mixing ratio' ,0, 4e-7 ], #'GC_NO' :['GC_NO' ,'GEOS-Chem tropospheric NO column' ,0, 1e16 ], #'GC_NO_GL' :['GC_NO_GL' ,'GEOS-Chem ground-level NO mixing ratio' ,0, 5e-9 ], #'GC_NO2' :['GC_NO2' ,'GEOS-Chem tropospheric NO2 column' ,0, 2.5e16], #'GC_NO2_GL' :['GC_NO2_GL' ,'GEOS-Chem ground-level NO2 mixing ratio' ,0, 5e-9 ], #'GC_NOx' :['GC_NOx' ,'GEOS-Chem tropospheric NOx column' ,0, 2.5e16], #'GC_NOx_GL' :['GC_NOx_GL' ,'GEOS-Chem ground-level NOx mixing ratio' ,0, 5e-9 ], #'GC_CH2O' :['GC_CH2O' ,'GEOS-Chem tropospheric HCHO column' ,0, 1.5e16], #'GC_CH2O_GL' :['GC_CH2O_GL' ,'GEOS-Chem ground-level HCHO mixing ratio' ,0, 2.5e-9], #'GC_O3' :['GC_O3' ,'GEOS-Chem tropospheric O3 column' ,0.0e18, max_o3], 'GC_O3_GL' :['GC_O3_GL' ,'GEOS-Chem ground-level O3 mixing ratio' ,0.e-9, 100.e-9] #'bias_corr' :['bias correction' ,'Bias correction applied' ,0, 3e17] #'prod_Ox' :['prod_Ox' ,'Ox production rate' ,0, 2.5e12], #'loss_Ox' :['loss_Ox' ,'Ox loss rate' ,0, 2.5e12] } (csv_lat,csv_lon,csv_country,csv_state) = \ load_regiondata(country_statefile,states=True) #get countries for points #define file nc = NetCDFFile('/geos/u28/scripts/GEOS-Chem_columns/NewBC_x1_monthly__%s-%s.nc'%(start_date.strftime('%Y%m%d'),end_date.strftime('%Y%m%d'))) #nc = 
NetCDFFile('/geos/u28/scripts/GEOS-Chem_columns/PL_goodsat_x1_new_2D_%s-%s.nc'%(start_date.strftime('%Y%m%d'),end_date.strftime('%Y%m%d'))) #spatial stuff latcorners = nc.variables['lat'][:] loncorners = -nc.variables['lon'][:] latres = latcorners[1] - latcorners[0] lonres = -loncorners[1] - -loncorners[0] #print latcorners #print loncorners #print latres #print lonres if force_nest: [minlat,maxlat,minlon,maxlon]=[2.,38.,65.,100] else: minlat = min(latcorners)-0.5*latres maxlat = max(latcorners)+0.5*latres minlon = min(-loncorners)-0.5*lonres maxlon = max(-loncorners)+0.5*lonres #get OMI mask plot_var = nc.variables['OMI O3'] out_india_mask = np.zeros(np.array(plot_var[0][:]).shape,dtype=bool) for i in range(len(out_india_mask)): this_lat = minlat + i*0.75 for j in range(len(out_india_mask[0])): this_lon = minlon + j*0.75 (this_country,this_state) = region_matcher_fast(csv_lat,csv_lon,csv_country,csv_state, [this_lat],[this_lon],region="india") #print this_lat,this_lon,this_country if do_country_mask: if this_country in ["India","Bangladesh"]: out_india_mask[i][j] = False else: out_india_mask[i][j] = True else: out_india_mask[i][j] = False top_mask = out_india_mask for t in range(len(season_list)): zero_or_nan_mask = np.logical_or(np.isnan(plot_var[t][:]),plot_var[t][:] == 0.) 
top_mask = np.logical_or(zero_or_nan_mask,top_mask) #update mask for t in range(len(season_list)): print(t) #text for filename and headers if season_list[t] == "JFM": date_text = '20140101-20140331' elif season_list[t] == "AMJ": date_text = '20140401-20140630' elif season_list[t] == "JAS": date_text = '20140701-20140930' elif season_list[t] == "OND": date_text = '20141001-20141230' elif season_list[t] in ["year","y","all","a"]: date_text = start_date.strftime('%Y%m%d')+"-"+end_date.strftime('%Y%m%d') else: #month or daily cycle if cycle_type in ["daily","day","d"]: date_text = season_list[t].strftime('%Y%m%d') elif cycle_type in ["monthly","month","m"]: date_text = season_list[t].strftime('%Y%m') else: #we shouldn't get here raise IOError("failure to set date_text") print(date_text) #Now, loop through fields: for field_QN in fields_dict: [field_NC,field_full,lower,upper] = fields_dict[field_QN] plot_var = nc.variables[field_NC] data = plot_var[t][:] #print "data shape" #print np.array(data).shape #out_india_mask = np.zeros(np.array(data).shape,dtype=bool) #for i in range(len(out_india_mask)): # this_lat = minlat + i*0.75 # for j in range(len(out_india_mask[0])): # this_lon = minlon + j*0.75 # (this_country,this_state) = region_matcher_fast(csv_lat,csv_lon,csv_country,csv_state, # [this_lat],[this_lon],region="india") # #print this_lat,this_lon,this_country # if this_country in ["India","Bangladesh"]: # out_india_mask[i][j] = False # else: # out_india_mask[i][j] = True #mask with same data for now mask = plot_var[t][:] units = plot_var.units #mask data_m = np.ma.masked_where(top_mask,data) print("%s : average : %g" %(field_full,np.nanmean(data_m.flatten()))) if do_plot: #basic draw map stuff fig = plt.figure(figsize=(8,8)) ax = fig.add_axes([0.1,0.1,0.8,0.8]) # create Mercator Projection Basemap instance. 
m = Basemap(projection='merc',llcrnrlat=minlat-1.75,urcrnrlat=maxlat-1.75,\ llcrnrlon=minlon-0.75,urcrnrlon=maxlon-0.75,lat_ts=20,resolution='i') # draw coastlines, state and country boundaries, edge of map. m.drawcoastlines() #m.drawstates() m.drawcountries() # draw parallels. parallels = np.arange(-90.,90,5.) m.drawparallels(parallels,labels=[1,0,0,0],fontsize=16) # draw meridians meridians = np.arange(-180.,180.,5.) m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=16) ny = data.shape[0]; nx = data.shape[1] lons, lats = m.makegrid(nx, ny) # get lat/lons of ny by nx evenly space grid. x, y = m(lons, lats) # compute map proj coordinates. #colour bar YlGn #(lower,upper) = (np.nanmin(np.array(data).flatten()),np.nanmax(np.array(data).flatten())) step = (upper-lower)/20. clevs = list(frange(lower,upper,step)) cbar_text = units #main fig #colormap if field_QN == "prod_Ox": cmap = plt.cm.BuPu elif field_QN == "loss_Ox": cmap = plt.cm.YlGn elif field_QN == "prior": cmap = plt.cm.coolwarm elif field_QN.endswith("_GL"): cmap = plt.cm.inferno else: cmap = plt.cm.nipy_spectral #plot it cs = m.pcolormesh(x,y,data_m,vmin=min(clevs),vmax=max(clevs),cmap=cmap) cbar = m.colorbar(cs,location='bottom',pad="5%") cbar.set_label(cbar_text,fontsize=10) #title plt.title("%s - %s"%(field_full,date_text),fontsize=14) #save plt.savefig("%s_%s_%s"%(save_pre,field_QN,date_text)) plt.close() if field_QN == "prod_Ox": prod_Ox_data = data prod_Ox_mask = mask if field_QN == "loss_Ox": loss_Ox_data = data loss_Ox_mask = mask if do_scatter: for xfield_QN, yfield_QN in pairwise(fields_dict): [xfield_NC,xfield_full,xlower,xupper] = fields_dict[xfield_QN] [yfield_NC,yfield_full,ylower,yupper] = fields_dict[yfield_QN] xaxis_var = nc.variables[xfield_NC] yaxis_var = nc.variables[yfield_NC] xdata = np.array(xaxis_var[t][:]).flatten() ydata = np.array(yaxis_var[t][:]).flatten() fig = plt.figure(figsize=(8,8)) plt.scatter(xdata,ydata,alpha=0.2) plt.title("%s"%(date_text),fontsize=14) plt.xlabel("%s / 
%s" %(xfield_full,xaxis_var.units)) plt.ylabel("%s / %s" %(yfield_full,yaxis_var.units)) plt.axis([xlower, xupper, ylower, yupper]) plt.grid(True) #for stats, strip out wherever either is nan xdata_clean = xdata[np.logical_not(np.isnan(np.multiply(xdata,ydata)))] ydata_clean = ydata[np.logical_not(np.isnan(np.multiply(xdata,ydata)))] slope, intercept, r_value, p_value, std_err = stats.linregress(xdata_clean, ydata_clean) print("m = %g , c = %g , rsq = %g "%(slope,intercept,r_value*r_value)) plt.plot([xlower,xupper], [intercept + slope * xlower,intercept + slope * xupper], 'r-') plt.annotate('y = %.2f x + %.2E , r2 = %.2f'%(slope,intercept,r_value*r_value), (0,0), (0, -45), xycoords='axes fraction', textcoords='offset points', va='top') plt.savefig("%s_Scatter_X-%s_Y-%s_%s"%(save_pre,xfield_QN,yfield_QN,date_text)) plt.close() if do_prodloss: #prodloss #=====do net Ox=========== field_QN = "net_Ox" [field_NC,field_full,lower,upper] = ["net_Ox","Net chemical Ox production/loss",-2.5e12,2.5e12] data = prod_Ox_data - loss_Ox_data mask = prod_Ox_mask units = "molec/cm3/s" #basic draw map stuff fig = plt.figure(figsize=(8,8)) ax = fig.add_axes([0.1,0.1,0.8,0.8]) # create Mercator Projection Basemap instance. m = Basemap(projection='merc',llcrnrlat=minlat,urcrnrlat=maxlat,\ llcrnrlon=minlon,urcrnrlon=maxlon,lat_ts=20,resolution='i') # draw coastlines, state and country boundaries, edge of
<gh_stars>1-10 #!/usr/bin/env python3 # -*- coding: utf-8 -*- # """RAMSES RF - a RAMSES-II protocol decoder & analyser.""" import logging from random import randint, randrange from types import SimpleNamespace from typing import Dict, Optional from .const import ( _000C_DEVICE, _0005_ZONE, ATTR_HEAT_DEMAND, ATTR_RELAY_DEMAND, ATTR_SETPOINT, ATTR_TEMP, ATTR_WINDOW_OPEN, BOOST_TIMER, DEV_KLASS, DOMAIN_TYPE_MAP, FAN_MODE, NUL_DEVICE_ID, Discover, __dev_mode__, ) from .entities import Entity, class_by_attr, discover_decorator from .protocol import Command, CorruptStateError, Priority from .protocol.address import NON_DEV_ADDR, id_to_address from .protocol.command import FUNC, TIMEOUT from .protocol.opentherm import ( MSG_ID, MSG_NAME, MSG_TYPE, OPENTHERM_MESSAGES, PARAMS_MSG_IDS, SCHEMA_MSG_IDS, STATUS_MSG_IDS, VALUE, ) from .protocol.ramses import CODE_ONLY_FROM_CTL, RAMSES_DEVICES from .protocol.transport import PacketProtocolPort from .schema import SZ_ALIAS, SZ_CLASS, SZ_DEVICE_ID, SZ_FAKED from .protocol import I_, RP, RQ, W_ # noqa: F401, isort: skip from .protocol import ( # noqa: F401, isort: skip _0001, _0002, _0004, _0005, _0006, _0008, _0009, _000A, _000C, _000E, _0016, _0100, _0150, _01D0, _01E9, _0404, _0418, _042F, _0B04, _1030, _1060, _1081, _1090, _1098, _10A0, _10B0, _10E0, _10E1, _1100, _1260, _1280, _1290, _1298, _12A0, _12B0, _12C0, _12C8, _12F0, _1300, _1F09, _1F41, _1FC9, _1FD0, _1FD4, _2249, _22C9, _22D0, _22D9, _22F1, _22F3, _2309, _2349, _2389, _2400, _2401, _2410, _2420, _2D49, _2E04, _30C9, _3120, _313F, _3150, _31D9, _31DA, _31E0, _3200, _3210, _3220, _3221, _3223, _3B00, _3EF0, _3EF1, _PUZZ, ) DEFAULT_BDR_ID = "13:000730" DEFAULT_EXT_ID = "17:000730" DEFAULT_THM_ID = "03:000730" BindState = SimpleNamespace( UNKNOWN=None, UNBOUND="unb", # unbound LISTENING="l", # waiting for offer OFFERING="of", # waiting for accept: -> sent offer ACCEPTING="a", # waiting for confirm: rcvd offer -> sent accept # NFIRMED="c", # bound: rcvd accept -> sent 
confirm BOUND="bound", # bound: rcvd confirm ) # DHW/THM, TRV -> CTL (temp, valve_position), or: # CTL -> BDR/OTB (heat_demand) # REQUEST -> WAITING # unbound -- unbound # unbound -- listening # offering -> listening # offering <- accepting # (confirming) bound -> accepting # bound -- bound DEV_MODE = __dev_mode__ _LOGGER = logging.getLogger(__name__) if DEV_MODE: _LOGGER.setLevel(logging.DEBUG) class DeviceBase(Entity): """The Device base class (good for a generic device).""" _DEV_KLASS = None _DEV_TYPES = tuple() # TODO: needed? _STATE_ATTR = None def __init__(self, gwy, dev_addr, ctl=None, domain_id=None, **kwargs) -> None: _LOGGER.debug("Creating a Device: %s (%s)", dev_addr.id, self.__class__) super().__init__(gwy) self.id = dev_addr.id if self.id in gwy.device_by_id: raise LookupError(f"Duplicate device: {self.id}") gwy.device_by_id[self.id] = self gwy.devices.append(self) self._ctl = self._set_ctl(ctl) if ctl else None self._domain_id = domain_id self._parent = None self.addr = dev_addr self.type = dev_addr.type # DEX # TODO: remove this attr self.devices = [] # [self] self.device_by_id = {} # {self.id: self} self._iz_controller = None self._alias = None self._faked = None if self.id in gwy._include: self._alias = gwy._include[self.id].get(SZ_ALIAS) def __repr__(self) -> str: if self._STATE_ATTR: return f"{self.id} ({self._domain_id}): {getattr(self, self._STATE_ATTR)}" return f"{self.id} ({self._domain_id})" def __str__(self) -> str: return self.id if self._klass is DEV_KLASS.DEV else f"{self.id} ({self._klass})" def __lt__(self, other) -> bool: if not hasattr(other, "id"): return NotImplemented return self.id < other.id def _start_discovery(self) -> None: delay = randint(10, 20) self._gwy._add_task( # 10E0/1FC9, 3220 pkts self._discover, discover_flag=Discover.SCHEMA, delay=0, period=3600 * 24 ) self._gwy._add_task( self._discover, discover_flag=Discover.PARAMS, delay=delay, period=3600 * 6 ) self._gwy._add_task( self._discover, 
discover_flag=Discover.STATUS, delay=delay + 1, period=60 ) @discover_decorator def _discover(self, discover_flag=Discover.ALL) -> None: # sometimes, battery-powered devices will respond to an RQ (e.g. bind mode) if discover_flag & Discover.SCHEMA: self._make_cmd(_1FC9, retries=3) # rf_bind if discover_flag & Discover.STATUS: self._make_cmd(_0016, retries=3) # rf_check def _make_cmd(self, code, payload="00", **kwargs) -> None: super()._make_cmd(code, self.id, payload, **kwargs) def _set_ctl(self, ctl) -> None: # self._ctl """Set the device's parent controller, after validating it.""" if self._ctl is ctl: return if self._is_controller and not isinstance(self, UfhController): # HACK: UFC is/binds to a contlr return if self._ctl is not None: raise CorruptStateError(f"{self} changed controller: {self._ctl} to {ctl}") # I --- 01:078710 --:------ 01:144246 1F09 003 FF04B5 # has been seen if not isinstance(ctl, Controller) and not ctl._is_controller: raise TypeError(f"Device {ctl} is not a controller") self._ctl = ctl ctl.device_by_id[self.id] = self ctl.devices.append(self) _LOGGER.debug("%s: controller now set to %s", self, ctl) return ctl def _handle_msg(self, msg) -> None: assert msg.src is self, f"msg inappropriately routed to {self}" super()._handle_msg(msg) if msg.verb != I_: # or: if self._iz_controller is not None or... 
return if not self._iz_controller and msg.code in CODE_ONLY_FROM_CTL: if self._iz_controller is None: _LOGGER.info(f"{msg._pkt} # IS_CONTROLLER (00): is TRUE") self._make_tcs_controller(msg) elif self._iz_controller is False: # TODO: raise CorruptStateError _LOGGER.error(f"{msg._pkt} # IS_CONTROLLER (01): was FALSE, now True") @property def has_battery(self) -> Optional[bool]: # 1060 """Return True if a device is battery powered (excludes battery-backup).""" return isinstance(self, BatteryState) or _1060 in self._msgz @property def _is_controller(self) -> Optional[bool]: if self._iz_controller is not None: return bool(self._iz_controller) # True, False, or msg if self._ctl is not None: # TODO: messy return self._ctl is self return False # @property # def _is_parent(self) -> bool: # """Return True if other devices can bind to this device.""" # return self._klass in (DEV_KLASS.CTL, DEV_KLASS.PRG, DEV_KLASS.UFC) @property def _is_present(self) -> bool: """Try to exclude ghost devices (as caused by corrupt packet addresses).""" return any( m.src == self for m in self._msgs.values() if not m._expired ) # TODO: needs addressing @property def _klass(self) -> str: return self._DEV_KLASS def _make_tcs_controller(self, msg=None, **kwargs): # CH/DHW """Create a TCS, and attach it to this controller.""" from .systems import create_system # HACK: needs sorting self._iz_controller = msg or True if self.type in ("01", "12", "22", "23", "34") and self._evo is None: # DEX self._evo = create_system(self._gwy, self, **kwargs) @property def schema(self) -> dict: """Return the fixed attributes of the device (e.g. 
TODO).""" return { **(self._codes if DEV_MODE else {}), SZ_ALIAS: self._alias, # SZ_FAKED: self._faked, SZ_CLASS: self._klass, } @property def params(self): """Return the configurable attributes of the device.""" return {} @property def status(self): """Return the state attributes of the device.""" return {} class DeviceInfo: # 10E0 RF_BIND = "rf_bind" DEVICE_INFO = "device_info" def _discover(self, discover_flag=Discover.ALL) -> None: if discover_flag & Discover.SCHEMA: if not self._msgs.get(_10E0) and ( self._klass not in RAMSES_DEVICES or RP in RAMSES_DEVICES[self._klass].get(_10E0, {}) ): self._make_cmd(_10E0, retries=3) @property def device_info(self) -> Optional[dict]: # 10E0 return self._msg_value(_10E0) @property def schema(self) -> dict: result = super().schema result.update({self.RF_BIND: self._msg_value(_1FC9)}) if _10E0 in self._msgs or _10E0 in RAMSES_DEVICES.get(self._klass, []): result.update({self.DEVICE_INFO: self.device_info}) return result class Device(DeviceInfo, DeviceBase): """The Device base class - also used for unknown device types.""" _DEV_KLASS = DEV_KLASS.DEV _DEV_TYPES = tuple() def _handle_msg(self, msg) -> None: super()._handle_msg(msg) if type(self) is Device and self.type == "30": # self.__class__ is Device, DEX # TODO: the RFG codes need checking if msg.code in (_31D9, _31DA, _31E0) and msg.verb in (I_, RP): self.__class__ = HvacVentilator elif msg.code in (_0006, _0418, _3220) and msg.verb == RQ: self.__class__ = RfgGateway elif msg.code in (_313F,) and msg.verb == W_: self.__class__ = RfgGateway if not msg._gwy.config.enable_eavesdrop: return if ( self._ctl is not None and "zone_idx" in msg.payload and msg.src.type != "01" # TODO: DEX, should be: if controller # and msg.dst.type != "18" ): # TODO: is buggy - remove? how? self._set_parent(self._ctl._evo._get_zone(msg.payload["zone_idx"])) def _set_parent(self, parent, domain=None) -> None: # self._parent """Set the device's parent zone, after validating it. 
There are three possible sources for the parent zone of a device: 1. a 000C packet (from their controller) for actuators only 2. a message.payload["zone_idx"] 3. the sensor-matching algorithm for zone sensors only Devices don't have parents, rather: Zones have children; a mis-configured system could have a device as a child of two domains. """ # NOTE: these imports are here to prevent circular references from .systems import System from .zones import DhwZone, Zone if self._parent is not None and self._parent is not parent: raise CorruptStateError( f"{self} changed parent: {self._parent} to {parent}, " ) if isinstance(parent, Zone): if domain and domain != parent.idx: raise TypeError(f"{self}: domain must be {parent.idx}, not {domain}") domain = parent.idx elif isinstance(parent, DhwZone): # usu. FA if domain not in ("F9", "FA"): # may not be known if eavesdrop'd raise TypeError(f"{self}: domain must be F9 or FA, not {domain}") elif isinstance(parent, System): # usu. FC if domain != "FC": # was: not in ("F9", "FA", "FC", "HW"): raise TypeError(f"{self}: domain must be FC, not {domain}") else: raise TypeError(f"{self}: parent must be System, DHW or Zone, not {parent}") self._set_ctl(parent._ctl) self._parent = parent self._domain_id = domain if hasattr(parent, "devices") and self not in parent.devices: parent.devices.append(self) parent.device_by_id[self.id] = self _LOGGER.debug("Device %s: parent now set to
await asyncio.shield(self.extract_rootfs()) log.info(f'Added expanded image to cache: {self.image_ref_str}, ID: {self.image_id}') with self.step('pulling'): await self.run_until_done_or_deleted(localize_rootfs) with self.step('setting up overlay'): await self.run_until_done_or_deleted(self.setup_overlay) with self.step('setting up network'): await self.run_until_done_or_deleted(self.setup_network_namespace) with self.step('running'): timed_out = await self.run_until_done_or_deleted(self.run_container) self.container_status = await self.get_container_status() with self.step('uploading_log'): await self.upload_log() if timed_out: self.short_error = 'timed out' raise JobTimeoutError(f'timed out after {self.timeout}s') if self.container_status['exit_code'] == 0: self.state = 'succeeded' else: if self.container_status['out_of_memory']: self.short_error = 'out of memory' self.state = 'failed' except asyncio.CancelledError: raise except Exception as e: if not isinstance(e, (JobDeletedError, JobTimeoutError)) and not user_error(e): log.exception(f'while running {self}') self.state = 'error' self.error = traceback.format_exc() finally: try: await self.delete_container() finally: if self.image_id: worker.image_data[self.image_id] -= 1 async def run_until_done_or_deleted(self, f: Callable[[], Awaitable[Any]]): step = asyncio.ensure_future(f()) deleted = asyncio.ensure_future(self.deleted_event.wait()) try: await asyncio.wait([deleted, step], return_when=asyncio.FIRST_COMPLETED) if deleted.done(): raise JobDeletedError() assert step.done() return step.result() finally: for t in (step, deleted): if t.done(): e = t.exception() if e and not user_error(e): log.exception(e) else: t.cancel() def is_job_deleted(self) -> bool: return self.job.deleted def step(self, name: str): return self.timings.step(name) async def pull_image(self): is_google_image = is_google_registry_domain(self.image_ref.domain) is_public_image = self.image_ref.name() in PUBLIC_IMAGES try: if not is_google_image: 
await self.ensure_image_is_pulled() elif is_public_image: auth = await self.batch_worker_access_token() await self.ensure_image_is_pulled(auth=auth) else: # Pull to verify this user has access to this # image. # FIXME improve the performance of this with a # per-user image cache. auth = self.current_user_access_token() await docker_call_retry(MAX_DOCKER_IMAGE_PULL_SECS, f'{self}')( docker.images.pull, self.image_ref_str, auth=auth ) except DockerError as e: if e.status == 404 and 'pull access denied' in e.message: self.short_error = 'image cannot be pulled' elif 'not found: manifest unknown' in e.message: self.short_error = 'image not found' raise image_config, _ = await check_exec_output('docker', 'inspect', self.image_ref_str) image_configs[self.image_ref_str] = json.loads(image_config)[0] async def ensure_image_is_pulled(self, auth=None): try: await docker_call_retry(MAX_DOCKER_OTHER_OPERATION_SECS, f'{self}')(docker.images.get, self.image_ref_str) except DockerError as e: if e.status == 404: await docker_call_retry(MAX_DOCKER_IMAGE_PULL_SECS, f'{self}')( docker.images.pull, self.image_ref_str, auth=auth ) else: raise async def batch_worker_access_token(self): async with await request_retry_transient_errors( self.client_session, 'POST', 'http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token', headers={'Metadata-Flavor': 'Google'}, timeout=aiohttp.ClientTimeout(total=60), ) as resp: access_token = (await resp.json())['access_token'] return {'username': 'oauth2accesstoken', 'password': access_token} def current_user_access_token(self): key = base64.b64decode(self.job.gsa_key['key.json']).decode() return {'username': '_json_key', 'password': key} async def extract_rootfs(self): assert self.rootfs_path os.makedirs(self.rootfs_path) await check_shell( f'id=$(docker create {self.image_id}) && docker export $id | tar -C {self.rootfs_path} -xf - && docker rm $id' ) log.info(f'Extracted rootfs for image {self.image_ref_str}') async def 
setup_overlay(self): lower_dir = self.rootfs_path upper_dir = f'{self.container_overlay_path}/upper' work_dir = f'{self.container_overlay_path}/work' merged_dir = f'{self.container_overlay_path}/merged' for d in (upper_dir, work_dir, merged_dir): os.makedirs(d) await check_shell( f'mount -t overlay overlay -o lowerdir={lower_dir},upperdir={upper_dir},workdir={work_dir} {merged_dir}' ) self.overlay_mounted = True async def setup_network_namespace(self): network = self.spec.get('network') if network is None or network is True: self.netns = await network_allocator.allocate_public() else: assert network == 'private' self.netns = await network_allocator.allocate_private() if self.port is not None: self.host_port = await port_allocator.allocate() await self.netns.expose_port(self.port, self.host_port) async def run_container(self) -> bool: self.started_at = time_msecs() try: await self.write_container_config() async with async_timeout.timeout(self.timeout): with open(self.log_path, 'w') as container_log: log.info(f'Creating the crun run process for {self}') self.process = await asyncio.create_subprocess_exec( 'crun', 'run', '--bundle', f'{self.container_overlay_path}/merged', '--config', f'{self.config_path}/config.json', self.container_name, stdout=container_log, stderr=container_log, ) await self.process.wait() log.info(f'crun process completed for {self}') except asyncio.TimeoutError: return True finally: self.finished_at = time_msecs() return False async def write_container_config(self): os.makedirs(self.config_path) with open(f'{self.config_path}/config.json', 'w') as f: f.write(json.dumps(await self.container_config())) # https://github.com/opencontainers/runtime-spec/blob/master/config.md async def container_config(self): uid, gid = await self._get_in_container_user() weight = worker_fraction_in_1024ths(self.spec['cpu']) workdir = self.image_config['Config']['WorkingDir'] default_docker_capabilities = [ 'CAP_CHOWN', 'CAP_DAC_OVERRIDE', 'CAP_FSETID', 'CAP_FOWNER', 
'CAP_MKNOD', 'CAP_NET_RAW', 'CAP_SETGID', 'CAP_SETUID', 'CAP_SETFCAP', 'CAP_SETPCAP', 'CAP_NET_BIND_SERVICE', 'CAP_SYS_CHROOT', 'CAP_KILL', 'CAP_AUDIT_WRITE', ] config = { 'ociVersion': '1.0.1', 'root': { 'path': '.', 'readonly': False, }, 'hostname': self.netns.hostname, 'mounts': self._mounts(uid, gid), 'process': { 'user': { # uid/gid *inside the container* 'uid': uid, 'gid': gid, }, 'args': self.spec['command'], 'env': self._env(), 'cwd': workdir if workdir != "" else "/", 'capabilities': { 'bounding': default_docker_capabilities, 'effective': default_docker_capabilities, 'inheritable': default_docker_capabilities, 'permitted': default_docker_capabilities, }, }, 'linux': { 'namespaces': [ {'type': 'pid'}, { 'type': 'network', 'path': f'/var/run/netns/{self.netns.network_ns_name}', }, {'type': 'mount'}, {'type': 'ipc'}, {'type': 'uts'}, {'type': 'cgroup'}, ], 'uidMappings': [], 'gidMappings': [], 'resources': { 'cpu': {'shares': weight}, 'memory': { 'limit': self.spec['memory'], 'reservation': self.spec['memory'], }, # 'blockIO': {'weight': min(weight, 1000)}, FIXME blkio.weight not supported }, 'maskedPaths': [ '/proc/asound', '/proc/acpi', '/proc/kcore', '/proc/keys', '/proc/latency_stats', '/proc/timer_list', '/proc/timer_stats', '/proc/sched_debug', '/proc/scsi', '/sys/firmware', ], 'readonlyPaths': [ '/proc/bus', '/proc/fs', '/proc/irq', '/proc/sys', '/proc/sysrq-trigger', ], }, } if self.spec.get('unconfined'): config['linux']['maskedPaths'] = [] config['linux']['readonlyPaths'] = [] config['process']['apparmorProfile'] = 'unconfined' config['linux']['seccomp'] = {'defaultAction': "SCMP_ACT_ALLOW"} return config async def _get_in_container_user(self): user = self.image_config['Config']['User'] if not user: uid, gid = 0, 0 elif ":" in user: uid, gid = user.split(":") else: uid, gid = await self._read_user_from_rootfs(user) return int(uid), int(gid) async def _read_user_from_rootfs(self, user) -> Tuple[str, str]: with open(f'{self.rootfs_path}/etc/passwd', 
'r') as passwd: for record in passwd: if record.startswith(user): _, _, uid, gid, _, _, _ = record.split(":") return uid, gid raise ValueError("Container user not found in image's /etc/passwd") def _mounts(self, uid, gid): # Only supports empty volumes external_volumes = [] volumes = self.image_config['Config']['Volumes'] if volumes: for v_container_path in volumes: if not v_container_path.startswith('/'): v_container_path = '/' + v_container_path v_host_path = f'{self.container_scratch}/volumes{v_container_path}' os.makedirs(v_host_path) if uid != 0 or gid != 0: os.chown(v_host_path, uid, gid) external_volumes.append( { 'source': v_host_path, 'destination': v_container_path, 'type': 'none', 'options': ['rbind', 'rw', 'shared'], } ) return ( self.spec.get('volume_mounts') + external_volumes + [ # Recommended filesystems: # https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-filesystems { 'source': 'proc', 'destination': '/proc', 'type': 'proc', 'options': ['nosuid', 'noexec', 'nodev'], }, { 'source': 'tmpfs', 'destination': '/dev', 'type': 'tmpfs', 'options': ['nosuid', 'strictatime', 'mode=755', 'size=65536k'], }, { 'source': 'sysfs', 'destination': '/sys', 'type': 'sysfs', 'options': ['nosuid', 'noexec', 'nodev', 'ro'], }, { 'source': 'cgroup', 'destination': '/sys/fs/cgroup', 'type': 'cgroup', 'options': ['nosuid', 'noexec', 'nodev', 'ro'], }, { 'source': 'devpts', 'destination': '/dev/pts', 'type': 'devpts', 'options': ['nosuid', 'noexec', 'nodev'], }, { 'source': 'mqueue', 'destination': '/dev/mqueue', 'type': 'mqueue', 'options': ['nosuid', 'noexec', 'nodev'], }, { 'source': 'shm', 'destination': '/dev/shm', 'type': 'tmpfs', 'options': ['nosuid', 'noexec', 'nodev', 'mode=1777', 'size=67108864'], }, { 'source': f'/etc/netns/{self.netns.network_ns_name}/resolv.conf', 'destination': '/etc/resolv.conf', 'type': 'none', 'options': ['rbind', 'ro'], }, { 'source': f'/etc/netns/{self.netns.network_ns_name}/hosts', 'destination': 
                '/etc/hosts',
                'type': 'none',
                'options': ['rbind', 'ro'],
            },
        ]
    )

def _env(self):
    # Image-declared env first, then job-spec env; port/IP variables are
    # appended only when a port was actually allocated.
    env = self.image_config['Config']['Env'] + self.spec.get('env', [])
    if self.port is not None:
        assert self.host_port is not None
        env.append(f'HAIL_BATCH_WORKER_PORT={self.host_port}')
        env.append(f'HAIL_BATCH_WORKER_IP={IP_ADDRESS}')
    return env

async def delete_container(self):
    """Tear down everything run_container/setup_* created: the crun
    container, its process, the overlay mount, and port/netns allocations.
    Best-effort throughout — failures are logged, not raised."""
    if self.container_is_running():
        try:
            log.info(f'{self} container is still running, killing crun process')
            try:
                await check_exec_output('crun', 'kill', '--all', self.container_name, 'SIGKILL')
            except CalledProcessError as e:
                # crun exits 1 with this message when the container is
                # already gone; anything else is worth logging.
                not_extant_message = (
                    b'error opening file `/run/crun/'
                    + self.container_name.encode()
                    + b'/status`: No such file or directory')
                if not (e.returncode == 1 and not_extant_message in e.stderr):
                    log.exception(f'while deleting container {self}', exc_info=True)
        finally:
            try:
                # Escalate: SIGTERM first, SIGKILL if it will not die.
                await send_signal_and_wait(self.process, 'SIGTERM', timeout=5)
            except asyncio.TimeoutError:
                try:
                    await send_signal_and_wait(self.process, 'SIGKILL', timeout=5)
                except asyncio.CancelledError:
                    raise
                except Exception:
                    log.exception(f'could not kill process for container {self}')
            finally:
                self.process = None
    if self.overlay_mounted:
        try:
            # Lazy unmount (-l) in case something still holds the mount.
            await check_shell(f'umount -l {self.container_overlay_path}/merged')
            self.overlay_mounted = False
        except asyncio.CancelledError:
            raise
        except Exception:
            log.exception(f'while unmounting overlay in {self}', exc_info=True)
    if self.host_port is not None:
        port_allocator.free(self.host_port)
        self.host_port = None
    if self.netns:
        network_allocator.free(self.netns)
        self.netns = None

async def delete(self):
    # Signals deletion; presumably the task running this container reacts
    # to deleted_event and performs the teardown — confirm against caller.
    log.info(f'deleting {self}')
    self.deleted_event.set()

# Shape of the dict produced by status():
# {
#     name: str,
#     state: str, (pending, pulling, creating, starting, running, uploading_log, deleting, succeeded, error, failed)
#     timing: dict(str, float),
#     error: str, (optional)
#     short_error: str, (optional)
#     container_status: {
#         state: str,
#         started_at: int, (date)
#         finished_at: int, (date)
#         out_of_memory: bool,
#         exit_code: int
#     }
# }
async def status(self, state=None):
    """Return this container's status dict; `state` overrides self.state."""
    if not state:
        state = self.state
    status = {'name': self.name, 'state': state, 'timing': self.timings.to_dict()}
    if self.error:
        status['error'] = self.error
    if self.short_error:
        status['short_error'] = self.short_error
    # Prefer the recorded final container status; fall back to a live
    # snapshot while the container is still running.
    if self.container_status:
        status['container_status'] = self.container_status
    elif self.container_is_running():
        status['container_status'] = await self.get_container_status()
    return status

async def get_container_status(self):
    """Snapshot the crun process state, or None if it was never started."""
    if not self.process:
        return None
    status = {
        'started_at': self.started_at,
        'finished_at': self.finished_at,
    }
    if self.container_is_running():
        status['state'] = 'running'
        status['out_of_memory'] = False
    else:
        status['state'] = 'finished'
        status['exit_code'] = self.process.returncode
        # 137 == 128 + SIGKILL, used here as the OOM-kill heuristic.
        status['out_of_memory'] = self.process.returncode == 137
    return status

def container_is_running(self):
    # Process was spawned and has not exited yet.
    return self.process is not None and self.process.returncode is None

def container_finished(self):
    # Process was spawned and has exited (any exit code).
    return self.process is not None and self.process.returncode is not None

async def upload_log(self):
    # Persist this container's log into the batch file store.
    await worker.file_store.write_log_file(
        self.job.format_version,
        self.job.batch_id,
        self.job.job_id,
        self.job.attempt_id,
        self.name,
        await self.get_log(),
    )

async def get_log(self):
    """Return the container log contents, or '' if no log file exists."""
    if os.path.exists(self.log_path):
        stream = await self.fs.open(self.log_path)
        async with stream:
            return (await stream.read()).decode()
    return ''

def __str__(self):
    return f'container {self.job.id}/{self.name}'

def populate_secret_host_path(host_path, secret_data):
    # Materialize each secret entry as a file under host_path.
    os.makedirs(host_path, exist_ok=True)
    if secret_data is not None:
        for filename, data in secret_data.items():
            # NOTE(review): '(unknown)' below looks like corrupted/placeholder
            # text — expected f'{host_path}/{filename}'; confirm upstream.
            with open(f'{host_path}/(unknown)',
gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. Defined in ../src/operator/regression_output.cc:L152 Parameters ---------- data : Symbol Input data to the function. label : Symbol Input label to the function. grad_scale : float, optional, default=1 Scale the gradient by a float factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def MAERegressionOutput(data=None, label=None, grad_scale=_Null, name=None, attr=None, out=None, **kwargs): r"""Computes mean absolute error of the input. MAE is a risk metric corresponding to the expected value of the absolute error. If :math:`\hat{y}_i` is the predicted value of the i-th sample, and :math:`y_i` is the corresponding target value, then the mean absolute error (MAE) estimated over :math:`n` samples is defined as :math:`\text{MAE}(\textbf{Y}, \hat{\textbf{Y}} ) = \frac{1}{n} \sum_{i=0}^{n-1} \lVert \textbf{y}_i - \hat{\textbf{y}}_i \rVert_1` .. note:: Use the MAERegressionOutput as the final output layer of a net. The storage type of ``label`` can be ``default`` or ``csr`` - MAERegressionOutput(default, default) = default - MAERegressionOutput(default, csr) = default By default, gradients of this loss function are scaled by factor `1/m`, where m is the number of regression outputs of a training example. The parameter `grad_scale` can be used to change this scale to `grad_scale/m`. Defined in ../src/operator/regression_output.cc:L120 Parameters ---------- data : Symbol Input data to the function. label : Symbol Input label to the function. grad_scale : float, optional, default=1 Scale the gradient by a float factor name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
""" return (0,) def _contrib_round_ste(data=None, name=None, attr=None, out=None, **kwargs): r"""Straight-through-estimator of `round()`. In forward pass, returns element-wise rounded value to the nearest integer of the input (same as `round()`). In backward pass, returns gradients of ``1`` everywhere (instead of ``0`` everywhere as in `round()`): :math:`\frac{d}{dx}{round\_ste(x)} = 1` vs. :math:`\frac{d}{dx}{round(x)} = 0`. This is useful for quantized training. Reference: Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. Example:: x = round_ste([-1.5, 1.5, -1.9, 1.9, 2.7]) x.backward() x = [-2., 2., -2., 2., 3.] x.grad() = [1., 1., 1., 1., 1.] The storage type of ``round_ste`` output depends upon the input storage type: - round_ste(default) = default - round_ste(row_sparse) = row_sparse - round_ste(csr) = csr Defined in ../src/operator/contrib/stes_op.cc:L55 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def _contrib_sign_ste(data=None, name=None, attr=None, out=None, **kwargs): r"""Straight-through-estimator of `sign()`. In forward pass, returns element-wise sign of the input (same as `sign()`). In backward pass, returns gradients of ``1`` everywhere (instead of ``0`` everywhere as in ``sign()``): :math:`\frac{d}{dx}{sign\_ste(x)} = 1` vs. :math:`\frac{d}{dx}{sign(x)} = 0`. This is useful for quantized training. Reference: Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation. Example:: x = sign_ste([-2, 0, 3]) x.backward() x = [-1., 0., 1.] x.grad() = [1., 1., 1.] The storage type of ``sign_ste`` output depends upon the input storage type: - round_ste(default) = default - round_ste(row_sparse) = row_sparse - round_ste(csr) = csr Defined in ../src/operator/contrib/stes_op.cc:L80 Parameters ---------- data : Symbol The input array. name : string, optional. 
Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def abs(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise absolute value of the input. Example:: abs([-2, 0, 3]) = [2, 0, 3] The storage type of ``abs`` output depends upon the input storage type: - abs(default) = default - abs(row_sparse) = row_sparse - abs(csr) = csr Defined in ../src/operator/tensor/elemwise_unary_op_basic.cc:L720 Parameters ---------- data : Symbol The input array. name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def adagrad_update(weight=None, grad=None, history=None, lr=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for AdaGrad optimizer. Referenced from *Adaptive Subgradient Methods for Online Learning and Stochastic Optimization*, and available at http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf. Updates are applied by:: rescaled_grad = clip(grad * rescale_grad, clip_gradient) history = history + square(rescaled_grad) w = w - learning_rate * rescaled_grad / sqrt(history + epsilon) Note that non-zero values for the weight decay option are not supported. Defined in ../src/operator/optimizer_op.cc:L978 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient history : Symbol History lr : float, required Learning rate epsilon : float, optional, default=1.00000001e-07 epsilon wd : float, optional, default=0 weight decay rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. 
""" return (0,) def adam_update(weight=None, grad=None, mean=None, var=None, lr=_Null, beta1=_Null, beta2=_Null, epsilon=_Null, wd=_Null, rescale_grad=_Null, clip_gradient=_Null, lazy_update=_Null, name=None, attr=None, out=None, **kwargs): r"""Update function for Adam optimizer. Adam is seen as a generalization of AdaGrad. Adam update consists of the following steps, where g represents gradient and m, v are 1st and 2nd order moment estimates (mean and variance). .. math:: g_t = \nabla J(W_{t-1})\\ m_t = \beta_1 m_{t-1} + (1 - \beta_1) g_t\\ v_t = \beta_2 v_{t-1} + (1 - \beta_2) g_t^2\\ W_t = W_{t-1} - \alpha \frac{ m_t }{ \sqrt{ v_t } + \epsilon } It updates the weights using:: m = beta1*m + (1-beta1)*grad v = beta2*v + (1-beta2)*(grad**2) w += - learning_rate * m / (sqrt(v) + epsilon) However, if grad's storage type is ``row_sparse``, ``lazy_update`` is True and the storage type of weight is the same as those of m and v, only the row slices whose indices appear in grad.indices are updated (for w, m and v):: for row in grad.indices: m[row] = beta1*m[row] + (1-beta1)*grad[row] v[row] = beta2*v[row] + (1-beta2)*(grad[row]**2) w[row] += - learning_rate * m[row] / (sqrt(v[row]) + epsilon) Defined in ../src/operator/optimizer_op.cc:L686 Parameters ---------- weight : Symbol Weight grad : Symbol Gradient mean : Symbol Moving mean var : Symbol Moving variance lr : float, required Learning rate beta1 : float, optional, default=0.899999976 The decay rate for the 1st moment estimates. beta2 : float, optional, default=0.999000013 The decay rate for the 2nd moment estimates. epsilon : float, optional, default=9.99999994e-09 A small constant for numerical stability. wd : float, optional, default=0 Weight decay augments the objective function with a regularization term that penalizes large weights. The penalty scales with the square of the magnitude of each weight. rescale_grad : float, optional, default=1 Rescale gradient to grad = rescale_grad*grad. 
clip_gradient : float, optional, default=-1 Clip gradient to the range of [-clip_gradient, clip_gradient] If clip_gradient <= 0, gradient clipping is turned off. grad = max(min(grad, clip_gradient), -clip_gradient). lazy_update : boolean, optional, default=1 If true, lazy updates are applied if gradient's stype is row_sparse and all of w, m and v have the same stype name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def add_n(*args, **kwargs): r"""Adds all input arguments element-wise. .. math:: add\_n(a_1, a_2, ..., a_n) = a_1 + a_2 + ... + a_n ``add_n`` is potentially more efficient than calling ``add`` by `n` times. The storage type of ``add_n`` output depends on storage types of inputs - add_n(row_sparse, row_sparse, ..) = row_sparse - add_n(default, csr, default) = default - add_n(any input combinations longer than 4 (>4) with at least one default type) = default - otherwise, ``add_n`` falls all inputs back to default storage and generates default storage Defined in ../src/operator/tensor/elemwise_sum.cc:L156 This function support variable length of positional input. Parameters ---------- args : Symbol[] Positional input arguments name : string, optional. Name of the resulting symbol. Returns ------- Symbol The result symbol. """ return (0,) def arccos(data=None, name=None, attr=None, out=None, **kwargs): r"""Returns element-wise inverse cosine of the input array. The input should be in
<filename>trio_cdp/generated/css.py # DO NOT EDIT THIS FILE! # # This code is generated off of PyCDP modules. If you need to make # changes, edit the generator and regenerate all of the modules. from __future__ import annotations import typing from ..context import get_connection_context, get_session_context import cdp.css from cdp.css import ( CSSComputedStyleProperty, CSSContainerQuery, CSSKeyframeRule, CSSKeyframesRule, CSSMedia, CSSProperty, CSSRule, CSSStyle, CSSStyleSheetHeader, FontFace, FontVariationAxis, FontsUpdated, InheritedStyleEntry, MediaQuery, MediaQueryExpression, MediaQueryResultChanged, PlatformFontUsage, PseudoElementMatches, RuleMatch, RuleUsage, SelectorList, ShorthandEntry, SourceRange, StyleDeclarationEdit, StyleSheetAdded, StyleSheetChanged, StyleSheetId, StyleSheetOrigin, StyleSheetRemoved, Value ) async def add_rule( style_sheet_id: StyleSheetId, rule_text: str, location: SourceRange ) -> CSSRule: r''' Inserts a new rule with the given ``ruleText`` in a stylesheet with given ``styleSheetId``, at the position specified by ``location``. :param style_sheet_id: The css style sheet identifier where a new rule should be inserted. :param rule_text: The text of a new rule. :param location: Text position of a new rule in the target style sheet. :returns: The newly created rule. ''' session = get_session_context('css.add_rule') return await session.execute(cdp.css.add_rule(style_sheet_id, rule_text, location)) async def collect_class_names( style_sheet_id: StyleSheetId ) -> typing.List[str]: r''' Returns all class names from specified stylesheet. :param style_sheet_id: :returns: Class name list. ''' session = get_session_context('css.collect_class_names') return await session.execute(cdp.css.collect_class_names(style_sheet_id)) async def create_style_sheet( frame_id: cdp.page.FrameId ) -> StyleSheetId: r''' Creates a new special "via-inspector" stylesheet in the frame with given ``frameId``. 
:param frame_id: Identifier of the frame where "via-inspector" stylesheet should be created. :returns: Identifier of the created "via-inspector" stylesheet. ''' session = get_session_context('css.create_style_sheet') return await session.execute(cdp.css.create_style_sheet(frame_id)) async def disable() -> None: r''' Disables the CSS agent for the given page. ''' session = get_session_context('css.disable') return await session.execute(cdp.css.disable()) async def enable() -> None: r''' Enables the CSS agent for the given page. Clients should not assume that the CSS agent has been enabled until the result of this command is received. ''' session = get_session_context('css.enable') return await session.execute(cdp.css.enable()) async def force_pseudo_state( node_id: cdp.dom.NodeId, forced_pseudo_classes: typing.List[str] ) -> None: r''' Ensures that the given node will have specified pseudo-classes whenever its style is computed by the browser. :param node_id: The element id for which to force the pseudo state. :param forced_pseudo_classes: Element pseudo classes to force when computing the element's style. ''' session = get_session_context('css.force_pseudo_state') return await session.execute(cdp.css.force_pseudo_state(node_id, forced_pseudo_classes)) async def get_background_colors( node_id: cdp.dom.NodeId ) -> typing.Tuple[typing.Optional[typing.List[str]], typing.Optional[str], typing.Optional[str]]: r''' :param node_id: Id of the node to get background colors for. :returns: A tuple with the following items: 0. **backgroundColors** - *(Optional)* The range of background colors behind this element, if it contains any visible text. If no visible text is present, this will be undefined. In the case of a flat background color, this will consist of simply that color. In the case of a gradient, this will consist of each of the color stops. For anything more complicated, this will be an empty array. Images will be ignored (as if the image had failed to load). 1. 
**computedFontSize** - *(Optional)* The computed font size for this node, as a CSS computed value string (e.g. '12px'). 2. **computedFontWeight** - *(Optional)* The computed font weight for this node, as a CSS computed value string (e.g. 'normal' or '100'). ''' session = get_session_context('css.get_background_colors') return await session.execute(cdp.css.get_background_colors(node_id)) async def get_computed_style_for_node( node_id: cdp.dom.NodeId ) -> typing.List[CSSComputedStyleProperty]: r''' Returns the computed style for a DOM node identified by ``nodeId``. :param node_id: :returns: Computed style for the specified DOM node. ''' session = get_session_context('css.get_computed_style_for_node') return await session.execute(cdp.css.get_computed_style_for_node(node_id)) async def get_inline_styles_for_node( node_id: cdp.dom.NodeId ) -> typing.Tuple[typing.Optional[CSSStyle], typing.Optional[CSSStyle]]: r''' Returns the styles defined inline (explicitly in the "style" attribute and implicitly, using DOM attributes) for a DOM node identified by ``nodeId``. :param node_id: :returns: A tuple with the following items: 0. **inlineStyle** - *(Optional)* Inline style for the specified DOM node. 1. **attributesStyle** - *(Optional)* Attribute-defined element style (e.g. resulting from "width=20 height=100%"). ''' session = get_session_context('css.get_inline_styles_for_node') return await session.execute(cdp.css.get_inline_styles_for_node(node_id)) async def get_matched_styles_for_node( node_id: cdp.dom.NodeId ) -> typing.Tuple[typing.Optional[CSSStyle], typing.Optional[CSSStyle], typing.Optional[typing.List[RuleMatch]], typing.Optional[typing.List[PseudoElementMatches]], typing.Optional[typing.List[InheritedStyleEntry]], typing.Optional[typing.List[CSSKeyframesRule]]]: r''' Returns requested styles for a DOM node identified by ``nodeId``. :param node_id: :returns: A tuple with the following items: 0. **inlineStyle** - *(Optional)* Inline style for the specified DOM node. 
1. **attributesStyle** - *(Optional)* Attribute-defined element style (e.g. resulting from "width=20 height=100%"). 2. **matchedCSSRules** - *(Optional)* CSS rules matching this node, from all applicable stylesheets. 3. **pseudoElements** - *(Optional)* Pseudo style matches for this node. 4. **inherited** - *(Optional)* A chain of inherited styles (from the immediate node parent up to the DOM tree root). 5. **cssKeyframesRules** - *(Optional)* A list of CSS keyframed animations matching this node. ''' session = get_session_context('css.get_matched_styles_for_node') return await session.execute(cdp.css.get_matched_styles_for_node(node_id)) async def get_media_queries() -> typing.List[CSSMedia]: r''' Returns all media queries parsed by the rendering engine. :returns: ''' session = get_session_context('css.get_media_queries') return await session.execute(cdp.css.get_media_queries()) async def get_platform_fonts_for_node( node_id: cdp.dom.NodeId ) -> typing.List[PlatformFontUsage]: r''' Requests information about platform fonts which we used to render child TextNodes in the given node. :param node_id: :returns: Usage statistics for every employed platform font. ''' session = get_session_context('css.get_platform_fonts_for_node') return await session.execute(cdp.css.get_platform_fonts_for_node(node_id)) async def get_style_sheet_text( style_sheet_id: StyleSheetId ) -> str: r''' Returns the current textual content for a stylesheet. :param style_sheet_id: :returns: The stylesheet text. ''' session = get_session_context('css.get_style_sheet_text') return await session.execute(cdp.css.get_style_sheet_text(style_sheet_id)) async def set_container_query_text( style_sheet_id: StyleSheetId, range_: SourceRange, text: str ) -> CSSContainerQuery: r''' Modifies the expression of a container query. **EXPERIMENTAL** :param style_sheet_id: :param range_: :param text: :returns: The resulting CSS container query rule after modification. 
''' session = get_session_context('css.set_container_query_text') return await session.execute(cdp.css.set_container_query_text(style_sheet_id, range_, text)) async def set_effective_property_value_for_node( node_id: cdp.dom.NodeId, property_name: str, value: str ) -> None: r''' Find a rule with the given active property for the given node and set the new value for this property :param node_id: The element id for which to set property. :param property_name: :param value: ''' session = get_session_context('css.set_effective_property_value_for_node') return await session.execute(cdp.css.set_effective_property_value_for_node(node_id, property_name, value)) async def set_keyframe_key( style_sheet_id: StyleSheetId, range_: SourceRange, key_text: str ) -> Value: r''' Modifies the keyframe rule key text. :param style_sheet_id: :param range_: :param key_text: :returns: The resulting key text after modification. ''' session = get_session_context('css.set_keyframe_key') return await session.execute(cdp.css.set_keyframe_key(style_sheet_id, range_, key_text)) async def set_local_fonts_enabled( enabled: bool ) -> None: r''' Enables/disables rendering of local CSS fonts (enabled by default). **EXPERIMENTAL** :param enabled: Whether rendering of local fonts is enabled. ''' session = get_session_context('css.set_local_fonts_enabled') return await session.execute(cdp.css.set_local_fonts_enabled(enabled)) async def set_media_text( style_sheet_id: StyleSheetId, range_: SourceRange, text: str ) -> CSSMedia: r''' Modifies the rule selector. :param style_sheet_id: :param range_: :param text: :returns: The resulting CSS media rule after modification. ''' session = get_session_context('css.set_media_text') return await session.execute(cdp.css.set_media_text(style_sheet_id, range_, text)) async def set_rule_selector( style_sheet_id: StyleSheetId, range_: SourceRange, selector: str ) -> SelectorList: r''' Modifies the rule selector. 
:param style_sheet_id: :param range_: :param selector: :returns: The resulting selector list after modification. ''' session = get_session_context('css.set_rule_selector') return await session.execute(cdp.css.set_rule_selector(style_sheet_id, range_, selector)) async def set_style_sheet_text( style_sheet_id: StyleSheetId, text: str ) -> typing.Optional[str]: r''' Sets the new stylesheet text. :param style_sheet_id: :param text: :returns: *(Optional)* URL of source map associated with script (if any). ''' session = get_session_context('css.set_style_sheet_text') return await session.execute(cdp.css.set_style_sheet_text(style_sheet_id, text)) async def set_style_texts( edits: typing.List[StyleDeclarationEdit] ) -> typing.List[CSSStyle]: r''' Applies specified style edits one after another in the given order. :param edits: :returns: The resulting styles after modification. ''' session = get_session_context('css.set_style_texts') return await session.execute(cdp.css.set_style_texts(edits)) async def start_rule_usage_tracking() -> None: r''' Enables the selector recording. ''' session = get_session_context('css.start_rule_usage_tracking') return await session.execute(cdp.css.start_rule_usage_tracking()) async def stop_rule_usage_tracking() -> typing.List[RuleUsage]: r''' Stop tracking rule usage and return the list of rules that were used since last call to ``takeCoverageDelta`` (or since start of coverage instrumentation) :returns: ''' session = get_session_context('css.stop_rule_usage_tracking') return await session.execute(cdp.css.stop_rule_usage_tracking()) async def take_computed_style_updates() -> typing.List[cdp.dom.NodeId]: r''' Polls the next batch of computed style updates. 
**EXPERIMENTAL** :returns: The list of node Ids that have their tracked computed styles updated ''' session = get_session_context('css.take_computed_style_updates') return await session.execute(cdp.css.take_computed_style_updates()) async def take_coverage_delta() -> typing.Tuple[typing.List[RuleUsage], float]: r''' Obtain list of rules that became used since last call to this method (or since start of coverage instrumentation) :returns: A tuple with the following items: 0. **coverage** - 1. **timestamp** - Monotonically increasing time, in seconds. ''' session = get_session_context('css.take_coverage_delta') return await session.execute(cdp.css.take_coverage_delta()) async def track_computed_style_updates( properties_to_track:
else had the book on hold, the # book is now available for anyone to check out. If someone else # had a hold, the license is now reserved for the next patron. # If someone else had a hold, the license is now reserved for the # next patron, and we need to update that hold. _db = Session.object_session(hold) licensepool = hold.license_pool _db.delete(hold) self.update_licensepool(licensepool) return True def patron_activity(self, patron, pin): """Look up non-expired loans for this collection in the database.""" _db = Session.object_session(patron) loans = ( _db.query(Loan) .join(Loan.license_pool) .filter(LicensePool.collection_id == self.collection_id) .filter(Loan.patron == patron) .filter(Loan.end >= utc_now()) ) # Get the patron's holds. If there are any expired holds, delete them. # Update the end date and position for the remaining holds. holds = ( _db.query(Hold) .join(Hold.license_pool) .filter(LicensePool.collection_id == self.collection_id) .filter(Hold.patron == patron) ) remaining_holds = [] for hold in holds: if hold.end and hold.end < utc_now(): _db.delete(hold) self.update_licensepool(hold.license_pool) else: self._update_hold_end_date(hold) remaining_holds.append(hold) return [ LoanInfo( loan.license_pool.collection, loan.license_pool.data_source.name, loan.license_pool.identifier.type, loan.license_pool.identifier.identifier, loan.start, loan.end, external_identifier=loan.external_identifier, ) for loan in loans ] + [ HoldInfo( hold.license_pool.collection, hold.license_pool.data_source.name, hold.license_pool.identifier.type, hold.license_pool.identifier.identifier, start_date=hold.start, end_date=hold.end, hold_position=hold.position, ) for hold in remaining_holds ] def update_loan(self, loan, status_doc=None): """Check a loan's status, and if it is no longer active, delete the loan and update its pool's availability. 
""" _db = Session.object_session(loan) if not status_doc: status_doc = self.get_license_status_document(loan) status = status_doc.get("status") # We already check that the status is valid in get_license_status_document, # but if the document came from a notification it hasn't been checked yet. if status not in self.STATUS_VALUES: raise BadResponseException( "The License Status Document had an unknown status value." ) if status in [ self.REVOKED_STATUS, self.RETURNED_STATUS, self.CANCELLED_STATUS, self.EXPIRED_STATUS, ]: # This loan is no longer active. Update the pool's availability # and delete the loan. # Update the license loan.license.checkin() # If there are holds, the license is reserved for the next patron. _db.delete(loan) self.update_licensepool(loan.license_pool) def checkout_to_external_library(self, client, licensepool, hold=None): try: return self._checkout(client, licensepool, hold) except NoAvailableCopies as e: return self._place_hold(client, licensepool) def checkin_from_external_library(self, client, loan): self._checkin(loan) def fulfill_for_external_library(self, client, loan, mechanism): return self._fulfill(loan) def release_hold_from_external_library(self, client, hold): return self._release_hold(hold) class ODLXMLParser(OPDSXMLParser): NAMESPACES = dict(OPDSXMLParser.NAMESPACES, odl="http://opds-spec.org/odl") class ODLImporter(OPDSImporter): """Import information and formats from an ODL feed. The only change from OPDSImporter is that this importer extracts format information from 'odl:license' tags. """ NAME = ODLAPI.NAME PARSER_CLASS = ODLXMLParser # The media type for a License Info Document, used to get information # about the license. 
LICENSE_INFO_DOCUMENT_MEDIA_TYPE = "application/vnd.odl.info+json"

# Media type for a Feedbooks DRM-protected audiobook manifest.
FEEDBOOKS_AUDIO = "{0}; protection={1}".format(
    MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,
    DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,
)

# Keys used in the LICENSE_FORMATS entries below.
CONTENT_TYPE = "content-type"
DRM_SCHEME = "drm-scheme"

# Maps a dcterms:format value to the delivery content type / DRM scheme.
LICENSE_FORMATS = {
    FEEDBOOKS_AUDIO: {
        CONTENT_TYPE: MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE,
        DRM_SCHEME: DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM,
    }
}

@classmethod
def fetch_license_info(cls, document_link: str, do_get: Callable) -> Optional[dict]:
    """GET the License Info Document at `document_link` via `do_get`.

    Returns the parsed JSON dict on HTTP 200/201; otherwise logs a warning
    and returns None.
    """
    status_code, _, response = do_get(document_link, headers={})
    if status_code in (200, 201):
        license_info_document = json.loads(response)
        return license_info_document
    else:
        logging.warning(
            f"License Info Document is not available. "
            f"Status link {document_link} failed with {status_code} code."
        )
        return None

@classmethod
def parse_license_info(
    cls,
    license_info_document: dict,
    license_info_link: str,
    checkout_link: str,
) -> Optional[LicenseData]:
    """Check the license's attributes passed as parameters:
    - if they're correct, turn them into a LicenseData object
    - otherwise, return a None

    :param license_info_document: License Info Document
    :param license_info_link: Link to fetch License Info Document
    :param checkout_link: License's checkout link

    :return: LicenseData if all the license's attributes are correct, None, otherwise
    """
    identifier = license_info_document.get("identifier")
    document_status = license_info_document.get("status")
    document_checkouts = license_info_document.get("checkouts", {})
    document_left = document_checkouts.get("left")
    document_available = document_checkouts.get("available")
    document_terms = license_info_document.get("terms", {})
    document_expires = document_terms.get("expires")
    document_concurrency = document_terms.get("concurrency")
    document_format = license_info_document.get("format")

    # An identifier is the one hard requirement.
    if identifier is None:
        logging.error("License info document has no identifier.")
        return None

    expires = None
    if document_expires is not None:
        expires =
dateutil.parser.parse(document_expires) expires = util.datetime_helpers.to_utc(expires) if document_status is not None: status = LicenseStatus.get(document_status) if status.value != document_status: logging.warning( f"Identifier # {identifier} unknown status value " f"{document_status} defaulting to {status.value}." ) else: status = LicenseStatus.unavailable logging.warning( f"Identifier # {identifier} license info document does not have " f"required key 'status'." ) if document_available is not None: available = int(document_available) else: available = 0 logging.warning( f"Identifier # {identifier} license info document does not have " f"required key 'checkouts.available'." ) left = None if document_left is not None: left = int(document_left) concurrency = None if document_concurrency is not None: concurrency = int(document_concurrency) content_types = None if document_format is not None: if isinstance(document_format, str): content_types = [document_format] elif isinstance(document_format, list): content_types = document_format return LicenseData( identifier=identifier, checkout_url=checkout_link, status_url=license_info_link, expires=expires, checkouts_left=left, checkouts_available=available, status=status, terms_concurrency=concurrency, content_types=content_types, ) @classmethod def get_license_data( cls, license_info_link: str, checkout_link: str, feed_license_identifier: str, feed_license_expires: str, feed_concurrency: int, do_get: Callable, ) -> Optional[LicenseData]: license_info_document = cls.fetch_license_info(license_info_link, do_get) if not license_info_document: return None parsed_license = cls.parse_license_info( license_info_document, license_info_link, checkout_link ) if not parsed_license: return None if parsed_license.identifier != feed_license_identifier: # There is a mismatch between the license info document and # the feed we are importing. Since we don't know which to believe # we log an error and continue. 
logging.error( f"Mismatch between license identifier in the feed ({feed_license_identifier}) " f"and the identifier in the license info document " f"({parsed_license.identifier}) ignoring license completely." ) return None if parsed_license.expires != feed_license_expires: logging.error( f"License identifier {feed_license_identifier}. Mismatch between license " f"expiry in the feed ({feed_license_expires}) and the expiry in the license " f"info document ({parsed_license.expires}) setting license status " f"to unavailable." ) parsed_license.status = LicenseStatus.unavailable if parsed_license.terms_concurrency != feed_concurrency: logging.error( f"License identifier {feed_license_identifier}. Mismatch between license " f"concurrency in the feed ({feed_concurrency}) and the " f"concurrency in the license info document (" f"{parsed_license.terms_concurrency}) setting license status " f"to unavailable." ) parsed_license.status = LicenseStatus.unavailable return parsed_license @classmethod def _detail_for_elementtree_entry( cls, parser, entry_tag, feed_url=None, do_get=None ): do_get = do_get or Representation.cautious_http_get # TODO: Review for consistency when updated ODL spec is ready. subtag = parser.text_of_optional_subtag data = OPDSImporter._detail_for_elementtree_entry(parser, entry_tag, feed_url) formats = [] licenses = [] odl_license_tags = parser._xpath(entry_tag, "odl:license") or [] medium = None for odl_license_tag in odl_license_tags: identifier = subtag(odl_license_tag, "dcterms:identifier") full_content_type = subtag(odl_license_tag, "dcterms:format") if not medium: medium = Edition.medium_from_media_type(full_content_type) # By default, dcterms:format includes the media type of a # DRM-free resource. content_type = full_content_type drm_schemes = [] # But it may instead describe an audiobook protected with # the Feedbooks access-control scheme. 
if full_content_type == cls.FEEDBOOKS_AUDIO: content_type = MediaTypes.AUDIOBOOK_MANIFEST_MEDIA_TYPE drm_schemes.append(DeliveryMechanism.FEEDBOOKS_AUDIOBOOK_DRM) # Additional DRM schemes may be described in <odl:protection> # tags. protection_tags = parser._xpath(odl_license_tag, "odl:protection") or [] for protection_tag in protection_tags: drm_scheme = subtag(protection_tag, "dcterms:format") if drm_scheme: drm_schemes.append(drm_scheme) for drm_scheme in drm_schemes or [None]: formats.append( FormatData( content_type=content_type, drm_scheme=drm_scheme, rights_uri=RightsStatus.IN_COPYRIGHT, ) ) data["medium"] = medium checkout_link = None for link_tag in parser._xpath(odl_license_tag, "odl:tlink") or []: rel = link_tag.attrib.get("rel") if rel == Hyperlink.BORROW: checkout_link = link_tag.attrib.get("href") break # Look for a link to the License Info Document for this license. odl_status_link = None for link_tag in parser._xpath(odl_license_tag, "atom:link") or []: attrib = link_tag.attrib rel = attrib.get("rel") type = attrib.get("type", "") if rel == "self" and type.startswith( cls.LICENSE_INFO_DOCUMENT_MEDIA_TYPE ): odl_status_link = attrib.get("href") break expires = None concurrent_checkouts = None terms = parser._xpath(odl_license_tag, "odl:terms") if terms: concurrent_checkouts = subtag(terms[0], "odl:concurrent_checkouts") expires = subtag(terms[0], "odl:expires") if concurrent_checkouts is not None: concurrent_checkouts = int(concurrent_checkouts) if expires is not None: expires = to_utc(dateutil.parser.parse(expires)) if not odl_status_link: parsed_license = None else: parsed_license = cls.get_license_data( odl_status_link, checkout_link, identifier, expires, concurrent_checkouts, do_get, ) if parsed_license is not None: licenses.append(parsed_license) if not data.get("circulation"): data["circulation"] = dict() if not data["circulation"].get("formats"): data["circulation"]["formats"] = [] data["circulation"]["formats"].extend(formats) if not 
data["circulation"].get("licenses"): data["circulation"]["licenses"] = [] data["circulation"]["licenses"].extend(licenses) data["circulation"]["licenses_owned"] = None data["circulation"]["licenses_available"] = None data["circulation"]["licenses_reserved"] = None data["circulation"]["patrons_in_hold_queue"] = None return data class ODLImportMonitor(OPDSImportMonitor): """Import information from an ODL feed.""" PROTOCOL = ODLImporter.NAME SERVICE_NAME = "ODL Import Monitor" def __init__(self, _db, collection, import_class, **import_class_kwargs): # Always force reimport ODL collections to get up to date license information super().__init__( _db, collection, import_class, force_reimport=True, **import_class_kwargs ) class ODLHoldReaper(CollectionMonitor): """Check for holds that have expired and delete them, and update the holds queues for their pools.""" SERVICE_NAME = "ODL Hold Reaper" PROTOCOL = ODLAPI.NAME def __init__(self, _db, collection=None, api=None, **kwargs): super(ODLHoldReaper, self).__init__(_db, collection, **kwargs) self.api =
== labels_mapping else: labels_mapping = spect_dict['labels_mapping'] if 'timebin_dur' in locals(): assert spect_dict['timebin_dur'] == timebin_dur else: timebin_dur = spect_dict['timebin_dur'] if 'spect_params' in locals(): assert spect_dict['spect_params'] == spect_params else: spect_params = spect_dict['spect_params'] X = np.concatenate(spects, axis=1) Y = np.concatenate(labeled_timebins) spect_ID_vector = np.asarray(spect_ID_vector, dtype='int') assert X.shape[-1] == Y.shape[0] # Y has shape (timebins, 1) if X.shape[-1] > target_dur / timebin_dur: correct_length = np.round(target_dur / timebin_dur).astype(int) X = X[:, :correct_length] Y = Y[:correct_length, :] spect_ID_vector = spect_ID_vector[:correct_length] data_dict = {'spects': spects, 'filenames': filenames, 'freq_bins': freq_bins, 'time_bins': all_time_bins, 'labels': labels, 'labeled_timebins': labeled_timebins, 'spect_ID_vector': spect_ID_vector, 'X_' + dict_name: X, 'Y_' + dict_name: Y, 'timebin_dur': timebin_dur, 'spect_params': spect_params, 'labels_mapping': labels_mapping} print('saving data dictionary in {}'.format(output_dir)) data_dict_path = os.path.join(output_dir, dict_name + '_data_dict') joblib.dump(data_dict, data_dict_path) saved_data_dict_paths[dict_name] = data_dict_path return saved_data_dict_paths def make_data_dict_from_spect_files(labelset, spect_files='./spect_files', output_dir='.', dict_name='test'): """load data from a list of spect files and save into a dictionary. For running a summary of prediction error on a list of files you choose. Parameters ---------- labelset : list of str, labels used spect_files : str full path to file containing 'spect_files' list of tuples saved by function make_spects_from_list_of_files. Default is None, in which case this function looks for a file named 'spect_files' in output_dir. output_dir : str path to output_dir containing .spect files. Default is '.' (current working directory). 
dict_name : str prefix to filename '_data_dict' in which the dictionary is saved. Default is 'test'. Returns ------- data_dict_path Function saves 'data_dict' file in output_dir, with following structure: spects : list of ndarray, spectrograms from audio files filenames : list same length as spects, filename of each audio file that was converted to spectrogram freq_bins : ndarray vector of frequencies where each value is a bin center. Same for all spectrograms time_bins : list of ndarrays, each a vector of times where each value is a bin center. One for each spectrogram labelset : list of strings, labels corresponding to each spectrogram labeled_timebins : list of ndarrays, each same length as time_bins but value is a label for that bin. In other words, the labels vector is mapped onto the time_bins vector for the spectrogram. X : ndarray X_train, X_val, or X_test, depending on which data_dict you are looking at. Some number of spectrograms concatenated, enough so that the total duration of the spectrogram in time bins is equal to or greater than the target duration. If greater than target, then X is truncated so it is equal to the target. Y : ndarray Concatenated labeled_timebins vectors corresponding to the spectrograms in X. spect_ID_vector : ndarray Vector where each element is an ID for a song. Used to randomly grab subsets of data of a target duration while still having the subset be composed of individual songs as much as possible. So this vector will look like: [0, 0, 0, ..., 1, 1, 1, ... , n, n, n] where n is equal to or (a little) less than the length of spects. spect_ID_vector.shape[-1] is the same as X.shape[-1] and Y.shape[0]. timebin_dur : float duration of a timebin in seconds from spectrograms spect_params : dict parameters for computing spectrogram as specified in config.ini file. Will be checked against .ini file when running other cli such as learn_curve.py labels_mapping : dict maps str labels for syllables to consecutive integers. 
As explained in docstring for make_spects_from_list_of_files. """ spect_list = joblib.load(spect_files) spect_files = [tup[0] for tup in spect_list] labels = itertools.chain.from_iterable( [tup[2] for tup in spect_list]) labels = set(labels) if labels != set(labelset): raise ValueError( 'labels from all spect_files are not consistent with ' 'labels in labelset.') spects = [] filenames = [] all_time_bins = [] labels = [] labeled_timebins = [] spect_ID_vector = [] for spect_ind, tup in enumerate(spect_list): spect_dict = joblib.load(tup[0]) spects.append(spect_dict['spect']) filenames.append(tup[0]) all_time_bins.append(spect_dict['time_bins']) labels.append(spect_dict['labels']) labeled_timebins.append(spect_dict['labeled_timebins']) spect_ID_vector.extend([spect_ind] * spect_dict['time_bins'].shape[-1]) if 'freq_bins' in locals(): assert np.array_equal(spect_dict['freq_bins'], freq_bins) else: freq_bins = spect_dict['freq_bins'] if 'labels_mapping' in locals(): assert spect_dict['labels_mapping'] == labels_mapping else: labels_mapping = spect_dict['labels_mapping'] if 'timebin_dur' in locals(): assert spect_dict['timebin_dur'] == timebin_dur else: timebin_dur = spect_dict['timebin_dur'] if 'spect_params' in locals(): assert spect_dict['spect_params'] == spect_params else: spect_params = spect_dict['spect_params'] X = np.concatenate(spects, axis=1) Y = np.concatenate(labeled_timebins) spect_ID_vector = np.asarray(spect_ID_vector, dtype='int') assert X.shape[-1] == Y.shape[0] # Y has shape (timebins, 1) data_dict = {'spects': spects, 'filenames': filenames, 'freq_bins': freq_bins, 'time_bins': all_time_bins, 'labels': labels, 'labeled_timebins': labeled_timebins, 'spect_ID_vector': spect_ID_vector, 'X_' + dict_name: X, 'Y_' + dict_name: Y, 'timebin_dur': timebin_dur, 'spect_params': spect_params, 'labels_mapping': labels_mapping} data_dict_path = os.path.join(output_dir, dict_name + '_data_dict') print('saving data dictionary {} in {}' .format(data_dict_path, 
os.path.abspath(output_dir))) joblib.dump(data_dict, data_dict_path) return data_dict_path def get_inds_for_dur(spect_ID_vector, labeled_timebins_vector, labels_mapping, target_duration, timebin_dur_in_s=0.001, max_iter=1000, method='incfreq'): """for getting a training set with random songs but constant duration draws songs at random and adds to list until total duration of all songs => target_duration then truncates at target duration Parameters ---------- spect_ID_vector : ndarray Vector where each element is an ID for a song. Used to randomly grab subsets of data of a target duration while still having the subset be composed of individual songs as much as possible. So this vector will look like: [0, 0, 0, ..., 1, 1, 1, ... , n, n, n] where n is equal to or (a little) less than the length of spects. spect_ID_vector.shape[-1] is the same as X.shape[-1] and Y.shape[0]. labeled_timebins_vector : ndarray Vector of same length as spect_ID_vector but each value is a class. labels_mapping : dict maps str labels to consecutive integers. Used to check that the randomly drawn data set contains all classes. target_duration : float target duration of training set in s timebin_dur_in_s : float duration of each timebin, i.e. each column in spectrogram, in seconds. default is 0.001 s (1 ms) max_iter : int number of iterations to try drawing random subset of song that contains all classes in labels mapping. Defaults is 1000. method : str {'rand', 'incfreq'} method by which to obtain subset from training set 'rand' grabs songs totally at random 'incfreq' grabs songs at random but starts from the subset that includes the least frequently occurring class. Continues to grab randomly in order of ascending frequency until all classes are present, and then goes back to 'rand' method. Default is 'incfreq'. 
Returns ------- inds_to_use : bool numpy boolean vector, True where row in X_train gets used (assumes X_train is one long spectrogram, consisting of all training spectrograms concatenated, and each row being one timebin) """ labeled_timebins_vector = np.squeeze(labeled_timebins_vector) if labeled_timebins_vector.ndim > 1: raise ValueError('number of dimensions of labeled_timebins_vector should be 1 ' '(after np.squeeze), but was equal to {}' .format(labeled_timebins_vector.ndim)) iter = 1 while 1: # keep iterating until we randomly draw subset that meets our criteria if 'inds_to_use' in locals(): del inds_to_use spect_IDs, spect_timebins = np.unique(spect_ID_vector, return_counts=True) if iter == 1: # sanity check: # spect_IDs should always start from 0 # and go to n-1 where n is # of spectrograms assert np.array_equal(spect_IDs, np.arange(spect_IDs.shape[-1])) spect_IDs = spect_IDs.tolist() # because we need to pop off ids for 'incfreq' spect_IDs_in_subset = [] total_dur_in_timebins = 0 if method == 'incfreq': classes, counts = np.unique(labeled_timebins_vector, return_counts=True) int_labels_without_int_flag = [val for val in labels_mapping.values() if type(val) is int] if set(classes) != set(int_labels_without_int_flag): raise ValueError('classes in labeled_timebins_vector ' 'do not match classes in labels_mapping.') freq_rank = np.argsort(counts).tolist() # reason for doing it in this Schliemel-the-painter-looking way is that # I want to make sure all classes are represented first, but then # go back to just grabbing songs completely at random while freq_rank: # is not an empty list yet curr_class = classes[freq_rank.pop(0)] # if curr_class already represented in subset, skip it if 'inds_to_use' in locals(): classes_already_in_subset = np.unique( labeled_timebins_vector[inds_to_use]) if curr_class in classes_already_in_subset: continue inds_this_class = np.where(labeled_timebins_vector==curr_class)[0] spect_IDs_this_class = 
np.unique(spect_ID_vector[inds_this_class]) # keep only the spect IDs we haven't popped off main list already spect_IDs_this_class = [spect_ID_this_class for spect_ID_this_class in spect_IDs_this_class if spect_ID_this_class in spect_IDs] rand_spect_ID = np.random.choice(spect_IDs_this_class) spect_IDs_in_subset.append(rand_spect_ID) spect_IDs.pop(spect_IDs.index(rand_spect_ID)) # so as not
<filename>tests/test_operators.py
import pytest

from giving import give, given, giver, operators as op

# Absolute tolerance for floating-point comparisons in the rolling
# average/variance tests.
TOLERANCE = 1e-6


def fib(n):
    """Emit ``n`` Fibonacci steps via ``give(a, b)`` and return the last b."""
    a = 0
    b = 1
    give(a, b)
    for _ in range(n - 1):
        a, b = b, a + b
        give(a, b)
    return b


def things(*values):
    """Emit each value under the key ``a``."""
    for a in values:
        give(a)


def test_getitem():
    # getitem("b") extracts the value of a single key from each emission.
    with given() as gv:
        results = []
        gv.pipe(op.getitem("b")).subscribe(results.append)
        fib(5)
        assert results == [1, 1, 2, 3, 5]


def test_getitem2():
    # getitem with two keys yields tuples of the requested values.
    with given() as gv:
        results = []
        gv.pipe(op.getitem("a", "b")).subscribe(results.append)
        fib(5)
        assert results == [(0, 1), (1, 1), (1, 2), (2, 3), (3, 5)]


def test_getitem_tuple():
    # Indexing the stream with [1] selects element 1 of each emitted tuple.
    with given() as gv:
        results = gv.kmap(lambda a: (a, a * a))[1].accum()
        things(1, 2, 3
        )
        assert results == [1, 4, 9]


def test_getitem_strict():
    # strict=True raises KeyError when a requested key is missing.
    with given() as gv:
        results = []
        gv.pipe(op.getitem("a", "b", strict=True)).subscribe(results.append)
        fib(5)
        with pytest.raises(KeyError):
            give(a=123)


def test_format():
    # format() fills a template from the emitted dict's keys.
    with given() as gv:
        results = []
        gv.pipe(op.format("b={b}")).subscribe(results.append)
        fib(5)
        assert results == ["b=1", "b=1", "b=2", "b=3", "b=5"]


def test_format2():
    # Positional formatting of a single extracted value.
    with given() as gv:
        results = []
        gv.pipe(op.getitem("b"), op.format("b={}")).subscribe(results.append)
        fib(5)
        assert results == ["b=1", "b=1", "b=2", "b=3", "b=5"]


def test_format3():
    # Positional formatting of multiple extracted values.
    with given() as gv:
        results = []
        gv.pipe(op.getitem("a", "b"), op.format("a={},b={}")).subscribe(results.append)
        fib(5)
        assert results == [
            "a=0,b=1",
            "a=1,b=1",
            "a=1,b=2",
            "a=2,b=3",
            "a=3,b=5",
        ]


def test_format4():
    # skip_missing=True drops emissions lacking any template key.
    with given() as gv:
        results = gv.format("{a} {b}", skip_missing=True).accum()
        give(a=1)
        give(b=2)
        give(a=3, b=4)
        assert results == ["3 4"]


def test_format_raw():
    # raw=True formats the tuple itself rather than unpacking it.
    with given() as gv:
        results = gv["a", "b"].format("{}", raw=True).accum()
        give(a=1, b=2)
        give(a=3, b=4)
        assert results == ["(1, 2)", "(3, 4)"]


def test_kmap():
    # kmap maps over emissions by keyword, here negating key "b".
    with given() as gv:
        results = []
        gv.pipe(op.kmap(lambda b: -b)).subscribe(results.append)
        fib(5)
        assert results == [-1, -1, -2, -3, -5]


def test_kmap2():
    # NOTE(review): truncated here in this chunk; the statement continues
    # on the next chunk line.
    with given() as gv:
        results = 
gv.kmap(x=lambda **kw: -kw["b"], y=lambda a: a * a).accum() fib(5) assert results == [ {"x": -1, "y": 0}, {"x": -1, "y": 1}, {"x": -2, "y": 1}, {"x": -3, "y": 4}, {"x": -5, "y": 9}, ] def test_kmap_err(): with given() as gv: with pytest.raises(TypeError): gv.kmap(lambda a: -a, b=lambda b: -b) def test_augment(): with given() as gv: results = gv.augment(x=lambda **kw: -kw["b"], y=lambda a: a * a).accum() fib(5) assert results == [ {"a": 0, "b": 1, "x": -1, "y": 0}, {"a": 1, "b": 1, "x": -1, "y": 1}, {"a": 1, "b": 2, "x": -2, "y": 1}, {"a": 2, "b": 3, "x": -3, "y": 4}, {"a": 3, "b": 5, "x": -5, "y": 9}, ] def test_augment_overwrite(): with given() as gv: results = gv.kmap(a=lambda **kw: -kw["b"], b=lambda a: a * a).accum() fib(5) assert results == [ {"a": -1, "b": 0}, {"a": -1, "b": 1}, {"a": -2, "b": 1}, {"a": -3, "b": 4}, {"a": -5, "b": 9}, ] def test_kfilter(): with given() as gv: results = gv.kfilter(lambda a: a > 0)["a"].accum() things(0, 1, -2, 3, -4, 5) assert results == [1, 3, 5] def test_roll(): with given() as gv: results = gv["b"].roll(3).map(list).accum() fib(5) assert results == [[1], [1, 1], [1, 1, 2], [1, 2, 3], [2, 3, 5]] def test_rolling_average(): with given() as gv: results1 = [] results2 = [] bs = gv.pipe(op.getitem("b")) bs.pipe( op.average(scan=7), ).subscribe(results1.append) bs.pipe( op.roll(7), op.map(lambda xs: sum(xs) / len(xs)), ).subscribe(results2.append) fib(25) assert all(abs(m1 - m2) < TOLERANCE for m1, m2 in zip(results1, results2)) def test_rolling_average_and_variance(): with given() as gv: bs = gv.pipe(op.getitem("b")) results1 = [] bs.pipe( op.average_and_variance(scan=7), op.skip(1), ).subscribe(results1.append) def meanvar(xs): n = len(xs) if len(xs) >= 2: mean = sum(xs) / n var = sum((x - mean) ** 2 for x in xs) / (n - 1) return (mean, var) else: return (None, None) results2 = [] bs.pipe( op.roll(7), op.map(meanvar), op.skip(1), ).subscribe(results2.append) fib(25) assert all( abs(m1 - m2) < TOLERANCE and abs(v1 - v2) < 
TOLERANCE for (m1, v1), (m2, v2) in zip(results1, results2) ) def test_variance(): with given() as gv: bs = gv.pipe(op.getitem("b")) results1 = [] bs.pipe( op.variance(scan=7), op.skip(1), ).subscribe(results1.append) def varcalc(xs): n = len(xs) if len(xs) >= 2: mean = sum(xs) / n var = sum((x - mean) ** 2 for x in xs) / (n - 1) return var else: return (None, None) results2 = [] bs.pipe( op.roll(7), op.map(varcalc), op.skip(1), ).subscribe(results2.append) fib(25) assert all(abs(v1 - v2) < TOLERANCE for v1, v2 in zip(results1, results2)) def accum(obs): results = [] obs.subscribe(results.append) return results def test_average(): values = [1, 2, 10, 20] with given() as gv: gv = gv.pipe(op.getitem("a")) results1 = [] gv.pipe(op.average()).subscribe(results1.append) results2 = [] gv.pipe(op.average(scan=True)).subscribe(results2.append) results3 = [] gv.pipe(op.average(scan=2)).subscribe(results3.append) things(*values) assert results1 == [sum(values) / len(values)] assert results2 == [ sum(values[:i]) / len(values[:i]) for i in range(1, len(values) + 1) ] assert results3 == [values[0]] + [ (a + b) / 2 for a, b in zip(values[:-1], values[1:]) ] def test_count(): values = [1, 3, -4, 21, -8, -13] with given() as gv: gv = gv.pipe(op.getitem("a")) results1 = [] gv.pipe(op.count()).subscribe(results1.append) results2 = [] gv.pipe(op.count(lambda x: x > 0)).subscribe(results2.append) results3 = [] gv.pipe(op.count(lambda x: x > 0, scan=True)).subscribe(results3.append) results4 = [] gv.pipe(op.count(lambda x: x > 0, scan=3)).subscribe(results4.append) results5 = [] gv.pipe(op.count(scan=True)).subscribe(results5.append) results6 = [] gv.pipe(op.count(scan=3)).subscribe(results6.append) things(*values) assert results1 == [len(values)] assert results2 == [len([v for v in values if v > 0])] assert results3 == [1, 2, 2, 3, 3, 3] assert results4 == [1, 2, 2, 2, 1, 1] assert results5 == list(range(1, len(values) + 1)) assert results6 == [1, 2, 3, 3, 3, 3] def test_min(): values 
= [1, 3, -4, 21, -8, -13] with given() as gv: results = gv["a"].min().accum() things(*values) assert results == [-13] def test_max(): values = [1, 3, -4, 21, -8, -13] with given() as gv: results = gv["a"].max().accum() things(*values) assert results == [21] def test_min_cmp(): values = [1, 3, -4, 21, -8, -30] with given() as gv: results = gv["a"].min(comparer=lambda x, y: abs(x) - abs(y)).accum() things(*values) assert results == [1] def test_max_cmp(): values = [1, 3, -4, 21, -8, -30] with given() as gv: results = gv["a"].max(comparer=lambda x, y: abs(x) - abs(y)).accum() things(*values) assert results == [-30] def test_min_key(): values = [1, 3, -4, 21, -8, -30] with given() as gv: results = gv["a"].min(key=abs).accum() things(*values) assert results == [1] def test_max_key(): values = [1, 3, -4, 21, -8, -30] with given() as gv: results = gv["a"].max(key=abs).accum() things(*values) assert results == [-30] def test_min_key2(): values = [1, 3, -4, 21, -8, -30] with given() as gv: results = gv.min(key="a").accum() things(*values) assert results == [{"a": -30}] def test_sum(): values = [1, 3, -4, 21, -8, -17] with given() as gv: results = gv["a"].sum().accum() things(*values) assert results == [-4] def test_affix(): values = [1, 2, 3, 4] with given() as gv: results = [] gv.pipe( op.affix(b=gv.pipe(op.getitem("a"), op.map(lambda x: x * x))) ).subscribe(results.append) things(*values) assert results == [{"a": x, "b": x * x} for x in values] def test_affix2(): values = [1, 2, 3, 4] with given() as gv: results = gv.where("a").affix(asquare=lambda o: o.kmap(lambda a: a * a)).accum() give(b=7) things(*values) assert results == [{"a": x, "asquare": x * x} for x in values] def varia(): give(x=1) give(x=2, y=True) give(z=100) give(x=3, y=False) give(y=True) def test_where(): with given() as gv: everything = accum(gv) results1 = accum(gv.pipe(op.where("x"))) results2 = accum(gv.pipe(op.where(y=True))) results3 = accum(gv.pipe(op.where("x", y=True))) results4 = 
accum(gv.pipe(op.where(x=lambda x: x > 10))) results5 = accum(gv.pipe(op.where("!x"))) results6 = accum(gv.pipe(op.where(x=2, y=True))) varia() assert results1 == [d for d in everything if "x" in d] assert results2 == [d for d in everything if "y" in d and d["y"]] assert results3 == [d for d in everything if "x" in d and "y" in d and d["y"]] assert results4 == [d for d in everything if "x" in d and d["x"] > 10] assert results5 == [d for d in everything if "x" not in d] assert results6 == [{"x": 2, "y": True}] def test_where_any(): with given() as gv: everything = gv.accum() results = gv.where_any("x", "z").accum() varia() assert results == [d for d in everything if "x" in d or "z" in d] def aggron(n): for i in range(n): give(start=True) give(a=i) give(b=i * i) give(end=True) def test_collect_between(): with given() as gv: results = gv.pipe(op.collect_between("start", "end")).accum() aggron(3) assert results == [ {"start": True, "end": True, "a": 0, "b": 0}, {"start": True, "end": True, "a": 1, "b": 1}, {"start": True, "end": True, "a": 2, "b": 4}, ] def fact(n): giv = giver(n=n) giv(start=True) give(dummy=1234) if n <= 1: value = n else: f1
<reponame>tailoric/Pouty-Bot-Discord from discord.ext import commands, tasks from discord.utils import get import discord import json import datetime import logging import traceback from random import choice import re import asyncio from .utils.dataIO import DataIO from .utils.checks import is_owner_or_moderator from cogs.default import CustomHelpCommand from io import TextIOWrapper, BytesIO forbidden_word_pattern = re.compile(r'(\btrap\b|nigg(a|er)|fag(got)?)') class JumpMessageView(discord.ui.View): def __init__(self, message: discord.Message): super().__init__(timeout=None) self.add_item(discord.ui.Button(url=message.jump_url, label='Scroll Up', emoji="\N{UPWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}", style=discord.ButtonStyle.primary)) self.add_item(discord.ui.Button( url="https://discord.com/channels/187423852224053248/366659034410909717/", label="confirm you read the rules", emoji="\N{OPEN BOOK}" )) class AnimemesHelpFormat(CustomHelpCommand): def random_response(self): with open("data/rules_channel_phrases.json")as f: phrases = json.load(f) return choice(phrases["help"]) async def send_bot_help(self, mapping): channel = self.context.channel if channel and channel.id == 366659034410909717: await self.context.send(self.random_response()) return await super().send_bot_help(mapping) class ReadRules(commands.Cog): """ Animemes focused cog """ async def init_database(self): query = ''' CREATE TABLE IF NOT EXISTS new_memesters( user_id BIGINT, time_over timestamp ); ''' async with self.bot.db.acquire() as con: async with con.transaction(): await con.execute(query) async def add_new_memester(self, new_user): query = ''' INSERT INTO new_memesters VALUES ($1, $2) ON CONFLICT DO NOTHING ''' async with self.bot.db.acquire() as con: statement = await con.prepare(query) time_over = datetime.datetime.utcnow() + datetime.timedelta(weeks=1) async with con.transaction(): await statement.fetch(new_user.id, time_over) async def fetch_new_memesters(self): query = ''' SELECT * 
FROM new_memesters ''' async with self.bot.db.acquire() as con: async with con.transaction(): return await con.fetch(query) async def remove_user_from_new_list(self, user_id): query = ''' DELETE FROM new_memesters WHERE user_id = $1 ''' async with self.bot.db.acquire() as con: statement = await con.prepare(query) async with con.transaction(): await statement.fetch(user_id) def __init__(self, bot: commands.Bot): self.bucket = commands.CooldownMapping.from_cooldown(3, 600, commands.BucketType.member) self.bot = bot self._original_help_command = bot.help_command self.bot.help_command = AnimemesHelpFormat() self.bot.help_command.cog = self self.data_io = DataIO() self.checkers_channel = self.bot.get_channel(self.data_io.load_json("reddit_settings")["channel"]) self.animemes_guild = self.bot.get_guild(187423852224053248) if self.animemes_guild: self.memester_role = self.animemes_guild.get_role(189594836687519744) self.new_memester = self.animemes_guild.get_role(653273427435847702) self.join_log = self.animemes_guild.get_channel(595585060909088774) self.rules_channel = self.animemes_guild.get_channel(366659034410909717) self.lockdown_channel = self.animemes_guild.get_channel(596319943612432404) self.bot.loop.create_task(self.init_database()) self.bot.loop.create_task(self.setup_rules_database()) self.check_for_new_memester.start() self.join_counter = 0 self.join_limit = 5 self.join_timer = 6 with open("config/join_limit_settings.json") as f: settings = json.load(f) self.join_limit = settings["join_limit"] self.join_timer = settings["join_timer"] self.limit_reset.change_interval(hours=self.join_timer) def cog_unload(self): self.bot.help_command = self._original_help_command self.check_for_new_memester.stop() self.limit_reset.cancel() @commands.command(name="stuck") async def people_stuck(self, ctx): """ show how many people are still not able to read the rules """ memester_count = len(self.memester_role.members) + len(self.new_memester.members) await 
    @commands.group(name="rules", invoke_without_command=True)
    @commands.guild_only()
    @is_owner_or_moderator()
    async def rules(self, ctx: commands.Context):
        """
        A command for rewriting and posting the rules of the current server.
        The rules channel has to be setup via the `setup` subcommand.
        The command expects a text file to be uploaded on use.

        __Format rules__:
        - New lines of the original file are preserved.
        - Typical markdown rules apply `**bold**` `_italic_` etc.
        - An empty line tells the bot to post a new message otherwise the bot will fill up to 4000 character per message
        - to have an image in the embed use a single line starting with `!image` followed by the valid image url followed by an empty line
        - You have to use the [discord markdown](https://discord.com/developers/docs/reference#message-formatting)
        for channels, users, roles etc (check the link for more info)
        So instead of using @User you have to do `<@1234567890>` or instead of @Role you have to use `<@&1234456789>`
        """
        # A rules text file must be attached to the invoking message.
        if not ctx.message.attachments:
            await ctx.send("Please upload a text file with the rules")
            return
        # Resolve the configured rules channel for this guild (set via `.rules setup`).
        rules_channel_id = await self.bot.db.fetchval("""
        SELECT channel_id FROM rule_channel WHERE guild_id = $1
        """, ctx.guild.id)
        if rules_channel_id:
            rules_channel = ctx.guild.get_channel(rules_channel_id)
        else:
            return await ctx.send("no rules channel setup")
        attachment = ctx.message.attachments[0]
        # Wrap the downloaded bytes so the file can be read line by line as UTF-8 text.
        bytesIO = BytesIO(await attachment.read())
        file_wrapper = TextIOWrapper(buffer=bytesIO, encoding='utf-8')
        # Each paginator page becomes one embed; blank lines force a page break.
        paginator = commands.Paginator(prefix=None, suffix=None, max_size=4000)
        while (line := file_wrapper.readline()) != "":
            if line.strip() == "":
                paginator.close_page()
                continue
            paginator.add_line(line.strip("\n"))
        first_msg = None
        msg = None
        # Clear out the old rules before re-posting.
        await rules_channel.purge(limit=None)
        for page in paginator.pages:
            # Pages starting with "!image" hold an image URL instead of text.
            if page.startswith("!image"):
                page = page.replace("!image ", "")
                embed = discord.Embed(colour=discord.Colour.blurple())
                embed.set_image(url=page)
            else:
                embed = discord.Embed(description=page, colour=discord.Colour.blurple())
            msg = await rules_channel.send(embed=embed)
            if not first_msg:
                first_msg = msg
        # Attach a "jump to top" view on the last message pointing at the first.
        if first_msg and msg:
            await msg.edit(view=JumpMessageView(first_msg))
{self.join_limit}\n" f"Task running: {self.limit_reset.is_running()} with a cooldown of {self.join_timer} hours {time_info}") @is_owner_or_moderator() @commands.command(name="join_limit", aliases=["jl"]) async def set_join_limit(self, ctx, limit: int): """ set the join limit for this server """ if limit < 1: return await ctx.send("please choose a positive number bigger than 0") self.join_limit = limit await ctx.send(f"limit set to {self.join_limit}") with open("config/join_limit_settings.json", "w") as f: settings = {} settings["join_limit"] = self.join_limit settings["join_timer"] = self.join_timer json.dump(settings, f) @is_owner_or_moderator() @commands.command(name="join_timer", aliases=["jt", "jset", "jchange"]) async def set_join_timer(self, ctx, hours: int, when: int = 0): """ set a new join timer and also set in how many hours the task should start example: `.jt 6 8` which will make the task start every 6 hours after waiting 8 hours first """ self.join_timer = hours was_running = self.limit_reset.is_running() self.limit_reset.cancel() self.limit_reset.change_interval(hours=hours) with open("config/join_limit_settings.json", "w") as f: settings = {} settings["join_limit"] = self.join_limit settings["join_timer"] = self.join_timer json.dump(settings, f) response = f"join timer cooldown changed to {hours} hours" if when > 0 : response += f" and will start running in {when} hours" await ctx.send(response) if was_running: def is_previous_lockdown_message(m): return "join limit was exceeded try again in" in m.content await self.lockdown_channel.purge(limit=100, check=is_previous_lockdown_message) await self.lockdown_channel.send(f"current join limit was exceeded try again in {when} hours") await asyncio.sleep(when * 3600) self.limit_reset.start() @is_owner_or_moderator() @commands.command(name="join_timer_start", aliases=["jstart"]) async def start_join_timer(self, ctx, when: int = 0): """ start the join timer either now or in x hours """ 
    @is_owner_or_moderator()
    @commands.command(name="join_timer_stop", aliases=["jstop"])
    async def stop_join_timer(self, ctx):
        """Stop the periodic join-limit reset task immediately."""
        self.limit_reset.cancel()
channel.send(choice(phrases["curse"])) return if message.role_mentions: await channel.send(choice(phrases["pinged"])) return if has_confirm_in_message: if self.bucket.update_rate_limit(message): await channel.send(choice(phrases['repeat'])) return await channel.send(choice(phrases["yes"])) return if "sex-shack" in content: if self.bucket.update_rate_limit(message): await channel.send(choice(phrases['repeat'])) return await channel.send(choice(phrases["shack"])) return if "general-discussion" in content or re.match(r"#(\w+-?)+", content) or message.channel_mentions: if self.bucket.update_rate_limit(message): await channel.send(choice(phrases['repeat'])) return await channel.send(choice(phrases["channel"])) return async def fetch_member_via_api(self, user_id): """ for fetching the user via the api if the member may not be in the cache """ try: return await self.animemes_guild.fetch_member(user_id) except Exception as e: logger = logging.getLogger("PoutyBot") logger.warning(f"Could not fetch user with user id {user_id}") return None @tasks.loop(minutes=1) async def check_for_new_memester(self): rows = await self.fetch_new_memesters() try: for row in rows: if row["time_over"] < datetime.datetime.utcnow(): member = self.animemes_guild.get_member(row["user_id"]) if member
    def plot(self, ax, kws):
        """Draw the scatter plot of ``self.plot_data`` onto ``ax``.

        ``kws`` are user-supplied keyword arguments for ``ax.scatter``;
        semantic mappings (hue/size/style) override them where present.
        """
        # Draw a test plot, using the passed in kwargs. The goal here is to
        # honor both (a) the current state of the plot cycler and (b) the
        # specified kwargs on all the lines we will draw, overriding when
        # relevant with the data semantics. Note that we won't cycle
        # internally; in other words, if ``hue`` is not used, all elements will
        # have the same color, but they will have the color that you would have
        # gotten from the corresponding matplotlib function, and calling the
        # function will advance the axes property cycle.

        # "Scout" plot: an invisible (all-NaN) scatter whose only purpose is
        # to let matplotlib resolve the default sizes/facecolors for us.
        scout_size = max(
            np.atleast_1d(kws.get("s", [])).shape[0],
            np.atleast_1d(kws.get("c", [])).shape[0],
        )
        scout_x = scout_y = np.full(scout_size, np.nan)
        scout = ax.scatter(scout_x, scout_y, **kws)
        s = kws.pop("s", scout.get_sizes())
        c = kws.pop("c", scout.get_facecolors())
        scout.remove()

        kws.pop("color", None)  # TODO is this optimal?

        # --- Determine the visual attributes of the plot

        data = self.plot_data[list(self.variables)].dropna()
        if not data.size:
            return

        # Define the vectors of x and y positions
        empty = np.full(len(data), np.nan)
        x = data.get("x", empty)
        y = data.get("y", empty)

        # Apply the mapping from semantic variables to artist attributes
        if "hue" in self.variables:
            c = self._hue_map(data["hue"])

        if "size" in self.variables:
            s = self._size_map(data["size"])

        # Set defaults for other visual attributes
        kws.setdefault("linewidth", .08 * np.sqrt(np.percentile(s, 10)))
        kws.setdefault("edgecolor", "w")

        if "style" in self.variables:
            # Use a representative marker so scatter sets the edgecolor
            # properly for line art markers. We currently enforce either
            # all or none line art so this works.
            example_level = self._style_map.levels[0]
            example_marker = self._style_map(example_level, "marker")
            kws.setdefault("marker", example_marker)

        # TODO this makes it impossible to vary alpha with hue which might
        # otherwise be useful? Should we just pass None?
        kws["alpha"] = 1 if self.alpha == "auto" else self.alpha

        # Draw the scatter plot
        args = np.asarray(x), np.asarray(y), np.asarray(s), np.asarray(c)
        points = ax.scatter(*args, **kws)

        # Update the paths to get different marker shapes.
        # This has to be done here because ax.scatter allows varying sizes
        # and colors but only a single marker shape per call.
        if "style" in self.variables:
            p = [self._style_map(val, "path") for val in data["style"]]
            points.set_paths(p)

        # Finalize the axes details
        self.label_axes(ax)
        if self.legend:
            self.add_legend_data(ax)
            handles, _ = ax.get_legend_handles_labels()
            if handles:
                ax.legend()
This behavior can be controlled through various parameters, as described and illustrated below.\ """), # --- Shared function parameters data_vars=dedent("""\ x, y : names of variables in ``data`` or vector data, optional Input data variables; must be numeric. Can pass data directly or reference columns in ``data``.\ """), data=dedent("""\ data : DataFrame, array, or list of arrays, optional Input data structure. If ``x`` and ``y`` are specified as names, this should be a "long-form" DataFrame containing those columns. Otherwise it is treated as "wide-form" data and grouping variables are ignored. See the examples for the various ways this parameter can be specified and the different effects of each.\ """), palette=dedent("""\ palette : string, list, dict, or matplotlib colormap An object that determines how colors are chosen when ``hue`` is used. It can be the name of a seaborn palette or matplotlib colormap, a list of colors (anything matplotlib understands), a dict mapping levels of the ``hue`` variable to colors, or a matplotlib colormap object.\ """), hue_order=dedent("""\ hue_order : list, optional Specified order for the appearance of the ``hue`` variable levels, otherwise they are determined from the data. Not relevant when the ``hue`` variable is numeric.\ """), hue_norm=dedent("""\ hue_norm : tuple or Normalize object, optional Normalization in data units for colormap applied to the ``hue`` variable when it is numeric. Not relevant if it is categorical.\ """), sizes=dedent("""\ sizes : list, dict, or tuple, optional An object that determines how sizes are chosen when ``size`` is used. It can always be a list of size values or a dict mapping levels of the ``size`` variable to sizes. 
When ``size`` is numeric, it can also be a tuple specifying the minimum and maximum size to use such that other values are normalized within this range.\ """), size_order=dedent("""\ size_order : list, optional Specified order for appearance of the ``size`` variable levels, otherwise they are determined from the data. Not relevant when the ``size`` variable is numeric.\ """), size_norm=dedent("""\ size_norm : tuple or Normalize object, optional Normalization in data units for scaling plot objects when the ``size`` variable is numeric.\ """), markers=dedent("""\ markers : boolean, list, or dictionary, optional Object determining how to draw the markers for different levels of the ``style`` variable. Setting to ``True`` will use default markers, or you can pass a list of markers or a dictionary mapping levels of the ``style`` variable to markers. Setting to ``False`` will draw marker-less lines. Markers are specified as in matplotlib.\ """), style_order=dedent("""\ style_order : list, optional Specified order for appearance of the ``style`` variable levels otherwise they are determined from the data. Not relevant when the ``style`` variable is numeric.\ """), units=dedent("""\ units : {long_form_var} Grouping variable identifying sampling units. When used, a separate line will be drawn for each unit with appropriate semantics, but no legend entry will be added. Useful for showing distribution of experimental replicates when exact identities are not needed. """), estimator=dedent("""\ estimator : name of pandas method or callable or None, optional Method for aggregating across multiple observations of the ``y`` variable at the same ``x`` level. If ``None``, all observations will be drawn.\ """), ci=dedent("""\ ci : int or "sd" or None, optional Size of the confidence interval to draw when aggregating with an estimator. "sd" means to draw the standard deviation of the data. 
Setting to ``None`` will skip bootstrapping.\ """), n_boot=dedent("""\ n_boot : int, optional Number of bootstraps to use for computing the confidence interval.\ """), seed=dedent("""\ seed : int, numpy.random.Generator, or numpy.random.RandomState, optional Seed or random number generator for reproducible bootstrapping.\ """), legend=dedent("""\ legend : "brief", "full", or False, optional How to draw the legend. If "brief", numeric ``hue`` and ``size`` variables will be represented with a sample of evenly spaced values. If "full", every group will get an entry in the legend. If ``False``, no legend data is added and no legend is drawn.\ """), ax_in=dedent("""\ ax : matplotlib Axes, optional Axes object to draw the plot onto, otherwise uses the current Axes.\ """), ax_out=dedent("""\ ax : matplotlib Axes Returns the Axes object with the plot drawn onto it.\ """), # --- Repeated phrases long_form_var="name of variables in ``data`` or vector data, optional", ) _relational_docs.update(_facet_docs) @_deprecate_positional_args def lineplot( *, x=None, y=None, hue=None, size=None, style=None, data=None, palette=None, hue_order=None, hue_norm=None, sizes=None, size_order=None, size_norm=None, dashes=True, markers=None, style_order=None, units=None, estimator="mean", ci=95, n_boot=1000, seed=None, sort=True, err_style="band", err_kws=None, legend="brief", ax=None, **kwargs ): variables = _LinePlotter.get_semantics(locals()) p = _LinePlotter( data=data, variables=variables, estimator=estimator, ci=ci, n_boot=n_boot, seed=seed, sort=sort, err_style=err_style, err_kws=err_kws, legend=legend, ) p.map_hue(palette=palette, order=hue_order, norm=hue_norm) p.map_size(sizes=sizes, order=size_order, norm=size_norm) p.map_style(markers=markers, dashes=dashes, order=style_order) if ax is None: ax =
self.input_dirs: if not self.input_paths: self.input_paths = self.input_dirs else: raise DistutilsOptionError( 'input-dirs and input-paths are mutually exclusive' ) if self.no_default_keywords: keywords = {} else: keywords = DEFAULT_KEYWORDS.copy() keywords.update(parse_keywords(listify_value(self.keywords))) self.keywords = keywords if not self.keywords: raise DistutilsOptionError('you must specify new keywords if you ' 'disable the default ones') if not self.output_file: raise DistutilsOptionError('no output file specified') if self.no_wrap and self.width: raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " "exclusive") if not self.no_wrap and not self.width: self.width = 76 elif self.width is not None: self.width = int(self.width) if self.sort_output and self.sort_by_file: raise DistutilsOptionError("'--sort-output' and '--sort-by-file' " "are mutually exclusive") if self.input_paths: if isinstance(self.input_paths, str): self.input_paths = re.split(r',\s*', self.input_paths) elif self.distribution is not None: self.input_paths = dict.fromkeys([ k.split('.', 1)[0] for k in (self.distribution.packages or ()) ]).keys() else: self.input_paths = [] if not self.input_paths: raise DistutilsOptionError("no input files or directories specified") for path in self.input_paths: if not os.path.exists(path): raise DistutilsOptionError("Input path: %s does not exist" % path) self.add_comments = listify_value(self.add_comments or (), ",") if self.distribution: if not self.project: self.project = self.distribution.get_name() if not self.version: self.version = self.distribution.get_version() if self.add_location == 'never': self.no_location = True elif self.add_location == 'file': self.include_lineno = False def run(self): mappings = self._get_mappings() with open(self.output_file, 'wb') as outfile: catalog = Catalog(project=self.project, version=self.version, msgid_bugs_address=self.msgid_bugs_address, copyright_holder=self.copyright_holder, 
charset=self.charset) for path, method_map, options_map in mappings: def callback(filename, method, options): if method == 'ignore': return # If we explicitly provide a full filepath, just use that. # Otherwise, path will be the directory path and filename # is the relative path from that dir to the file. # So we can join those to get the full filepath. if os.path.isfile(path): filepath = path else: filepath = os.path.normpath(os.path.join(path, filename)) optstr = '' if options: optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for k, v in options.items()]) self.log.info('extracting messages from %s%s', filepath, optstr) if os.path.isfile(path): current_dir = os.getcwd() extracted = check_and_call_extract_file( path, method_map, options_map, callback, self.keywords, self.add_comments, self.strip_comments, current_dir ) else: extracted = extract_from_dir( path, method_map, options_map, keywords=self.keywords, comment_tags=self.add_comments, callback=callback, strip_comment_tags=self.strip_comments ) for filename, lineno, message, comments, context in extracted: if os.path.isfile(path): filepath = filename # already normalized else: filepath = os.path.normpath(os.path.join(path, filename)) catalog.add(message, None, [(filepath, lineno)], auto_comments=comments, context=context) self.log.info('writing PO template file to %s', self.output_file) write_po(outfile, catalog, width=self.width, no_location=self.no_location, omit_header=self.omit_header, sort_output=self.sort_output, sort_by_file=self.sort_by_file, include_lineno=self.include_lineno) def _get_mappings(self): mappings = [] if self.mapping_file: with open(self.mapping_file) as fileobj: method_map, options_map = parse_mapping(fileobj) for path in self.input_paths: mappings.append((path, method_map, options_map)) elif getattr(self.distribution, 'message_extractors', None): message_extractors = self.distribution.message_extractors for path, mapping in message_extractors.items(): if isinstance(mapping, str): 
method_map, options_map = parse_mapping(StringIO(mapping)) else: method_map, options_map = [], {} for pattern, method, options in mapping: method_map.append((pattern, method)) options_map[pattern] = options or {} mappings.append((path, method_map, options_map)) else: for path in self.input_paths: mappings.append((path, DEFAULT_MAPPING, {})) return mappings def check_message_extractors(dist, name, value): """Validate the ``message_extractors`` keyword argument to ``setup()``. :param dist: the distutils/setuptools ``Distribution`` object :param name: the name of the keyword argument (should always be "message_extractors") :param value: the value of the keyword argument :raise `DistutilsSetupError`: if the value is not valid """ assert name == 'message_extractors' if not isinstance(value, dict): raise DistutilsSetupError('the value of the "message_extractors" ' 'parameter must be a dictionary') class init_catalog(Command): """New catalog initialization command for use in ``setup.py`` scripts. If correctly installed, this command is available to Setuptools-using setup scripts automatically. For projects using plain old ``distutils``, the command needs to be registered explicitly in ``setup.py``:: from babel.messages.frontend import init_catalog setup( ... 
cmdclass = {'init_catalog': init_catalog} ) """ description = 'create a new catalog based on a POT file' user_options = [ ('domain=', 'D', "domain of PO file (default 'messages')"), ('input-file=', 'i', 'name of the input file'), ('output-dir=', 'd', 'path to output directory'), ('output-file=', 'o', "name of the output file (default " "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"), ('locale=', 'l', 'locale for the new localized catalog'), ('width=', 'w', 'set output line width (default 76)'), ('no-wrap', None, 'do not break long message lines, longer than the output line width, ' 'into several lines'), ] boolean_options = ['no-wrap'] def initialize_options(self): self.output_dir = None self.output_file = None self.input_file = None self.locale = None self.domain = 'messages' self.no_wrap = False self.width = None def finalize_options(self): if not self.input_file: raise DistutilsOptionError('you must specify the input file') if not self.locale: raise DistutilsOptionError('you must provide a locale for the ' 'new catalog') try: self._locale = Locale.parse(self.locale) except UnknownLocaleError as e: raise DistutilsOptionError(e) if not self.output_file and not self.output_dir: raise DistutilsOptionError('you must specify the output directory') if not self.output_file: self.output_file = os.path.join(self.output_dir, self.locale, 'LC_MESSAGES', self.domain + '.po') if not os.path.exists(os.path.dirname(self.output_file)): os.makedirs(os.path.dirname(self.output_file)) if self.no_wrap and self.width: raise DistutilsOptionError("'--no-wrap' and '--width' are mutually " "exclusive") if not self.no_wrap and not self.width: self.width = 76 elif self.width is not None: self.width = int(self.width) def run(self): self.log.info( 'creating catalog %s based on %s', self.output_file, self.input_file ) with open(self.input_file, 'rb') as infile: # Although reading from the catalog template, read_po must be fed # the locale in order to correctly calculate plurals catalog = 
read_po(infile, locale=self.locale) catalog.locale = self._locale catalog.revision_date = datetime.now(LOCALTZ) catalog.fuzzy = False with open(self.output_file, 'wb') as outfile: write_po(outfile, catalog, width=self.width) class update_catalog(Command): """Catalog merging command for use in ``setup.py`` scripts. If correctly installed, this command is available to Setuptools-using setup scripts automatically. For projects using plain old ``distutils``, the command needs to be registered explicitly in ``setup.py``:: from babel.messages.frontend import update_catalog setup( ... cmdclass = {'update_catalog': update_catalog} ) .. versionadded:: 0.9 """ description = 'update message catalogs from a POT file' user_options = [ ('domain=', 'D', "domain of PO file (default 'messages')"), ('input-file=', 'i', 'name of the input file'), ('output-dir=', 'd', 'path to base directory containing the catalogs'), ('output-file=', 'o', "name of the output file (default " "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"), ('omit-header', None, "do not include msgid "" entry in header"), ('locale=', 'l', 'locale of the catalog to compile'), ('width=', 'w', 'set output line width (default 76)'), ('no-wrap', None, 'do not break long message lines, longer than the output line width, ' 'into several lines'), ('ignore-obsolete=', None, 'whether to omit obsolete messages from the output'), ('init-missing=', None, 'if any output files are missing, initialize them first'), ('no-fuzzy-matching', 'N', 'do not use fuzzy matching'), ('update-header-comment', None, 'update target header comment'), ('previous', None, 'keep previous msgids of translated messages'), ] boolean_options = [ 'omit-header', 'no-wrap', 'ignore-obsolete', 'init-missing', 'no-fuzzy-matching', 'previous', 'update-header-comment', ] def initialize_options(self): self.domain = 'messages' self.input_file = None self.output_dir = None self.output_file = None self.omit_header = False self.locale = None self.width = None 
    def finalize_options(self):
        """Validate option combinations for the catalog update command.

        :raise DistutilsOptionError: on missing/conflicting options or an
            unknown locale identifier.
        """
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output file or '
                                       'directory')
        # A single explicit output file is ambiguous without a locale.
        if self.output_file and not self.locale:
            raise DistutilsOptionError('you must specify the locale')

        if self.init_missing:
            # init-missing creates brand new catalogs, which requires a
            # parseable locale up front.
            if not self.locale:
                raise DistutilsOptionError('you must specify the locale for '
                                           'the init-missing option to work')

            try:
                self._locale = Locale.parse(self.locale)
            except UnknownLocaleError as e:
                raise DistutilsOptionError(e)
        else:
            self._locale = None

        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        # Default line width is 76 unless wrapping is disabled outright.
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)

        # Keeping previous msgids only makes sense with fuzzy matching on.
        if self.no_fuzzy_matching and self.previous:
            self.previous = False
template, read_po must # be fed the locale in order to correctly calculate plurals catalog = read_po(infile, locale=self.locale) catalog.locale = self._locale catalog.revision_date = datetime.now(LOCALTZ) catalog.fuzzy = False with open(filename, 'wb') as outfile: write_po(outfile,
item in mnt['items']: if 'urls' in item: for url in item['urls']: removeurls.append(url['url']) if 'images' in item: for uload in item['images']: self.wc.grab_json_response( '/data/set', 'RP_RemoveFile({0}, 0)'.format( uload['slotId'])) for url in removeurls: url = url.replace(':', '\:') params = urllib.urlencode({ 'RP_VmAllocateUnMountUrl({0},{1},0,)'.format( self.username, url): ''}) result = self.wc.grab_json_response('/data?set', params, referer=self.adp_referer) if not result: result = self.wc.grab_json_response('/data/set', params, referer=self.adp_referer) if result['return'] != 'Success': raise Exception(result['reason']) self.weblogout() def fetch_psu_firmware(self): return [] def fetch_agentless_firmware(self): cd = self.get_cached_data('lenovo_cached_adapters_fu') if cd: adapterdata, fwu = cd else: adapterdata = None if not adapterdata: if self.updating: raise pygexc.TemporaryError( 'Cannot read extended inventory during firmware update') if self.wc: adapterdata = self.wc.grab_json_response( self.ADP_URL, referer=self.adp_referer) if self.ADP_FU_URL: fwu = self.wc.grab_json_response(self.ADP_FU_URL, referer=self.adp_referer) else: fwu = None if adapterdata: self.datacache['lenovo_cached_adapters_fu'] = ( (adapterdata, fwu), util._monotonic_time()) if adapterdata and 'items' in adapterdata: anames = {} for adata in adapterdata['items']: aname = adata[self.ADP_NAME] if aname in anames: anames[aname] += 1 aname = '{0} {1}'.format(aname, anames[aname]) else: anames[aname] = 1 donenames = set([]) for fundata in adata[self.ADP_FUN]: fdata = fundata.get('firmwares', ()) for firm in fdata: fname = firm['firmwareName'].rstrip() if '.' 
    def disk_inventory(self, mode=0):
        """Yield ``(name, info)`` pairs for disks attached to RAID adapters.

        ``info`` carries ``model`` and ``version`` keys taken from the IMM
        RAID data provider. ``mode=1`` selects the hardware (IMM) inventory
        path, which is currently a no-op.
        """
        if mode==1:
            # Bypass IMM hardware inventory for now
            return
        # Serve from the cache when possible; otherwise fetch from the IMM
        # web interface and store the result with a timestamp.
        storagedata = self.get_cached_data('lenovo_cached_storage')
        if not storagedata:
            if self.wc:
                storagedata = self.wc.grab_json_response(
                    '/designs/imm/dataproviders/raid_alldevices.php')
                if storagedata:
                    self.datacache['lenovo_cached_storage'] = (
                        storagedata, util._monotonic_time())
        if storagedata and 'items' in storagedata:
            for adp in storagedata['items']:
                # Entries without a product name are not RAID adapters we
                # can report on.
                if 'storage.vpd.productName' not in adp:
                    continue
                adpname = adp['storage.vpd.productName']
                if 'children' not in adp:
                    adp['children'] = ()
                for diskent in adp['children']:
                    bdata = {}
                    diskname = '{0} Disk {1}'.format(
                        adpname,
                        diskent['storage.slotNo'])
                    bdata['model'] = diskent[
                        'storage.vpd.productName'].rstrip()
                    # NOTE(review): assumes at least one firmware entry per
                    # disk — confirm against the data provider's schema.
                    bdata['version'] = diskent['storage.firmwares'][0][
                        'versionStr']
                    yield (diskname, bdata)
get_oem_sensor_names(self, ipmicmd): try: if self._energymanager is None: self._energymanager = energy.EnergyManager(ipmicmd) return self._energymanager.supportedmeters except pygexc.UnsupportedFunctionality: return () def get_oem_sensor_descriptions(self, ipmicmd): return [{'name': x, 'type': 'Energy' } for x in self.get_oem_sensor_names(ipmicmd)] def get_oem_sensor_reading(self, name, ipmicmd): if self._energymanager is None: self._energymanager = energy.EnergyManager(ipmicmd) if name == 'AC Energy': kwh = self._energymanager.get_ac_energy(ipmicmd) elif name == 'DC Energy': kwh = self._energymanager.get_dc_energy(ipmicmd) else: raise pygexc.UnsupportedFunctionality('No sunch sensor ' + name) return sdr.SensorReading({'name': name, 'imprecision': None, 'value': kwh, 'states': [], 'state_ids': [], 'health': pygconst.Health.Ok, 'type': 'Energy'}, 'kWh') def weblogout(self): if self._wc: try: self._wc.grab_json_response(self.logouturl) except Exception: pass self._wc = None def hardware_inventory_map(self): hwmap = self.get_cached_data('lenovo_cached_hwmap') if hwmap: return hwmap hwmap = {} enclosureuuid = self.get_property('/v2/ibmc/smm/chassis/uuid') if enclosureuuid: bay = hex(int(self.get_property('/v2/cmm/sp/7'))).replace( '0x', '') serial = self.get_property('/v2/ibmc/smm/chassis/sn') model = self.get_property('/v2/ibmc/smm/chassis/mtm') hwmap['Enclosure'] = { 'UUID': fixup_uuid(enclosureuuid), 'Bay': bay, 'Model': fixup_str(model), 'Serial': fixup_str(serial), } for disk in self.disk_inventory(mode=1): # hardware mode hwmap[disk[0]] = disk[1] adapterdata = self.get_cached_data('lenovo_cached_adapters') if not adapterdata: if self.updating: raise pygexc.TemporaryError( 'Cannot read extended inventory during firmware update') if self.wc: adapterdata = self.wc.grab_json_response( self.ADP_URL, referer=self.adp_referer) if adapterdata: self.datacache['lenovo_cached_adapters'] = ( adapterdata, util._monotonic_time()) if adapterdata and 'items' in adapterdata: 
anames = {} for adata in adapterdata['items']: skipadapter = False clabel = adata[self.ADP_LABEL] if clabel == 'Unknown': continue if clabel != 'Onboard': aslot = adata[self.ADP_SLOTNO] if clabel == 'ML2': clabel = 'ML2 (Slot {0})'.format(aslot) else: clabel = 'Slot {0}'.format(aslot) aname = adata[self.ADP_NAME] bdata = {'location': clabel, 'name': aname} if aname in anames: anames[aname] += 1 aname = '{0} {1}'.format(aname, anames[aname]) else: anames[aname] = 1 for fundata in adata[self.ADP_FUN]: bdata['pcislot'] = '{0:02x}:{1:02x}'.format( fundata[self.BUSNO], fundata[self.DEVNO]) serialdata = fundata.get(self.ADP_SERIALNO, None) if (serialdata and serialdata != 'N/A' and '---' not in serialdata): bdata['serial'] = serialdata partnum = fundata.get(self.ADP_PARTNUM, None) if partnum and partnum != 'N/A': bdata['Part Number'] = partnum cardtype = funtypes.get(fundata.get('funType', None), None) if cardtype is not None: bdata['Type'] = cardtype venid = fundata.get(self.ADP_VENID, None) if venid is not None: bdata['PCI Vendor ID'] = '{0:04x}'.format(venid) devid = fundata.get(self.ADP_DEVID, None) if devid is not None: bdata['PCI Device ID'] = '{0:04x}'.format(devid) venid = fundata.get(self.ADP_SUBVENID, None) if venid is not None: bdata['PCI Subsystem Vendor ID'] = '{0:04x}'.format( venid) devid = fundata.get(self.ADP_SUBDEVID, None) if devid is not None: bdata['PCI Subsystem Device ID'] = '{0:04x}'.format( devid) fruno = fundata.get(self.ADP_FRU, None) if fruno is not None: bdata['FRU Number'] = fruno if self.PORTS in fundata: for portinfo in fundata[self.PORTS]: for lp in portinfo['logicalPorts']: ma = lp['networkAddr'] ma = ':'.join( [ma[i:i+2] for i in range( 0, len(ma), 2)]).lower() bdata['MAC Address {0}'.format( portinfo['portIndex'])] = ma elif clabel == 'Onboard': # skip the various non-nic skipadapter = True if not skipadapter: hwmap[aname] = bdata self.datacache['lenovo_cached_hwmap'] = (hwmap, util._monotonic_time()) self.weblogout() return hwmap def 
get_firmware_inventory(self, bmcver, components): # First we fetch the system firmware found in imm properties # then check for agentless, if agentless, get adapter info using # https, using the caller TLS verification scheme components = set(components) if not components or set(('imm', 'xcc', 'bmc', 'core')) & components: rsp = self.ipmicmd.xraw_command(netfn=0x3a, command=0x50) immverdata = self.parse_imm_buildinfo(rsp['data']) bdata = { 'version': bmcver, 'build': immverdata[0], 'date': immverdata[1]} yield (self.bmcname, bdata) bdata = self.fetch_grouped_properties({ 'build': '/v2/ibmc/dm/fw/imm2/backup_build_id', 'version': '/v2/ibmc/dm/fw/imm2/backup_build_version', 'date': '/v2/ibmc/dm/fw/imm2/backup_build_date'}) if bdata: yield ('{0} Backup'.format(self.bmcname), bdata) bdata = self.fetch_grouped_properties({ 'build': '/v2/ibmc/trusted_buildid', }) if bdata: yield ('{0} Trusted Image'.format(self.bmcname), bdata) if not components or set(('uefi', 'bios', 'core')) & components: bdata = self.fetch_grouped_properties({ 'build': '/v2/bios/build_id', 'version': '/v2/bios/build_version', 'date': '/v2/bios/build_date'}) if bdata: yield ('UEFI', bdata) else: yield ('UEFI', {'version': 'unknown'}) bdata = self.fetch_grouped_properties({ 'build': '/v2/ibmc/dm/fw/bios/backup_build_id', 'version': '/v2/ibmc/dm/fw/bios/backup_build_version'}) if bdata: yield ('UEFI Backup', bdata) # Note that the next pending could be pending for either primary # or backup, so can't promise where it will go bdata = self.fetch_grouped_properties({ 'build': '/v2/bios/pending_build_id'}) if bdata: yield ('UEFI Pending Update', bdata) if not components or set(('fpga', 'core')) & components: try: fpga = self.ipmicmd.xraw_command(netfn=0x3a, command=0x6b, data=(0,)) fpga = '{0}.{1}.{2}'.format(*[ord(x) for x in fpga['data']]) yield ('FPGA', {'version': fpga}) except pygexc.IpmiException as ie: if ie.ipmicode != 193: raise if (not components or (components - set(( 'core', 'uefi', 'bios', 
'bmc', 'xcc', 'imm', 'fpga', 'lxpm')))): for firm in self.fetch_agentless_firmware(): yield firm class XCCClient(IMMClient): logouturl = '/api/providers/logout' bmcname = 'XCC' ADP_URL = '/api/dataset/imm_adapters?params=pci_GetAdapters' ADP_NAME = 'adapterName' ADP_FUN = 'functions' ADP_FU_URL = '/api/function/adapter_update?params=pci_GetAdapterListAndFW' ADP_LABEL = 'connectorLabel' ADP_SLOTNO = 'slotNo' ADP_OOB = 'oobSupported' ADP_PARTNUM = 'vpd_partNo' ADP_SERIALNO = 'vpd_serialNo' ADP_VENID = 'generic_vendorId' ADP_SUBVENID = 'generic_subVendor' ADP_DEVID = 'generic_devId' ADP_SUBDEVID = 'generic_subDevId' ADP_FRU = 'vpd_cardSKU' BUSNO = 'generic_busNo' PORTS = 'network_pPorts' DEVNO = 'generic_devNo' def __init__(self, ipmicmd): super(XCCClient, self).__init__(ipmicmd) self.adp_referer = None def get_description(self): dsc = self.wc.grab_json_response('/DeviceDescription.json') dsc = dsc[0] if not dsc.get('u-height', None): if dsc.get('enclosure-machinetype-model', '').startswith('7Y36'): return {'height': 2, 'slot': 0} else: return {} return {'height': int(dsc['u-height']), 'slot': int(dsc['slot'])} def clear_system_configuration(self): res = self.wc.grab_json_response_with_status( '/redfish/v1/Systems/1/Bios/Actions/Bios.ResetBios', {'Action': 'Bios.ResetBios'}, headers={'Authorization': 'Basic ' + base64.b64encode( self.username + ':' + self.password), 'Content-Type': 'application/json'}) if res[1] < 200 or res[1] >= 300: raise Exception( 'Unexpected response to clear configuration: {0}'.format( res[0])) def get_webclient(self, login=True): cv = self.ipmicmd.certverify wc = webclient.SecureHTTPConnection(self.imm, 443, verifycallback=cv) wc.vintage = util._monotonic_time() try: wc.connect() except socket.error as se: if se.errno != errno.ECONNREFUSED: raise return None if not login: return wc adata = json.dumps({'username': self.username, 'password': <PASSWORD> }) headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'} 
wc.request('POST', '/api/login', adata, headers) rsp = wc.getresponse() if rsp.status == 200: rspdata = json.loads(rsp.read()) wc.set_header('Content-Type', 'application/json') wc.set_header('Authorization', 'Bearer ' + rspdata['access_token']) if '_csrf_token' in wc.cookies: wc.set_header('X-XSRF-TOKEN', wc.cookies['_csrf_token']) return wc def _raid_number_map(self, controller): themap = {} rsp = self.wc.grab_json_response( '/api/function/raid_conf?' 'params=raidlink_GetDisksToConf,{0}'.format(controller)) for lvl in rsp['items'][0]['supported_raidlvl']: mapdata = (lvl['rdlvl'], lvl['maxSpan']) raidname = lvl['rdlvlstr'].replace(' ', '').lower() themap[raidname] = mapdata raidname = raidname.replace('raid', 'r') themap[raidname] = mapdata raidname = raidname.replace('r', '') themap[raidname] = mapdata return themap
raise ValueError('Bond descriptor scaling is only possible if additional bond features are provided.') # normalize target weights if self.target_weights is not None: avg_weight = sum(self.target_weights)/len(self.target_weights) self.target_weights = [w/avg_weight for w in self.target_weights] if min(self.target_weights) < 0: raise ValueError('Provided target weights must be non-negative.') # check if key molecule index is outside of the number of molecules if self.split_key_molecule >= self.number_of_molecules: raise ValueError('The index provided with the argument `--split_key_molecule` must be less than the number of molecules. Note that this index begins with 0 for the first molecule. ') class PredictArgs(CommonArgs): """:class:`PredictArgs` includes :class:`CommonArgs` along with additional arguments used for predicting with a Chemprop model.""" test_path: str """Path to CSV file containing testing data for which predictions will be made.""" preds_path: str """Path to CSV file where predictions will be saved.""" drop_extra_columns: bool = False """Whether to drop all columns from the test data file besides the SMILES columns and the new prediction columns.""" ensemble_variance: bool = False """Deprecated. Whether to calculate the variance of ensembles as a measure of epistemic uncertainty. 
If True, the variance is saved as an additional column for each target in the preds_path.""" individual_ensemble_predictions: bool = False """Whether to return the predictions made by each of the individual models rather than the average of the ensemble""" # Uncertainty arguments uncertainty_method: Literal[ 'mve', 'ensemble', 'evidential_epistemic', 'evidential_aleatoric', 'evidential_total', 'classification', 'dropout', 'spectra_roundrobin', ] = None """The method of calculating uncertainty.""" calibration_method: Literal['zscaling', 'tscaling', 'zelikman_interval', 'mve_weighting', 'platt', 'isotonic'] = None """Methods used for calibrating the uncertainty calculated with uncertainty method.""" evaluation_methods: List[str] = None """The methods used for evaluating the uncertainty performance if the test data provided includes targets. Available methods are [nll, miscalibration_area, ence, spearman] or any available classification or multiclass metric.""" evaluation_scores_path: str = None """Location to save the results of uncertainty evaluations.""" uncertainty_dropout_p: float = 0.1 """The probability to use for Monte Carlo dropout uncertainty estimation.""" dropout_sampling_size: int = 10 """The number of samples to use for Monte Carlo dropout uncertainty estimation. Distinct from the dropout used during training.""" calibration_interval_percentile: float = 95 """Sets the percentile used in the calibration methods. Must be in the range (1,100).""" regression_calibrator_metric: Literal['stdev', 'interval'] = None """Regression calibrators can output either a stdev or an inverval. 
""" calibration_path: str = None """Path to data file to be used for uncertainty calibration.""" calibration_features_path: str = None """Path to features data to be used with the uncertainty calibration dataset.""" calibration_phase_features_path: str = None """ """ calibration_atom_descriptors_path: str = None """Path to the extra atom descriptors.""" calibration_bond_features_path: str = None """Path to the extra bond descriptors that will be used as bond features to featurize a given molecule.""" @property def ensemble_size(self) -> int: """The number of models in the ensemble.""" return len(self.checkpoint_paths) def process_args(self) -> None: super(PredictArgs, self).process_args() if self.regression_calibrator_metric is None: if self.calibration_method == 'zelikman_interval': self.regression_calibrator_metric = 'interval' else: self.regression_calibrator_metric = 'stdev' if self.uncertainty_method == 'dropout' and version.parse(torch.__version__) < version.parse('1.9.0'): raise ValueError('Dropout uncertainty is only supported for pytorch versions >= 1.9.0') self.smiles_columns = chemprop.data.utils.preprocess_smiles_columns( path=self.test_path, smiles_columns=self.smiles_columns, number_of_molecules=self.number_of_molecules, ) if self.checkpoint_paths is None or len(self.checkpoint_paths) == 0: raise ValueError('Found no checkpoints. Must specify --checkpoint_path <path> or ' '--checkpoint_dir <dir> containing at least one checkpoint.') if self.ensemble_variance == True: if self.uncertainty_method in ['ensemble', None]: warn( 'The `--ensemble_variance` argument is deprecated and should \ be replaced with `--uncertainty_method ensemble`.', DeprecationWarning, ) self.uncertainty_method = 'ensemble' else: raise ValueError( f'Only one uncertainty method can be used at a time. \ The arguement `--ensemble_variance` was provided along \ with the uncertainty method {self.uncertainty_method}. 
The `--ensemble_variance` \ argument is deprecated and should be replaced with `--uncertainty_method ensemble`.' ) if self.calibration_interval_percentile <= 1 or self.calibration_interval_percentile >= 100: raise ValueError('The calibration interval must be a percentile value in the range (1,100).') if self.uncertainty_dropout_p < 0 or self.uncertainty_dropout_p > 1: raise ValueError('The dropout probability must be in the range (0,1).') if self.dropout_sampling_size <= 1: raise ValueError('The argument `--dropout_sampling_size` must be an integer greater than 1.') # Validate that features provided for the prediction test set are also provided for the calibration set for (features_argument, base_features_path, cal_features_path) in [ ('`--features_path`', self.features_path, self.calibration_features_path), ('`--phase_features_path`', self.phase_features_path, self.calibration_phase_features_path), ('`--atom_descriptors_path`', self.atom_descriptors_path, self.calibration_atom_descriptors_path), ('`--bond_features_path`', self.bond_features_path, self.calibration_bond_features_path) ]: if base_features_path is not None and self.calibration_path is not None and cal_features_path is None: raise ValueError(f'Additional features were provided using the argument {features_argument}. 
The same kinds of features must be provided for the calibration dataset.') class InterpretArgs(CommonArgs): """:class:`InterpretArgs` includes :class:`CommonArgs` along with additional arguments used for interpreting a trained Chemprop model.""" data_path: str """Path to data CSV file.""" batch_size: int = 500 """Batch size.""" property_id: int = 1 """Index of the property of interest in the trained model.""" rollout: int = 20 """Number of rollout steps.""" c_puct: float = 10.0 """Constant factor in MCTS.""" max_atoms: int = 20 """Maximum number of atoms in rationale.""" min_atoms: int = 8 """Minimum number of atoms in rationale.""" prop_delta: float = 0.5 """Minimum score to count as positive.""" def process_args(self) -> None: super(InterpretArgs, self).process_args() self.smiles_columns = chemprop.data.utils.preprocess_smiles_columns( path=self.data_path, smiles_columns=self.smiles_columns, number_of_molecules=self.number_of_molecules, ) if self.features_path is not None: raise ValueError('Cannot use --features_path <path> for interpretation since features ' 'need to be computed dynamically for molecular substructures. ' 'Please specify --features_generator <generator>.') if self.checkpoint_paths is None or len(self.checkpoint_paths) == 0: raise ValueError('Found no checkpoints. Must specify --checkpoint_path <path> or ' '--checkpoint_dir <dir> containing at least one checkpoint.') class FingerprintArgs(PredictArgs): """:class:`FingerprintArgs` includes :class:`PredictArgs` with additional arguments for the generation of latent fingerprint vectors.""" fingerprint_type: Literal['MPN', 'last_FFN'] = 'MPN' """Choice of which type of latent fingerprint vector to use. 
Default is the output of the MPNN, excluding molecular features""" class HyperoptArgs(TrainArgs): """:class:`HyperoptArgs` includes :class:`TrainArgs` along with additional arguments used for optimizing Chemprop hyperparameters.""" num_iters: int = 20 """Number of hyperparameter choices to try.""" config_save_path: str """Path to :code:`.json` file where best hyperparameter settings will be written.""" log_dir: str = None """(Optional) Path to a directory where all results of the hyperparameter optimization will be written.""" hyperopt_checkpoint_dir: str = None """Path to a directory where hyperopt completed trial data is stored. Hyperopt job will include these trials if restarted. Can also be used to run multiple instances in parallel if they share the same checkpoint directory.""" startup_random_iters: int = 10 """The initial number of trials that will be randomly specified before TPE algorithm is used to select the rest.""" manual_trial_dirs: List[str] = None """Paths to save directories for manually trained models in the same search space as the hyperparameter search. 
Results will be considered as part of the trial history of the hyperparameter search.""" def process_args(self) -> None: super(HyperoptArgs, self).process_args() # Assign log and checkpoint directories if none provided if self.log_dir is None: self.log_dir = self.save_dir if self.hyperopt_checkpoint_dir is None: self.hyperopt_checkpoint_dir = self.log_dir class SklearnTrainArgs(TrainArgs): """:class:`SklearnTrainArgs` includes :class:`TrainArgs` along with additional arguments for training a scikit-learn model.""" model_type: Literal['random_forest', 'svm'] """scikit-learn model to use.""" class_weight: Literal['balanced'] = None """How to weight classes (None means no class balance).""" single_task: bool = False """Whether to run each task separately (needed when dataset has null entries).""" radius: int = 2 """Morgan fingerprint radius.""" num_bits: int = 2048 """Number of bits in morgan fingerprint.""" num_trees: int = 500 """Number of random forest trees.""" impute_mode: Literal['single_task', 'median', 'mean', 'linear','frequent'] = None """How to impute missing data (None means no imputation).""" class SklearnPredictArgs(Tap): """:class:`SklearnPredictArgs` contains arguments used for predicting with a trained scikit-learn model.""" test_path: str """Path to CSV file containing testing data for which predictions will be made.""" smiles_columns: List[str] = None """List of names of the columns containing SMILES strings. By default, uses the first :code:`number_of_molecules` columns.""" number_of_molecules: int = 1 """Number of molecules in each input to the model. This must equal the length of :code:`smiles_columns` (if not :code:`None`).""" preds_path: str """Path to CSV file where predictions will be saved.""" checkpoint_dir: str = None """Path to directory containing model checkpoints (:code:`.pkl` file)""" checkpoint_path: str = None """Path to model checkpoint (:code:`.pkl`
<gh_stars>1-10 ''' Warehouse Class Model Operations Processing System. Copyright <NAME> 2009-2010. Licenced under the EUPL. You may not use this work except in compliance with the Licence. You may obtain a copy of the Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file). Unless required by applicable law or agreed to in writing, software distributed under the Licence is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed or implied. See the Licence governing permissions and limitations under the Licence. Changes: Rev 1 Unused variables removed Changed handling of default production rate in CPWARE LDWARE to only report outstanding waybills on order Amended LDWARE to reduce ordered by cars already at warehouse Amended LDWARE to show difference between ordered/onway LDWARE amended to not show loaded movements Station and Place added to Flash message (both Origin and Destination) ''' import MOPS_Element class cWarehouses(MOPS_Element.cElement): """details about warehouses. warehouses are unique on an origin-commodity-destination basis. warehouses are allocated to industries on the place file. industries produce commodities at a given rate which are stored up to a given value. at threshold values, empty cars of the required type are ordered. warehouses know about the required customer routing of loaded cars. """ extract_code = 'select * from warehouse' extract_header = 'id|industry|commodity|dest industry|prodn rate|threshold goods|' +\ 'threshold cars|threshold class|max storage|in storage|ordered|routing\n' def adware(self, message): """add a new warehouse. the warehouse is linked to an industry, a destination (for the loaded wagons) and a commodity. the rate of production is held. at a given threshold value of production, cars of a specific type are requested. a note of previously ordered wagons is held (to enable further requests for empty cars to be made). 
""" if self.show_access(message, 'ADWARE ^industry^;^commodity^;^destination^;prod rate;threshold quantity;' +\ 'cars to order;^class of car^;max storage;^customer routing^', 'S') != 0: return errors = 0 #industry------------------------------------------------------------------------------------ industry, rc = self.extract_field(message, 0, 'INDUSTRY') if rc > 0: return t = (industry, ) sql = 'select name, loading from place where industry = ?' count, ds_industries = self.db_read(sql, t) if count < 0: return if count == 0: print('* INDUSTRY CODE DOES NOT EXIST (' + str(industry) + ')') return else: for row in ds_industries: industry_name = row[0] industry_loading = row[1] #commodity---------------------------------------------------------------------------------- commodity, rc = self.extract_field(message, 1, 'COMMODITY') if rc > 0: return t = (commodity, ) sql = 'select name, loading from commodity where commodity = ?' count, data = self.db_read(sql, t) if count < 0: return if count == 0: print('* COMMODITY CODE DOES NOT EXIST (' + commodity + ')') return else: for row in data: commodity_name = row[0] commodity_loading = row[1] if commodity_loading != industry_loading: errors = errors + 1 print('* COMMODITY CANNOT BE LOADED AT THIS PLACE: LOADING CODES DISAGREE') #destination industry----------------------------------------------------------------------- destination, rc = self.extract_field(message, 2, 'DESTINATION INDUSTRY') if rc > 0: return t = (destination, ) sql = 'select name, unloading from place where industry = ?' count, data = self.db_read(sql, t) if count < 0: return if count == 0: errors = errors + 1 print('* DESTINATION INDUSTRY CODE DOES NOT EXIST (' + destination + ')') else: for row in data: destination_name = row[0] destination_unloading = row[1] # check that the industry/commodity/destination combo doesn't exist------------------------- t = (industry, commodity, destination) sql = 'select id from warehouse where industry = ? and commodity = ? 
and destination = ?' count, data = self.db_read(sql, t) if count < 0: return if count > 0: print('* INDUSTRY/COMMODITY/DESTINATION COMBINATION ALREADY EXISTS') errors = errors + 1 #production--------------------------------------------------------------------------------- production, rc = self.extract_field(message, 3, 'PRODUCTION RATE') if rc > 0: return try: if int(production) > 99999 or int(production) < 0: errors = errors + 1 print('* PRODUCTION RATE MUST BE IN THE RANGE 0 to 99999') except: errors = errors + 1 print('* PRODUCTION RATE MUST BE A WHOLE NUMBER') #threshold goods---------------------------------------------------------------------------- threshold_goods, rc = self.extract_field(message, 4, 'THRESHOLD TO ORDER CARS') if rc > 0: return try: if int(threshold_goods) > 99999 or int(threshold_goods) < 0: errors = errors + 1 print('* THRESHOLD MUST BE IN THE RANGE 0 to 99999') except: errors = errors + 1 print('* THRESHOLD MUST BE A WHOLE NUMBER') #threshold cars----------------------------------------------------------------------------- threshold_cars, rc = self.extract_field(message, 5, 'NUMBER OF CARS TO ORDER') if rc > 0: return try: if int(threshold_cars) > 99999 or int(threshold_cars) < 0: errors = errors + 1 print('* NUMBER OF CARS MUST BE IN THE RANGE 0 to 99999') except: errors = errors + 1 print('* NUMBER OF CARS MUST BE A WHOLE NUMBER') #threshold car class------------------------------------------------------------------------ threshold_class, rc = self.extract_field(message, 6, 'CAR CLASS') if rc > 0: return t = (threshold_class, ) sql = 'select id from carclass where carclass = ?' 
count, data = self.db_read(sql, t) if count < 0: return if count == 0: errors = errors + 1 print('* CAR CLASS CODE DOES NOT EXIST (' + threshold_class + ')') #max storage-------------------------------------------------------------------------------- max_storage, rc = self.extract_field(message, 7, 'MAXIMUM STORAGE') if rc > 0: return try: if int(max_storage) > 99999 or int(max_storage) < 0: errors = errors + 1 print('* MAXIMUM STORAGE MUST BE IN THE RANGE 0 to 99999') except: errors = errors + 1 print('* MAXIMUM STORAGE MUST BE A WHOLE NUMBER') #routing------------------------------------------------------------------------------------ routing, rc = self.extract_field(message, 8, 'ROUTING CODE') if rc > 0: return t = (routing, ) sql = 'select desc from routing where routing = ?' count, ds_routing = self.db_read(sql, t) if count < 0: return if count == 0: errors = errors + 1 print('* ROUTING CODE DOES NOT EXIST (' + routing + ')') else: for row in ds_routing: routing_name = row[0] #carry out the update----------------------------------------------------------------------- if errors != 0: return ordered = 0 in_storage = 0 t = (industry, commodity, destination, production, threshold_goods, threshold_cars, threshold_class, max_storage, in_storage, ordered, routing) sql = 'insert into warehouse values (null, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)' if self.db_update(sql, t) != 0: return t = (industry, commodity) sql = 'select id from warehouse where industry = ? and commodity = ? 
order by id' count, data = self.db_read(sql, t) for row in data: code = row[0] print('NEW WAREHOUSE ADDED SUCCESSFULLY') print('ID:' + str(code) + industry + industry_name) print('COMMODITY:' + commodity + commodity_name) print('DESTINATION:' + destination + destination_name + 'UNLOADING:', destination_unloading) print('PRODUCTION:' + str(production) + 'MAX STORAGE:' + str(max_storage) + 'ORDER AT:' + str(threshold_goods) + 'FOR:' + str(threshold_cars) + 'CAR(S) OF TYPE:' + threshold_class) print('ROUTING:' + routing + '(' + routing_name + ')') return errors def chware(self, message): """change a new warehouse. the warehouse is linked to an industry, a destination (for the loaded wagons) and a commodity. the rate of production is held. at a given threshold value of production, cars of a specific type are requested. a note of previously ordered wagons is held (to enable further requests for empty cars to be made). """ if self.show_access(message, 'CHWARE warehouse id;(^industry^);(^commodity^);(^destination^);(prod rate);' +\ '(threshold quantity);(cars to order);(^class of car^);' +\ '(max storage);(^customer routing^)', 'S') != 0: return errors = 0 #warehouse code----------------------------------------------------------------------------- warehouse, rc = self.extract_field(message, 0, 'WAREHOUSE CODE') if rc > 0: return #read the database and populate the fields t = (warehouse,) sql = 'select industry, commodity, destination, production, ' +\ 'threshold_goods, threshold_cars, threshold_class, max_storage, ' +\ 'in_storage, routing from warehouse where warehouse.id = ?' 
count, ds_warehouse = self.db_read(sql, t) if count < 0: return for row in ds_warehouse: industry = row[0] commodity = row[1] destination = row[2] production = row[3] threshold_goods = row[4] threshold_cars = row[5] threshold_class = row[6] max_storage = row[7] routing = row[9] #industry----------------------------------------------------------------------------------- value, rc = self.extract_field(message, 1, '') if rc == 0: industry = value t = (industry, ) sql = 'select industry from place where industry = ?' count, ds_industry = self.db_read(sql, t) if count < 0: return if count == 0: errors = errors +
<filename>ctdcal/process_bottle.py<gh_stars>1-10 """Library to create SBE .btl equivalent files. TODO: allow for variable bottle fire scans instead of SBE standard 36 ex: user doesn't know how to change the config for the cast to add more scans, instead does it post-cast? <NAME> SIO/ODF Nov 7, 2016 """ import csv import logging import statistics import sys from collections import OrderedDict from datetime import datetime from pathlib import Path import numpy as np import pandas as pd from . import flagging as flagging from . import get_ctdcal_config from . import oxy_fitting as oxy_fitting cfg = get_ctdcal_config() log = logging.getLogger(__name__) BOTTLE_FIRE_COL = "btl_fire" BOTTLE_FIRE_NUM_COL = "btl_fire_num" # Retrieve the bottle data from a converted file. def retrieveBottleDataFromFile(converted_file): converted_df = pd.read_pickle(converted_file) return retrieveBottleData(converted_df) # Retrieve the bottle data from a dataframe created from a converted file. def retrieveBottleData(converted_df): if BOTTLE_FIRE_COL in converted_df.columns: converted_df[BOTTLE_FIRE_NUM_COL] = ( ( (converted_df[BOTTLE_FIRE_COL]) & ( converted_df[BOTTLE_FIRE_COL] != converted_df[BOTTLE_FIRE_COL].shift(1) ) ) .astype(int) .cumsum() ) # converted_df['bottle_fire_num'] = ((converted_df[BOTTLE_FIRE_COL] == False)).astype(int).cumsum() return converted_df.loc[converted_df[BOTTLE_FIRE_COL]] # return converted_df else: log.error("Bottle fire column:", BOTTLE_FIRE_COL, "not found") return pd.DataFrame() # empty dataframe def bottle_mean(btl_df): """Compute the mean for each bottle from a dataframe.""" btl_max = int(btl_df[BOTTLE_FIRE_NUM_COL].tail(n=1)) i = 1 output = pd.DataFrame() while i <= btl_max: output = pd.concat( ( output, btl_df[btl_df[BOTTLE_FIRE_NUM_COL] == i] .mean() .to_frame(name=i) .transpose(), ) ) i += 1 return output def bottle_median(btl_df): """Compute the median for each bottle from a dataframe.""" btl_max = int(btl_df[BOTTLE_FIRE_NUM_COL].tail(n=1)) i = 1 output 
= pd.DataFrame() while i <= btl_max: output = pd.concat( ( output, btl_df[btl_df[BOTTLE_FIRE_NUM_COL] == i] .median() .to_frame(name=i) .transpose(), ) ) i += 1 return output def _load_btl_data(btl_file, cols=None): """ Loads "bottle mean" CTD data from .pkl file. Function will return all data unless cols is specified (as a list of column names) """ btl_data = pd.read_pickle(btl_file) if cols is not None: btl_data = btl_data[cols] btl_data["SSSCC"] = Path(btl_file).stem.split("_")[0] return btl_data def _load_reft_data(reft_file, index_name="btl_fire_num"): """ Loads reft_file to dataframe and reindexes to match bottle data dataframe """ reft_data = pd.read_csv(reft_file, usecols=["btl_fire_num", "T90", "REFTMP_FLAG_W"]) reft_data.set_index(index_name) reft_data["SSSCC_TEMP"] = Path(reft_file).stem.split("_")[0] reft_data["REFTMP"] = reft_data["T90"] return reft_data def _load_salt_data(salt_file, index_name="SAMPNO"): """ Loads salt_file to dataframe and reindexes to match bottle data dataframe """ salt_data = pd.read_csv( salt_file, usecols=["SAMPNO", "SALNTY", "BathTEMP", "CRavg"] ) salt_data.set_index(index_name) salt_data["SSSCC_SALT"] = Path(salt_file).stem.split("_")[0] salt_data.rename(columns={"SAMPNO": "SAMPNO_SALT"}, inplace=True) return salt_data def _add_btl_bottom_data(df, cast, lat_col="LATITUDE", lon_col="LONGITUDE", decimals=4): cast_details = pd.read_csv( # cfg.dirs["logs"] + "cast_details.csv", dtype={"SSSCC": str} cfg.dirs["logs"] + "bottom_bottle_details.csv", dtype={"SSSCC": str}, ) cast_details = cast_details[cast_details["SSSCC"] == cast] # df[lat_col] = np.round(cast_details["latitude"].iat[0], decimals) # df[lon_col] = np.round(cast_details["longitude"].iat[0], decimals) df[lat_col] = cast_details["latitude"].iat[0] df[lon_col] = cast_details["longitude"].iat[0] ts = pd.to_datetime(cast_details["bottom_time"].iat[0], unit="s") date = ts.strftime("%Y%m%d") hour = ts.strftime("%H%M") df["DATE"] = date df["TIME"] = hour return df def 
load_all_btl_files(ssscc_list, cols=None): """ Load bottle and secondary (e.g. reference temperature, bottle salts, bottle oxygen) files for station/cast list and merge into a dataframe. Parameters ---------- ssscc_list : list of str List of stations to load cols : list of str, optional Subset of columns to load, defaults to loading all Returns ------- df_data_all : DataFrame Merged dataframe containing all loaded data """ df_data_all = pd.DataFrame() for ssscc in ssscc_list: log.info("Loading BTL data for station: " + ssscc + "...") btl_file = cfg.dirs["bottle"] + ssscc + "_btl_mean.pkl" btl_data = _load_btl_data(btl_file, cols) ### load REFT data reft_file = cfg.dirs["reft"] + ssscc + "_reft.csv" try: reft_data = _load_reft_data(reft_file) except FileNotFoundError: log.warning( "Missing (or misnamed) REFT Data Station: " + ssscc + "...filling with NaNs" ) reft_data = pd.DataFrame(index=btl_data.index, columns=["T90"], dtype=float) reft_data["btl_fire_num"] = btl_data["btl_fire_num"].astype(int) reft_data["SSSCC_TEMP"] = ssscc # TODO: is this ever used? ### load REFC data refc_file = cfg.dirs["salt"] + ssscc + "_salts.csv" try: refc_data = _load_salt_data(refc_file, index_name="SAMPNO") except FileNotFoundError: log.warning( "Missing (or misnamed) REFC Data Station: " + ssscc + "...filling with NaNs" ) refc_data = pd.DataFrame( index=btl_data.index, columns=["CRavg", "BathTEMP", "BTLCOND"], dtype=float, ) refc_data["SAMPNO_SALT"] = btl_data["btl_fire_num"].astype(int) ### load OXY data oxy_file = cfg.dirs["oxygen"] + ssscc try: oxy_data, params = oxy_fitting.load_winkler_oxy(oxy_file) except FileNotFoundError: log.warning( "Missing (or misnamed) REFO Data Station: " + ssscc + "...filling with NaNs" ) oxy_data = pd.DataFrame( index=btl_data.index, columns=[ "FLASKNO", "TITR_VOL", "TITR_TEMP", "DRAW_TEMP", "TITR_TIME", "END_VOLTS", ], dtype=float, ) oxy_data["STNNO_OXY"] = ssscc[:3] # TODO: are these values oxy_data["CASTNO_OXY"] = ssscc[3:] # ever used? 
oxy_data["BOTTLENO_OXY"] = btl_data["btl_fire_num"].astype(int) ### clean up dataframe # Horizontally concat DFs to have all data in one DF btl_data = pd.merge(btl_data, reft_data, on="btl_fire_num", how="outer") btl_data = pd.merge( btl_data, refc_data, left_on="btl_fire_num", right_on="SAMPNO_SALT", how="outer", ) btl_data = pd.merge( btl_data, oxy_data, left_on="btl_fire_num", right_on="BOTTLENO_OXY", how="outer", ) if len(btl_data) > 36: log.error( f"""Length of bottle data for {ssscc} is > 36, check for errors in reference parameter files""" ) # Add bottom of cast information (date,time,lat,lon,etc.) btl_data = _add_btl_bottom_data(btl_data, ssscc) # Merge cast into df_data_all try: df_data_all = pd.concat([df_data_all, btl_data], sort=False) except AssertionError: raise AssertionError( "Columns of " + ssscc + " do not match those of previous columns" ) # print("* Finished BTL data station: " + ssscc + " *") # Drop duplicated columns generated by concatenation df_data_all = df_data_all.loc[:, ~df_data_all.columns.duplicated()] df_data_all["master_index"] = range(len(df_data_all)) return df_data_all def _reft_loader(ssscc, reft_dir): # semi-flexible search for reft file (in the form of *ssscc.cap) try: reft_path = sorted(Path(reft_dir).glob(f"*{ssscc}.cap"))[0] except IndexError: raise FileNotFoundError # this works better than pd.read_csv as format is semi-inconsistent (cf .cap files) with open(reft_path, "r", newline="") as f: reftF = csv.reader( f, delimiter=" ", quoting=csv.QUOTE_NONE, skipinitialspace="True" ) reftArray = [] for row in reftF: if len(row) != 17: # skip over 'bad' rows (empty lines, comments, etc.) 
continue reftArray.append(row) reftDF = pd.DataFrame.from_records(reftArray) reftDF = reftDF.replace( # remove text columns, only need numbers and dates to_replace=["bn", "diff", "val", "t90", "="], value=np.nan ) reftDF = reftDF.dropna(axis=1) reftDF[1] = reftDF[[1, 2, 3, 4]].agg(" ".join, axis=1) # dd/mm/yy/time cols are reftDF.drop(columns=[2, 3, 4], inplace=True) # read separately; combine into one columns = OrderedDict( # having this as a dict streamlines next steps [ ("index_memory", int), ("datetime", object), ("btl_fire_num", int), ("diff", int), ("raw_value", float), ("T90", float), ] ) reftDF.columns = list(columns.keys()) # name columns reftDF = reftDF.astype(columns) # force dtypes # assign initial flags (large "diff" = unstable reading, flag questionable) reftDF["REFTMP_FLAG_W"] = 2 reftDF.loc[reftDF["diff"].abs() >= 3000, "REFTMP_FLAG_W"] = 3 # add in STNNBR, CASTNO columns # TODO: should these be objects or floats? be consistent! # string prob better for other sta/cast formats (names, letters, etc.) reftDF["STNNBR"] = ssscc[0:3] reftDF["CASTNO"] = ssscc[3:5] return reftDF def process_reft(ssscc_list, reft_dir=cfg.dirs["reft"]): # TODO: import reft_dir from a config file """ SBE35 reference thermometer processing function. Load in .cap files for given station/cast list, perform basic flagging, and export to .csv files. Inputs ------ ssscc_list : list of str List of stations to process reft_dir : str, optional Path to folder containing raw salt files (defaults to data/reft/) """ for ssscc in ssscc_list: if not Path(reft_dir + ssscc + "_reft.csv").exists(): try: reftDF = _reft_loader(ssscc, reft_dir) reftDF.to_csv(reft_dir + ssscc + "_reft.csv", index=False) except FileNotFoundError: log.warning( "refT file for cast " + ssscc + " does not exist... 
skipping" ) return def add_btlnbr_cols(df, btl_num_col): df["BTLNBR"] = df[btl_num_col].astype(int) # default to everything being good df["BTLNBR_FLAG_W"] = 2 return df def load_hy_file(path_to_hyfile): df = pd.read_csv(path_to_hyfile, comment="#", skiprows=[0]) df = df[df["EXPOCODE"] != "END_DATA"] return df def export_report_data(df): df["STNNBR"] = [int(x[0:3]) for x in df["SSSCC"]] df["CTDPRS"] = df["CTDPRS"].round(1) cruise_report_cols = [ "STNNBR", "CTDPRS", "CTDTMP1", "CTDTMP1_FLAG_W", "CTDTMP2", "CTDTMP2_FLAG_W", "REFTMP", "CTDCOND1", "CTDCOND1_FLAG_W", "CTDCOND2", "CTDCOND2_FLAG_W", "BTLCOND", "CTDSAL", "CTDSAL_FLAG_W", "SALNTY", "CTDOXY", "CTDOXY_FLAG_W", "CTDRINKO", "CTDRINKO_FLAG_W", "OXYGEN", ] # add in missing flags df["CTDTMP1_FLAG_W"] = flagging.by_residual( df["CTDTMP1"], df["REFTMP"], df["CTDPRS"] ) df["CTDTMP2_FLAG_W"] = flagging.by_residual( df["CTDTMP1"], df["REFTMP"], df["CTDPRS"] ) df["CTDCOND1_FLAG_W"] = flagging.by_residual( df["CTDCOND1"], df["BTLCOND"], df["CTDPRS"] ) df["CTDCOND2_FLAG_W"] = flagging.by_residual( df["CTDCOND2"], df["BTLCOND"], df["CTDPRS"] ) df["CTDOXY_FLAG_W"] = flagging.by_percent_diff(df["CTDOXY"], df["OXYGEN"]) df["CTDRINKO_FLAG_W"] = flagging.by_percent_diff(df["CTDRINKO"], df["OXYGEN"]) df[cruise_report_cols].to_csv("data/scratch_folder/report_data.csv", index=False) return def export_hy1(df, out_dir=cfg.dirs["pressure"], org="ODF"): log.info("Exporting bottle file") btl_data = df.copy() now = datetime.now() file_datetime = now.strftime("%Y%m%d") # TODO: move to config; integrate Barna's "params" package instead? 
btl_columns = { "EXPOCODE": "", "SECT_ID": "", "STNNBR": "", "CASTNO": "", "SAMPNO": "", "BTLNBR": "", "BTLNBR_FLAG_W": "", "DATE": "", "TIME": "", "LATITUDE": "", "LONGITUDE": "", "DEPTH": "METERS", "CTDPRS": "DBAR", "CTDTMP": "ITS-90", "CTDSAL": "PSS-78", "CTDSAL_FLAG_W": "", "SALNTY": "PSS-78", "SALNTY_FLAG_W": "", # "CTDOXY": "UMOL/KG", # "CTDOXY_FLAG_W": "", # "CTDRINKO": "UMOL/KG", # "CTDRINKO_FLAG_W": "", "CTDOXY": "UMOL/KG", "CTDOXY_FLAG_W": "", "OXYGEN": "UMOL/KG", "OXYGEN_FLAG_W": "", "REFTMP": "ITS-90", "REFTMP_FLAG_W": "",
<gh_stars>1-10 #!/usr/bin/env python3 original_print = print from prompt_toolkit import print_formatted_text, Application # from prompt_toolkit import print_formatted_text as print from prompt_toolkit.formatted_text import HTML import pathlib from bs4 import BeautifulSoup # http://www.grantjenks.com/docs/diskcache/ import diskcache from icli.futsexchanges import FUTS_EXCHANGE import icli.orders as orders import decimal import sys from collections import Counter, defaultdict from dataclasses import dataclass, field import datetime import os from typing import Union, Optional, Sequence, Any, Mapping import numpy as np import pendulum import pandas as pd # for automatic money formatting in some places import locale import math locale.setlocale(locale.LC_ALL, "") import os # Tell pygame to not print a hello message when it is imported os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide" # sounds! import pygame import ib_insync from ib_insync import ( IB, Contract, Trade, Bag, ComboLeg, Ticker, RealTimeBarList, PnLSingle, Order, NewsBulletin, NewsTick, ) import pprint import asyncio import logging from loguru import logger import seaborn import icli.lang as lang from icli.helpers import * # FUT_EXP is appearing from here from mutil.numeric import fmtPrice, fmtPricePad from mutil.timer import Timer import tradeapis.buylang as buylang # Configure logger where the ib_insync live service logs get written. # Note: if you have weird problems you don't think are being exposed # in the CLI, check this log file for what ib_insync is actually doing. 
logging.basicConfig(
    level=logging.INFO,
    filename=f"icli-{pendulum.now('US/Eastern')}.log",
    format="%(asctime)s %(message)s",
)

pp = pprint.PrettyPrinter(indent=4)

# setup color gradients we use to show gain/loss of daily quotes
COLOR_COUNT = 100

# palette 'RdYlGn' is a spectrum from low RED to high GREEN which matches
# the colors we want for low/negative (red) to high/positive (green)
MONEY_COLORS = seaborn.color_palette("RdYlGn", n_colors=COLOR_COUNT, desat=1).as_hex()

# only keep lowest 25 and highest 25 elements since middle values are less distinct
MONEY_COLORS = MONEY_COLORS[:25] + MONEY_COLORS[-25:]

# display order we want: RTY, ES, NQ, YM
FUT_ORD = dict(MES=-9, ES=-9, RTY=-10, M2K=-10, NQ=-8, MNQ=-8, MYM=-7, YM=-7)

# A-Z, Z-A, translate between them (lowercase only)
ATOZ = "".join([chr(x) for x in range(ord("a"), ord("z") + 1)])
ZTOA = ATOZ[::-1]
ATOZTOA_TABLE = str.maketrans(ATOZ, ZTOA)


def invertstr(x):
    """Mirror a lowercase string through the alphabet (a<->z, b<->y, ...)."""
    return x.translate(ATOZTOA_TABLE)


# Fields updated live for toolbar printing.
# Printed in the order of this list (the order the dict is created)
# Some math and definitions for values:
# https://www.interactivebrokers.com/en/software/tws/usersguidebook/realtimeactivitymonitoring/available_for_trading.htm
# https://ibkr.info/node/1445
LIVE_ACCOUNT_STATUS = [
    # row 1
    "AvailableFunds",
    "BuyingPower",
    "Cushion",
    "DailyPnL",
    "DayTradesRemaining",
    "DayTradesRemainingT+1",
    "DayTradesRemainingT+2",
    "DayTradesRemainingT+3",
    "DayTradesRemainingT+4",
    # row 2
    "ExcessLiquidity",
    "FuturesPNL",
    "GrossPositionValue",
    "MaintMarginReq",
    "OptionMarketValue",
    # row 3
    "NetLiquidation",
    "RealizedPnL",
    "TotalCashValue",
    "UnrealizedPnL",
    "SMA",
    # unpopulated:
    # "Leverage",
    # "HighestSeverity",
]

STATUS_FIELDS = set(LIVE_ACCOUNT_STATUS)


def asink(x):
    """loguru sink that writes straight through the original print().

    Don't use print_formatted_text() (aliased to print()) because it doesn't
    respect the patch_stdout() context manager we've wrapped this entire
    runtime around. If we don't have patch_stdout() guarantees, the interface
    rips apart with prompt and bottom_toolbar problems during async logging.
    """
    original_print(x, end="")


logger.remove()
logger.add(asink, colorize=True)

# new log level to disable color bolding on INFO default
logger.level("FRAME", no=25)
logger.level("ARGS", no=40, color="<blue>")


def readableHTML(html):
    """Return contents of 'html' with tags stripped and in a _reasonably_
    readable plain text format"""
    # BUG FIX: this previously called `bs4.BeautifulSoup` and bare `re`, but
    # the file only does `from bs4 import BeautifulSoup` and never imports
    # `re`, so calling this function raised NameError.  Use the imported
    # BeautifulSoup name and import re locally.
    import re

    return re.sub(r"(\n[\s]*)+", "\n", BeautifulSoup(html).get_text())


# logger.remove()
# logger.add(asink, colorize=True)

# Create prompt object.
from prompt_toolkit import PromptSession
from prompt_toolkit.history import FileHistory, ThreadedHistory
from prompt_toolkit.application import get_app
from prompt_toolkit.shortcuts import set_title

import asyncio
import os

stocks = ["IWM", "QQQ", "VXX", "AAPL", "SBUX", "TSM"]

# Futures to exchange mappings:
# https://www.interactivebrokers.com/en/index.php?f=26662
# Note: Use ES and RTY and YM for quotes because higher volume
# also curiously, MNQ has more volume than NQ?
# Volumes at: https://www.cmegroup.com/trading/equity-index/us-index.html
# ES :: MES
# RTY :: M2K
# YM :: MYM
# NQ :: MNQ
sfutures = {
    "GLOBEX": ["ES", "RTY", "MNQ", "GBP"],  # "HE"],
    "ECBOT": ["YM"],  # , "TN", "ZF"],
    # "NYMEX": ["GC", "QM"],
}

# Note: ContFuture is only for historical data; it can't quote or trade.
# So, all trades must use a manual contract month (quarterly) futures = [ Future(symbol=sym, lastTradeDateOrContractMonth=FUT_EXP, exchange=x, currency="USD") for x, syms in sfutures.items() for sym in syms ] # logger.info("futures are: {}", futures) @dataclass class IBKRCmdlineApp: # Your IBKR Account ID (required) accountId: str # number of seconds between refreshing the toolbar quote/balance views # (more frequent updates is higher redraw CPU utilization) toolbarUpdateInterval: float = 2.22 host: str = "127.0.0.1" port: int = 4001 # initialized to True/False when we first see the account # ID returned from the API which will tell us if this is a # sandbox ID or True Account ID isSandbox: Optional[bool] = None # The Connection ib: IB = field(default_factory=IB) # generic cache for data usage (strikes, etc) cache: Mapping[Any, Any] = field( default_factory=lambda: diskcache.Cache("./cache-multipurpose") ) # State caches quoteState: dict[str, Ticker] = field(default_factory=dict) quoteContracts: dict[str, Contract] = field(default_factory=dict) depthState: dict[Contract, Ticker] = field(default_factory=dict) summary: dict[str, float] = field(default_factory=dict) position: dict[str, float] = field(default_factory=dict) order: dict[str, float] = field(default_factory=dict) liveBars: dict[str, RealTimeBarList] = field(default_factory=dict) pnlSingle: dict[str, PnLSingle] = field(default_factory=dict) exiting: bool = False ol: buylang.OLang = field(default_factory=buylang.OLang) # Specific dict of ONLY fields we show in the live account status toolbar. # Saves us from sorting/filtering self.summary() with every full bar update. 
accountStatus: dict[str, float] = field( default_factory=lambda: dict( zip(LIVE_ACCOUNT_STATUS, [0.00] * len(LIVE_ACCOUNT_STATUS)) ) ) # Cache all contractIds to names conIdCache: Mapping[int, Contract] = field( default_factory=lambda: diskcache.Cache("./cache-contracts") ) def __post_init__(self) -> None: # just use the entire IBKRCmdlineApp as our app state! self.opstate = self async def qualify(self, *contracts) -> Union[list[Contract], None]: """Qualify contracts against the IBKR allowed symbols. Mainly populates .localSymbol and .conId We also cache the results for ease of re-use and for mapping contractIds back to names later.""" # Note: this is the ONLY place we use self.ib.qualifyContractsAsync(). # All other usage should use self.qualify() so the cache is maintained. got = await self.ib.qualifyContractsAsync(*contracts) # iterate resolved contracts and save them all for contract in got: # Populate the id to contract cache! if contract.conId not in self.conIdCache: # default 30 day expiration... self.conIdCache.set(contract.conId, contract, expire=86400 * 30) # type: ignore return got def contractForPosition( self, sym, qty: Optional[float] = None ) -> Union[None, tuple[Contract, float, float]]: """Returns matching portfolio position as (contract, size, marketPrice). Looks up position by symbol name and returns either provided quantity or total quantity. If no input quantity, return total position size. If input quantity larger than position size, returned size is capped to max position size.""" portitems = self.ib.portfolio() logger.info("Port is: {}", portitems) contract = None for pi in portitems: # Note: using 'localSymbol' because for options, it includes # the full OCC-like format, while contract.symbol will just # be the underlying equity symbol. 
if pi.contract.localSymbol == sym: contract = pi.contract if qty is None: qty = pi.position elif abs(qty) > abs(pi.position): qty = pi.position return contract, qty, pi.marketPrice return None async def contractForOrderRequest( self, oreq: buylang.OrderRequest, exchange="SMART" ) -> Optional[Contract]: """Return a valid qualified contract for any order request. If order request has multiple legs, returns a Bag contract representing the spread. If order request only has one symbol, returns a regular future/stock/option contract. If symbol(s) in order request are not valid, returns None.""" if oreq.isSpread(): return await self.bagForSpread(oreq, exchange) if oreq.isSingle(): contract = contractForName(oreq.orders[0].symbol, exchange=exchange) await self.qualify(contract) # only return success if the contract validated if contract.conId: return contract return None # else, order request had no orders... return None async def bagForSpread( self, oreq: buylang.OrderRequest, exchange="SMART", currency="USD" ) -> Optional[Bag]: """Given a multi-leg OrderRequest, return a qualified Bag contract. If legs do not validate, returns None and prints errors along the way.""" # For IBKR spreads ("Bag" contracts), each leg of the spread is qualified # then placed in the final contract instead of the normal approach of qualifying # the final contract itself (because Bag contracts have Legs and each Leg is only # a contractId we have to look up via qualify() individually). 
contracts = [ contractForName(s.symbol, exchange=exchange, currency=currency) for s in oreq.orders ] await self.qualify(*contracts) if not all(c.conId for c in contracts): logger.error("Not all contracts qualified!") return None contractUnderlying = contracts[0].symbol reqUnderlying = oreq.orders[0].underlying() if contractUnderlying != reqUnderlying.lstrip("/"): logger.error( "Resolved symbol [{}] doesn't match order underlying [{}]?", contractUnderlying, reqUnderlying, ) return None if not all(c.symbol == contractUnderlying for c in contracts): logger.error("All contracts must have same underlying for spread!") return None # Iterate (in MATCHED PAIRS) the resolved contracts with their original order details legs = [] # We use more explicit exchange mapping here since future options # require naming their exchanges instead of using SMART everywhere. useExchange: str for c, o in zip(contracts, oreq.orders): useExchange = c.exchange leg = ComboLeg( conId=c.conId, ratio=o.multiplier,
<gh_stars>100-1000 # Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # import math import os import sys from logging import getLogger import torch from .pretrain import load_embeddings # , TRANSFORMER_LAYER_PARAMS from .transformer import DECODER_ONLY_PARAMS, TransformerModel, Classifier from ..data.dictionary import UNK_WORD logger = getLogger() def check_model_params(params): """ Check models parameters. """ # masked language modeling task parameters assert params.bptt >= 1 assert 0 <= params.word_pred < 1 assert 0 <= params.sample_alpha < 1 s = params.word_mask_keep_rand.split(",") assert len(s) == 3 s = [float(x) for x in s] assert all([0 <= x <= 1 for x in s]) and sum(s) == 1 params.word_mask = s[0] params.word_keep = s[1] params.word_rand = s[2] if params.mask_length == "": params.mask_length = None params.mask_length_dist = None elif params.mask_length == "poisson": assert ( params.poisson_lambda is not None ), "poisson_lambda is None, it should be set when using poisson mask_length" _lambda = params.poisson_lambda lambda_to_the_k = 1 e_to_the_minus_lambda = math.exp(-_lambda) k_factorial = 1 ps = [] for k in range(0, 128): ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial) lambda_to_the_k *= _lambda k_factorial *= k + 1 if ps[-1] < 0.0000001: break ps = torch.FloatTensor(ps) params.mask_length_dist_probas = ps params.mask_length_dist = torch.distributions.Categorical(ps) else: params.mask_length = int(params.mask_length) ps = torch.FloatTensor(params.mask_length + 1).fill_(0.0) ps[params.mask_length] = 1 params.mask_length_dist = torch.distributions.Categorical(ps) # input sentence noise for DAE if len(params.ae_steps) == 0: assert params.word_shuffle == 0 assert params.word_dropout == 0 assert params.word_blank == 0 else: assert params.word_shuffle == 0 or params.word_shuffle > 1 assert 0 <= 
params.word_dropout < 1 assert 0 <= params.word_blank < 1 # model dimensions if params.emb_dim_encoder == 0 and params.emb_dim_decoder == 0: assert params.emb_dim > 0 params.emb_dim_encoder = params.emb_dim params.emb_dim_decoder = params.emb_dim else: assert params.emb_dim == 0 assert params.emb_dim_encoder > 0 and params.emb_dim_decoder > 0 if params.emb_dim_encoder == params.emb_dim_decoder: params.emb_dim = params.emb_dim_decoder else: assert params.reload_emb == "", ( "Pre-trained embeddings are not supported when the embedding size of the " "encoder and the decoder do not match " ) assert params.emb_dim_encoder % params.n_heads == 0 assert params.emb_dim_decoder % params.n_heads == 0 if params.n_layers_encoder == 0 and params.n_layers_decoder == 0: assert params.n_layers > 0 params.n_layers_encoder = params.n_layers params.n_layers_decoder = params.n_layers else: assert params.n_layers == 0 assert params.n_layers_encoder > 0 and params.n_layers_decoder > 0 # reload pretrained word embeddings if params.reload_emb != "": assert os.path.isfile(params.reload_emb) # reload a pretrained model if params.reload_model != "": if params.encoder_only: assert os.path.isfile(params.reload_model) else: s = params.reload_model.split(",") assert len(s) == 2 assert all([x == "" or os.path.isfile(x) for x in s]), [ x for x in s if not os.path.isfile(x) ] if params.use_classifier and params.reload_classifier == "": params.reload_classifier = params.reload_model assert not ( params.beam_size > 1 and params.number_samples > 1 ), "Cannot sample when already doing beam search" assert (params.eval_temperature is None) == ( params.number_samples <= 1 ), "Eval temperature should be set if and only if taking several samples at eval time" def set_pretrain_emb(model, dico, word2id, embeddings, gpu): """ Pretrain word embeddings. 
""" n_found = 0 with torch.no_grad(): for i in range(len(dico)): idx = word2id.get(dico[i], None) if idx is None: continue n_found += 1 model.embeddings.weight[i] = ( embeddings[idx].cuda() if gpu else embeddings[idx] ) model.pred_layer.proj.weight[i] = ( embeddings[idx].cuda() if gpu else embeddings[idx] ) logger.info( "Pretrained %i/%i words (%.3f%%)." % (n_found, len(dico), 100.0 * n_found / len(dico)) ) @torch.no_grad() def build_model(params, dico, gpu=True): """ Build model. """ if params.encoder_only: # build model = TransformerModel(params, dico, is_encoder=True, with_output=True) # reload pretrained word embeddings if params.reload_emb != "": word2id, embeddings = load_embeddings(params.reload_emb, params) set_pretrain_emb(model, dico, word2id, embeddings, gpu) # reload a pretrained model if params.reload_model != "": logger.info("============ Model Reloading") logger.info("Reloading model from %s ..." % params.reload_model) reload_transformer(params, params.reload_model, dico, model, "model", gpu) logger.info("Model: {}".format(model)) logger.info( "Number of parameters (model): %i" % sum([p.numel() for p in model.parameters() if p.requires_grad]) ) logger.info("") return [model.cuda() if gpu else model] else: # build # TODO: only output when necessary - len(params.clm_steps + params.mlm_steps) > 0 encoder = TransformerModel(params, dico, is_encoder=True, with_output=True) if params.separate_decoders: decoders = [ TransformerModel(params, dico, is_encoder=False, with_output=True) for _ in params.lang2id.values() ] else: decoders = [ TransformerModel(params, dico, is_encoder=False, with_output=True) ] for layer in range(params.n_layers_decoder): if layer <= params.n_share_dec - 1: assert params.amp == -1, "sharing layers is not supported with AMP" logger.info("Sharing decoder attention parameters for layer %i" % layer) for i in range(1, len(decoders)): decoders[i].attentions[layer] = decoders[0].attentions[layer] # reload pretrained word embeddings if 
params.reload_emb != "": word2id, embeddings = load_embeddings(params.reload_emb, params) set_pretrain_emb(encoder, dico, word2id, embeddings, gpu) for decoder in decoders: set_pretrain_emb(decoder, dico, word2id, embeddings, gpu) # reload a pretrained model if params.reload_model != "": logger.info("============ Model Reloading") enc_path, dec_path = params.reload_model.split(",") assert not (enc_path == "" and dec_path == "") # reload encoder if enc_path != "": logger.info("Reloading encoder from %s ..." % enc_path) reload_transformer(params, enc_path, dico, encoder, "encoder", gpu) # reload decoders if dec_path != "": for dec in decoders: logger.info("Reloading decoders from %s ..." % dec_path) if params.reload_encoder_for_decoder: reload_transformer(params, dec_path, dico, dec, "encoder", gpu) else: reload_transformer(params, dec_path, dico, dec, "decoder", gpu) logger.debug("Encoder: {}".format(encoder)) logger.debug("Decoder: {}".format(decoders)) logger.info( "Number of parameters (encoder): %i" % sum([p.numel() for p in encoder.parameters() if p.requires_grad]) ) logger.info( "Number of parameters (decoders): %i" % sum([p.numel() for p in decoders[0].parameters() if p.requires_grad]) ) logger.info(f"Number of decoders: {len(decoders)}") logger.info("") return ( [encoder.cuda() if gpu else encoder], [dec.cuda() if gpu else dec for dec in decoders], ) @torch.no_grad() def build_classifier(params): """ Build classifier. """ # build classifier = Classifier(params) # reload a pretrained model if params.reload_classifier != "": logger.info("Reloading classifier from %s ..." % params.reload_classifier) reloaded = torch.load( params.reload_classifier, map_location=lambda storage, loc: storage.cuda(params.local_rank), ) if "classifier" not in reloaded: logger.warning( f"There is no classifier in {params.reload_classifier}. 
The classifier weights will be initialized randomly" ) else: reloaded = reloaded["classifier"] if all([k.startswith("module.") for k in reloaded.keys()]): reloaded = {k[len("module.") :]: v for k, v in reloaded.items()} classifier.load_state_dict(reloaded) logger.info("Classifier: {}".format(classifier)) return [classifier.cuda()] def reload_transformer(params, path, dico, model, model_type, gpu=True): """ Reload a transformer state dict to current model: clean 'module.' from state dict, match the word embeddings comparing dicos, match lang embedding with params lang mapping, extend or truncate position embeddings when size dont match, load state dict. """ reloaded = torch.load( path, map_location=lambda storage, loc: storage.cuda(params.local_rank) if gpu else storage.cpu(), ) clean_model_state_dict(reloaded, model_type) reload_word_embeddings(reloaded, dico, model_type) reload_lang_embeddings(reloaded, params, model_type) reload_position_embeddings(reloaded, model, model_type) # if the model is a decoder if hasattr(model, "encoder_attn"): for i in range(params.n_layers_decoder): for name in DECODER_ONLY_PARAMS: weight_name = name % i if weight_name not in reloaded[model_type]: logger.warning("Parameter %s not found." % (weight_name)) encoder_attn_name = weight_name.replace( "encoder_attn", "attentions" ) if ( getattr(params, "reload_encoder_attn_on_decoder", False) and "encoder_attn" in weight_name and encoder_attn_name in reloaded[model_type] ): logger.warning(f"Reloading {encoder_attn_name} instead") reloaded[model_type][weight_name] = ( reloaded[model_type][encoder_attn_name].clone().detach() ) else: reloaded[model_type][weight_name] = model.state_dict()[ weight_name ] model.load_state_dict(reloaded[model_type], strict=not params.spans_emb_encoder) def clean_model_state_dict(reloaded, model_type): """ remove prefix module from the keys of the model state dict. 
""" model_reloaded = reloaded[model_type if model_type in reloaded else "model"] if all([k.startswith("module.") for k in model_reloaded.keys()]): model_reloaded = {k[len("module.") :]: v for k, v in model_reloaded.items()} reloaded[model_type] = model_reloaded def reload_word_embeddings(reloaded, dico, model_type): """ Check when reloading a model that dictionary are the same. If not, do a word embedding mapping if possible. """ reloaded_word2id = reloaded["dico_word2id"] reloaded_id2word = reloaded["dico_id2word"] assert len(reloaded_word2id) == len(reloaded_id2word) assert all(reloaded_id2word[v] == k for k, v in reloaded_word2id.items()) matching_indices = [] word_not_found = [] for idx, word in dico.id2word.items(): if word not in reloaded_word2id: word_not_found += [word] matching_indices += [reloaded_word2id[UNK_WORD]] else: matching_indices += [reloaded_word2id[word]] assert len(matching_indices) == len(dico) if len(word_not_found) > 0: logger.warning( f"When reloading word embeddings, could not find embeddings for {len(word_not_found)} words: {word_not_found[0:5] + ['...'] + word_not_found[-5:]}... Initializing them to < unk >." ) reloaded[model_type]["embeddings.weight"] = torch.cat( [ reloaded[model_type]["embeddings.weight"][index : index + 1] for index in matching_indices ], dim=0, ) if "pred_layer.proj.weight" in reloaded[model_type]: first_line = reloaded[model_type]["pred_layer.proj.weight"][0:1] embedding_size = reloaded[model_type]["pred_layer.proj.weight"].shape[1] reloaded[model_type]["pred_layer.proj.weight"] = torch.cat( [ reloaded[model_type]["pred_layer.proj.weight"][index : index + 1] if index is not None else torch.normal( torch.zeros_like(first_line), torch.ones_like(first_line * (embedding_size ** (-0.5))), ) for index in matching_indices ],
import GetTargetHttpsProxyRequest from .types.compute import GetTargetInstanceRequest from .types.compute import GetTargetPoolRequest from .types.compute import GetTargetSslProxyRequest from .types.compute import GetTargetTcpProxyRequest from .types.compute import GetTargetVpnGatewayRequest from .types.compute import GetUrlMapRequest from .types.compute import GetVpnGatewayRequest from .types.compute import GetVpnTunnelRequest from .types.compute import GetXpnHostProjectRequest from .types.compute import GetXpnResourcesProjectsRequest from .types.compute import GetZoneOperationRequest from .types.compute import GetZoneRequest from .types.compute import GlobalNetworkEndpointGroupsAttachEndpointsRequest from .types.compute import GlobalNetworkEndpointGroupsDetachEndpointsRequest from .types.compute import GlobalSetLabelsRequest from .types.compute import GlobalSetPolicyRequest from .types.compute import GRPCHealthCheck from .types.compute import GuestAttributes from .types.compute import GuestAttributesEntry from .types.compute import GuestAttributesValue from .types.compute import GuestOsFeature from .types.compute import HealthCheck from .types.compute import HealthCheckList from .types.compute import HealthCheckLogConfig from .types.compute import HealthCheckReference from .types.compute import HealthChecksAggregatedList from .types.compute import HealthCheckService from .types.compute import HealthCheckServiceReference from .types.compute import HealthCheckServicesList from .types.compute import HealthChecksScopedList from .types.compute import HealthStatus from .types.compute import HealthStatusForNetworkEndpoint from .types.compute import HostRule from .types.compute import HTTP2HealthCheck from .types.compute import HttpFaultAbort from .types.compute import HttpFaultDelay from .types.compute import HttpFaultInjection from .types.compute import HttpHeaderAction from .types.compute import HttpHeaderMatch from .types.compute import HttpHeaderOption from 
.types.compute import HTTPHealthCheck from .types.compute import HttpQueryParameterMatch from .types.compute import HttpRedirectAction from .types.compute import HttpRetryPolicy from .types.compute import HttpRouteAction from .types.compute import HttpRouteRule from .types.compute import HttpRouteRuleMatch from .types.compute import HTTPSHealthCheck from .types.compute import Image from .types.compute import ImageList from .types.compute import InitialStateConfig from .types.compute import InsertAddressRequest from .types.compute import InsertAutoscalerRequest from .types.compute import InsertBackendBucketRequest from .types.compute import InsertBackendServiceRequest from .types.compute import InsertDiskRequest from .types.compute import InsertExternalVpnGatewayRequest from .types.compute import InsertFirewallRequest from .types.compute import InsertForwardingRuleRequest from .types.compute import InsertGlobalAddressRequest from .types.compute import InsertGlobalForwardingRuleRequest from .types.compute import InsertGlobalNetworkEndpointGroupRequest from .types.compute import InsertHealthCheckRequest from .types.compute import InsertImageRequest from .types.compute import InsertInstanceGroupManagerRequest from .types.compute import InsertInstanceGroupRequest from .types.compute import InsertInstanceRequest from .types.compute import InsertInstanceTemplateRequest from .types.compute import InsertInterconnectAttachmentRequest from .types.compute import InsertInterconnectRequest from .types.compute import InsertLicenseRequest from .types.compute import InsertNetworkEndpointGroupRequest from .types.compute import InsertNetworkRequest from .types.compute import InsertNodeGroupRequest from .types.compute import InsertNodeTemplateRequest from .types.compute import InsertPacketMirroringRequest from .types.compute import InsertRegionAutoscalerRequest from .types.compute import InsertRegionBackendServiceRequest from .types.compute import InsertRegionCommitmentRequest from 
.types.compute import InsertRegionDiskRequest from .types.compute import InsertRegionHealthCheckRequest from .types.compute import InsertRegionHealthCheckServiceRequest from .types.compute import InsertRegionInstanceGroupManagerRequest from .types.compute import InsertRegionNetworkEndpointGroupRequest from .types.compute import InsertRegionNotificationEndpointRequest from .types.compute import InsertRegionSslCertificateRequest from .types.compute import InsertRegionTargetHttpProxyRequest from .types.compute import InsertRegionTargetHttpsProxyRequest from .types.compute import InsertRegionUrlMapRequest from .types.compute import InsertReservationRequest from .types.compute import InsertResourcePolicyRequest from .types.compute import InsertRouteRequest from .types.compute import InsertRouterRequest from .types.compute import InsertSecurityPolicyRequest from .types.compute import InsertSslCertificateRequest from .types.compute import InsertSslPolicyRequest from .types.compute import InsertSubnetworkRequest from .types.compute import InsertTargetGrpcProxyRequest from .types.compute import InsertTargetHttpProxyRequest from .types.compute import InsertTargetHttpsProxyRequest from .types.compute import InsertTargetInstanceRequest from .types.compute import InsertTargetPoolRequest from .types.compute import InsertTargetSslProxyRequest from .types.compute import InsertTargetTcpProxyRequest from .types.compute import InsertTargetVpnGatewayRequest from .types.compute import InsertUrlMapRequest from .types.compute import InsertVpnGatewayRequest from .types.compute import InsertVpnTunnelRequest from .types.compute import Instance from .types.compute import InstanceAggregatedList from .types.compute import InstanceGroup from .types.compute import InstanceGroupAggregatedList from .types.compute import InstanceGroupList from .types.compute import InstanceGroupManager from .types.compute import InstanceGroupManagerActionsSummary from .types.compute import 
InstanceGroupManagerAggregatedList from .types.compute import InstanceGroupManagerAutoHealingPolicy from .types.compute import InstanceGroupManagerList from .types.compute import InstanceGroupManagersAbandonInstancesRequest from .types.compute import InstanceGroupManagersApplyUpdatesRequest from .types.compute import InstanceGroupManagersCreateInstancesRequest from .types.compute import InstanceGroupManagersDeleteInstancesRequest from .types.compute import InstanceGroupManagersDeletePerInstanceConfigsReq from .types.compute import InstanceGroupManagersListErrorsResponse from .types.compute import InstanceGroupManagersListManagedInstancesResponse from .types.compute import InstanceGroupManagersListPerInstanceConfigsResp from .types.compute import InstanceGroupManagersPatchPerInstanceConfigsReq from .types.compute import InstanceGroupManagersRecreateInstancesRequest from .types.compute import InstanceGroupManagersScopedList from .types.compute import InstanceGroupManagersSetInstanceTemplateRequest from .types.compute import InstanceGroupManagersSetTargetPoolsRequest from .types.compute import InstanceGroupManagerStatus from .types.compute import InstanceGroupManagerStatusStateful from .types.compute import InstanceGroupManagerStatusStatefulPerInstanceConfigs from .types.compute import InstanceGroupManagerStatusVersionTarget from .types.compute import InstanceGroupManagersUpdatePerInstanceConfigsReq from .types.compute import InstanceGroupManagerUpdatePolicy from .types.compute import InstanceGroupManagerVersion from .types.compute import InstanceGroupsAddInstancesRequest from .types.compute import InstanceGroupsListInstances from .types.compute import InstanceGroupsListInstancesRequest from .types.compute import InstanceGroupsRemoveInstancesRequest from .types.compute import InstanceGroupsScopedList from .types.compute import InstanceGroupsSetNamedPortsRequest from .types.compute import InstanceList from .types.compute import InstanceListReferrers from .types.compute 
import InstanceManagedByIgmError from .types.compute import InstanceManagedByIgmErrorInstanceActionDetails from .types.compute import InstanceManagedByIgmErrorManagedInstanceError from .types.compute import InstanceMoveRequest from .types.compute import InstanceProperties from .types.compute import InstanceReference from .types.compute import InstancesAddResourcePoliciesRequest from .types.compute import InstancesRemoveResourcePoliciesRequest from .types.compute import InstancesScopedList from .types.compute import InstancesSetLabelsRequest from .types.compute import InstancesSetMachineResourcesRequest from .types.compute import InstancesSetMachineTypeRequest from .types.compute import InstancesSetMinCpuPlatformRequest from .types.compute import InstancesSetServiceAccountRequest from .types.compute import InstancesStartWithEncryptionKeyRequest from .types.compute import InstanceTemplate from .types.compute import InstanceTemplateList from .types.compute import InstanceWithNamedPorts from .types.compute import Int64RangeMatch from .types.compute import Interconnect from .types.compute import InterconnectAttachment from .types.compute import InterconnectAttachmentAggregatedList from .types.compute import InterconnectAttachmentList from .types.compute import InterconnectAttachmentPartnerMetadata from .types.compute import InterconnectAttachmentPrivateInfo from .types.compute import InterconnectAttachmentsScopedList from .types.compute import InterconnectCircuitInfo from .types.compute import InterconnectDiagnostics from .types.compute import InterconnectDiagnosticsARPEntry from .types.compute import InterconnectDiagnosticsLinkLACPStatus from .types.compute import InterconnectDiagnosticsLinkOpticalPower from .types.compute import InterconnectDiagnosticsLinkStatus from .types.compute import InterconnectList from .types.compute import InterconnectLocation from .types.compute import InterconnectLocationList from .types.compute import InterconnectLocationRegionInfo from 
.types.compute import InterconnectOutageNotification from .types.compute import InterconnectsGetDiagnosticsResponse from .types.compute import InvalidateCacheUrlMapRequest from .types.compute import Items from .types.compute import License from .types.compute import LicenseCode from .types.compute import LicenseCodeLicenseAlias from .types.compute import LicenseResourceCommitment from .types.compute import LicenseResourceRequirements from .types.compute import LicensesListResponse from .types.compute import ListAcceleratorTypesRequest from .types.compute import ListAddressesRequest from .types.compute import ListAutoscalersRequest from .types.compute import ListAvailableFeaturesSslPoliciesRequest from .types.compute import ListBackendBucketsRequest from .types.compute import ListBackendServicesRequest from .types.compute import ListDisksRequest from .types.compute import ListDiskTypesRequest from .types.compute import ListErrorsInstanceGroupManagersRequest from .types.compute import ListErrorsRegionInstanceGroupManagersRequest from .types.compute import ListExternalVpnGatewaysRequest from .types.compute import ListFirewallsRequest from .types.compute import ListForwardingRulesRequest from .types.compute import ListGlobalAddressesRequest from .types.compute import ListGlobalForwardingRulesRequest from .types.compute import ListGlobalNetworkEndpointGroupsRequest from .types.compute import ListGlobalOperationsRequest from .types.compute import ListGlobalOrganizationOperationsRequest from .types.compute import ListHealthChecksRequest from .types.compute import ListImagesRequest from .types.compute import ListInstanceGroupManagersRequest from .types.compute import ListInstanceGroupsRequest from .types.compute import ListInstancesInstanceGroupsRequest from .types.compute import ListInstancesRegionInstanceGroupsRequest from .types.compute import ListInstancesRequest from .types.compute import ListInstanceTemplatesRequest from .types.compute import 
ListInterconnectAttachmentsRequest from .types.compute import ListInterconnectLocationsRequest from .types.compute import ListInterconnectsRequest from .types.compute import ListLicensesRequest from .types.compute import ListMachineTypesRequest from .types.compute import ListManagedInstancesInstanceGroupManagersRequest from .types.compute import ListManagedInstancesRegionInstanceGroupManagersRequest from .types.compute import ListNetworkEndpointGroupsRequest from .types.compute import ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest from .types.compute import ListNetworkEndpointsNetworkEndpointGroupsRequest from .types.compute import ListNetworksRequest from .types.compute import ListNodeGroupsRequest from .types.compute import ListNodesNodeGroupsRequest from .types.compute import ListNodeTemplatesRequest from .types.compute import ListNodeTypesRequest from .types.compute import ListPacketMirroringsRequest from .types.compute import ListPeeringRoutesNetworksRequest from .types.compute import ListPerInstanceConfigsInstanceGroupManagersRequest from .types.compute import ListPerInstanceConfigsRegionInstanceGroupManagersRequest from .types.compute import ListPreconfiguredExpressionSetsSecurityPoliciesRequest from .types.compute import ListReferrersInstancesRequest from .types.compute import ListRegionAutoscalersRequest from .types.compute import ListRegionBackendServicesRequest from .types.compute import ListRegionCommitmentsRequest from .types.compute import ListRegionDisksRequest from .types.compute import ListRegionDiskTypesRequest from .types.compute import ListRegionHealthCheckServicesRequest from .types.compute import ListRegionHealthChecksRequest from .types.compute import ListRegionInstanceGroupManagersRequest from .types.compute import ListRegionInstanceGroupsRequest from .types.compute import ListRegionNetworkEndpointGroupsRequest from .types.compute import ListRegionNotificationEndpointsRequest from .types.compute import ListRegionOperationsRequest 
from .types.compute import ListRegionsRequest from .types.compute import ListRegionSslCertificatesRequest from .types.compute import ListRegionTargetHttpProxiesRequest from .types.compute import ListRegionTargetHttpsProxiesRequest from .types.compute import ListRegionUrlMapsRequest from .types.compute import ListReservationsRequest from .types.compute import ListResourcePoliciesRequest from .types.compute import ListRoutersRequest from .types.compute import ListRoutesRequest from .types.compute import ListSecurityPoliciesRequest from .types.compute import ListSnapshotsRequest from .types.compute import ListSslCertificatesRequest from .types.compute import ListSslPoliciesRequest from .types.compute import ListSubnetworksRequest from .types.compute import ListTargetGrpcProxiesRequest from .types.compute import ListTargetHttpProxiesRequest from .types.compute import ListTargetHttpsProxiesRequest from .types.compute import ListTargetInstancesRequest from .types.compute import ListTargetPoolsRequest from .types.compute import ListTargetSslProxiesRequest from .types.compute import ListTargetTcpProxiesRequest from .types.compute import ListTargetVpnGatewaysRequest from .types.compute import ListUrlMapsRequest from .types.compute import ListUsableSubnetworksRequest from .types.compute import ListVpnGatewaysRequest from .types.compute import ListVpnTunnelsRequest from .types.compute import ListXpnHostsProjectsRequest from .types.compute import ListZoneOperationsRequest from .types.compute import ListZonesRequest from .types.compute import LocalDisk from .types.compute import LogConfig from .types.compute import LogConfigCloudAuditOptions from .types.compute import LogConfigCounterOptions from .types.compute import LogConfigCounterOptionsCustomField from .types.compute import LogConfigDataAccessOptions from .types.compute import MachineType from .types.compute import MachineTypeAggregatedList from .types.compute import MachineTypeList from .types.compute import 
MachineTypesScopedList from .types.compute import ManagedInstance from .types.compute import ManagedInstanceInstanceHealth from .types.compute import ManagedInstanceLastAttempt from .types.compute import ManagedInstanceVersion from .types.compute import Metadata from .types.compute import MetadataFilter from .types.compute import MetadataFilterLabelMatch from .types.compute import MoveDiskProjectRequest from .types.compute import MoveInstanceProjectRequest from .types.compute import NamedPort from .types.compute import Network from .types.compute import NetworkEndpoint from .types.compute import NetworkEndpointGroup from .types.compute import NetworkEndpointGroupAggregatedList from .types.compute import NetworkEndpointGroupAppEngine from .types.compute import NetworkEndpointGroupCloudFunction from .types.compute import NetworkEndpointGroupCloudRun from .types.compute import NetworkEndpointGroupList from .types.compute import NetworkEndpointGroupsAttachEndpointsRequest from .types.compute import NetworkEndpointGroupsDetachEndpointsRequest from .types.compute import NetworkEndpointGroupsListEndpointsRequest from .types.compute import NetworkEndpointGroupsListNetworkEndpoints from .types.compute import NetworkEndpointGroupsScopedList from .types.compute import NetworkEndpointWithHealthStatus from .types.compute import NetworkInterface from .types.compute import NetworkList from .types.compute import NetworkPeering from .types.compute import NetworkRoutingConfig from .types.compute import NetworksAddPeeringRequest from .types.compute import NetworksRemovePeeringRequest from .types.compute import NetworksUpdatePeeringRequest from .types.compute import NodeGroup from .types.compute import NodeGroupAggregatedList from .types.compute import NodeGroupAutoscalingPolicy from .types.compute import NodeGroupList from .types.compute import NodeGroupMaintenanceWindow from .types.compute import NodeGroupNode from .types.compute import NodeGroupsAddNodesRequest from .types.compute 
import NodeGroupsDeleteNodesRequest from .types.compute import NodeGroupsListNodes from .types.compute import NodeGroupsScopedList from .types.compute import NodeGroupsSetNodeTemplateRequest from .types.compute import NodeTemplate from .types.compute import NodeTemplateAggregatedList from .types.compute import NodeTemplateList from .types.compute import NodeTemplateNodeTypeFlexibility from .types.compute import NodeTemplatesScopedList from .types.compute import NodeType from .types.compute import NodeTypeAggregatedList from .types.compute import NodeTypeList from .types.compute import NodeTypesScopedList from .types.compute import NotificationEndpoint from .types.compute import NotificationEndpointGrpcSettings from .types.compute import NotificationEndpointList from .types.compute import Operation from .types.compute import OperationAggregatedList from .types.compute import OperationList from .types.compute import OperationsScopedList from .types.compute import OutlierDetection from .types.compute import PacketMirroring from .types.compute import PacketMirroringAggregatedList from .types.compute import PacketMirroringFilter from .types.compute import PacketMirroringForwardingRuleInfo from .types.compute import PacketMirroringList from .types.compute import PacketMirroringMirroredResourceInfo from .types.compute import PacketMirroringMirroredResourceInfoInstanceInfo from .types.compute import PacketMirroringMirroredResourceInfoSubnetInfo from .types.compute import PacketMirroringNetworkInfo from .types.compute import PacketMirroringsScopedList from .types.compute import PatchAutoscalerRequest from .types.compute import PatchBackendBucketRequest from .types.compute import PatchBackendServiceRequest from .types.compute import PatchFirewallRequest from .types.compute import PatchForwardingRuleRequest from .types.compute import PatchGlobalForwardingRuleRequest from .types.compute import PatchHealthCheckRequest from .types.compute import PatchImageRequest from 
.types.compute import PatchInstanceGroupManagerRequest from .types.compute import PatchInterconnectAttachmentRequest from .types.compute import PatchInterconnectRequest from .types.compute import PatchNetworkRequest from .types.compute import PatchNodeGroupRequest from .types.compute import PatchPacketMirroringRequest from .types.compute import PatchPerInstanceConfigsInstanceGroupManagerRequest from .types.compute import PatchPerInstanceConfigsRegionInstanceGroupManagerRequest from .types.compute import PatchRegionAutoscalerRequest from .types.compute import PatchRegionBackendServiceRequest from .types.compute import PatchRegionHealthCheckRequest from .types.compute import PatchRegionHealthCheckServiceRequest from .types.compute import PatchRegionInstanceGroupManagerRequest from .types.compute import PatchRegionUrlMapRequest from .types.compute import PatchRouterRequest from .types.compute import PatchRuleSecurityPolicyRequest from .types.compute import PatchSecurityPolicyRequest from .types.compute import PatchSslPolicyRequest from .types.compute import PatchSubnetworkRequest from .types.compute import PatchTargetGrpcProxyRequest from .types.compute import PatchTargetHttpProxyRequest from .types.compute import PatchUrlMapRequest from .types.compute import PathMatcher from .types.compute import PathRule from .types.compute import PerInstanceConfig from .types.compute import Policy from .types.compute import PreconfiguredWafSet from .types.compute import PreservedState from .types.compute import PreservedStatePreservedDisk from .types.compute import PreviewRouterRequest from .types.compute import Project from .types.compute import ProjectsDisableXpnResourceRequest from .types.compute import ProjectsEnableXpnResourceRequest from .types.compute import ProjectsGetXpnResources from .types.compute import ProjectsListXpnHostsRequest from .types.compute import ProjectsSetDefaultNetworkTierRequest from .types.compute import Quota from .types.compute import RawDisk from 
.types.compute import RecreateInstancesInstanceGroupManagerRequest from .types.compute import RecreateInstancesRegionInstanceGroupManagerRequest from .types.compute import Reference from .types.compute import Region from .types.compute import RegionAutoscalerList from .types.compute import RegionDisksAddResourcePoliciesRequest from .types.compute import RegionDisksRemoveResourcePoliciesRequest from .types.compute import RegionDisksResizeRequest from .types.compute import RegionDiskTypeList from .types.compute import RegionInstanceGroupList from .types.compute import RegionInstanceGroupManagerDeleteInstanceConfigReq from .types.compute import RegionInstanceGroupManagerList from .types.compute import RegionInstanceGroupManagerPatchInstanceConfigReq from .types.compute import RegionInstanceGroupManagersAbandonInstancesRequest from .types.compute import RegionInstanceGroupManagersApplyUpdatesRequest from .types.compute import RegionInstanceGroupManagersCreateInstancesRequest from .types.compute import RegionInstanceGroupManagersDeleteInstancesRequest from .types.compute import RegionInstanceGroupManagersListErrorsResponse from .types.compute import RegionInstanceGroupManagersListInstanceConfigsResp from .types.compute import RegionInstanceGroupManagersListInstancesResponse from .types.compute import RegionInstanceGroupManagersRecreateRequest from
= 'ou=configuration,o=gluu' ldap_conn.search( search_base=dn, search_scope=BASE, search_filter='(objectclass=*)', attributes=['oxIDPAuthentication'] ) oxIDPAuthentication = json.loads(ldap_conn.response[0]['attributes']['oxIDPAuthentication'][0]) oxIDPAuthentication['config']['servers'] = config_servers oxIDPAuthentication_js = json.dumps(oxIDPAuthentication, indent=2) ldap_conn.modify(dn, {'oxIDPAuthentication': [MODIFY_REPLACE, oxIDPAuthentication_js]}) ldap_conn.unbind() else: for k, v in oxAuthConfDynamic_changes: query = 'UPDATE gluu USE KEYS "configuration_oxauth" set gluu.oxAuthConfDynamic.{0}={1}'.format(k, json.dumps(v)) self.exec_n1ql_query(query) for inum in custom_scripts: query = 'UPDATE gluu USE KEYS "scripts_{0}" set gluu.oxEnabled=true'.format(inum) self.exec_n1ql_query(query) self.exec_n1ql_query('CREATE INDEX def_gluu_myCustomAttr1 ON `gluu`(myCustomAttr1) USING GSI WITH {"defer_build":true}') self.exec_n1ql_query('CREATE INDEX def_gluu_myCustomAttr2 ON `gluu`(myCustomAttr2) USING GSI WITH {"defer_build":true}') self.exec_n1ql_query('BUILD INDEX ON `gluu` (def_gluu_myCustomAttr1, def_gluu_myCustomAttr2)') #query = 'UPDATE gluu USE KEYS "configuration" set gluu.oxIDPAuthentication.config.servers = {0}'.format(json.dumps(config_servers)) #self.exec_n1ql_query(query) self.create_test_client_keystore() # Disable token binding module if self.os_type+self.os_version == 'ubuntu18': self.run(['a2dismod', 'mod_token_binding']) self.run_service_command('apache2', 'restart') self.run_service_command('oxauth', 'restart') # Prepare for tests run #install_command, update_command, query_command, check_text = self.get_install_commands() #self.run_command(install_command.format('git')) #self.run([self.cmd_mkdir, '-p', 'oxAuth/Client/profiles/ce_test']) #self.run([self.cmd_mkdir, '-p', 'oxAuth/Server/profiles/ce_test']) # Todo: Download and unzip file test_data.zip from CE server. 
# Todo: Copy files from unziped folder test/oxauth/client/* into oxAuth/Client/profiles/ce_test
# Todo: Copy files from unziped folder test/oxauth/server/* into oxAuth/Server/profiles/ce_test
#self.run([self.cmd_keytool, '-import', '-alias', 'seed22.gluu.org_httpd', '-keystore', 'cacerts', '-file', '%s/httpd.crt' % self.certFolder, '-storepass', 'changeit', '-noprompt'])
#self.run([self.cmd_keytool, '-import', '-alias', 'seed22.gluu.org_opendj', '-keystore', 'cacerts', '-file', '%s/opendj.crt' % self.certFolder, '-storepass', 'changeit', '-noprompt'])

def load_test_data_exit(self):
    """Load test data using the last saved setup properties, then exit the process.

    Falls back to the encrypted properties file (``.enc``) when the plain one
    is missing; exits with status 1 if neither exists.
    """
    print("Loading test data")
    prop_file = os.path.join(self.install_dir, 'setup.properties.last')
    if not os.path.exists(prop_file):
        # Try the encrypted variant written by some setup runs.
        prop_file += '.enc'
    if not os.path.exists(prop_file):
        print("setup.properties.last or setup.properties.last.enc were not found, exiting.")
        sys.exit(1)
    self.load_properties(prop_file)
    self.createLdapPw()
    self.load_test_data()
    self.deleteLdapPw()
    print("Test data loaded. Exiting ...")
    sys.exit()

def fix_systemd_script(self):
    """Rewrite the oxauth systemd unit's opendj dependencies for the chosen backend.

    When Couchbase is installed locally the unit must depend on
    couchbase-server.service; when no local WrenDS/OpenDJ exists the opendj
    dependency is dropped entirely and the opendj unit file removed.
    """
    oxauth_systemd_script_fn = '/lib/systemd/system/oxauth.service'
    if os.path.exists(oxauth_systemd_script_fn):
        oxauth_systemd_script = open(oxauth_systemd_script_fn).read()
        changed = False
        if self.cb_install == LOCAL:
            oxauth_systemd_script = oxauth_systemd_script.replace('After=opendj.service', 'After=couchbase-server.service')
            oxauth_systemd_script = oxauth_systemd_script.replace('Requires=opendj.service', 'Requires=couchbase-server.service')
            changed = True
        elif self.wrends_install != LOCAL:
            oxauth_systemd_script = oxauth_systemd_script.replace('After=opendj.service', '')
            oxauth_systemd_script = oxauth_systemd_script.replace('Requires=opendj.service', '')
            changed = True
        if changed:
            with open(oxauth_systemd_script_fn, 'w') as w:
                w.write(oxauth_systemd_script)
            # opendj is not used locally in either changed case, so its unit goes away.
            self.run(['rm', '-f', '/lib/systemd/system/opendj.service'])
            self.run([self.systemctl, 'daemon-reload'])

def install_oxd(self):
    """Unpack and register the oxd server, optionally pointing it at Gluu storage."""
    self.logIt("Installing oxd server...")
    oxd_root = '/opt/oxd-server/'
    oxd_server_yml_fn = os.path.join(oxd_root, 'conf/oxd-server.yml')
    self.run(['tar', '-zxf', self.oxd_package, '-C', '/opt'])
    self.run(['chown', '-R', 'jetty:jetty', oxd_root])
    service_file = os.path.join(oxd_root, 'oxd-server.service')
    if os.path.exists(service_file):
        # systemd-based distribution: install the unit file directly.
        self.run(['cp', service_file, '/lib/systemd/system'])
    else:
        # SysV-style distribution: install the init.d script instead.
        service_file = os.path.join(oxd_root, 'oxd-server.init.d')
        target_file = '/etc/init.d/oxd-server'
        self.run(['cp', service_file, target_file])
        self.run(['chmod', '+x', target_file])
        self.run(['update-rc.d', 'oxd-server', 'defaults'])
        self.run(['cp', os.path.join(oxd_root, 'oxd-server-default'), '/etc/default/oxd-server'])
    self.run(['mkdir', '/var/log/oxd-server'])
    self.run(['chown', 'jetty:jetty', '/var/log/oxd-server'])
    for fn in glob.glob(os.path.join(oxd_root, 'bin/*')):
        self.run(['chmod', '+x', fn])
    if self.oxd_use_gluu_storage:
        # Switch oxd from its embedded DB to the Gluu server's own storage.
        oxd_server_yml_fn = os.path.join(oxd_root, 'conf/oxd-server.yml')
        yml_str = self.readFile(oxd_server_yml_fn)
        oxd_yaml = ruamel.yaml.load(yml_str, ruamel.yaml.RoundTripLoader)
        oxd_yaml['storage_configuration'].pop('dbFileLocation')
        oxd_yaml['storage'] = 'gluu_server_configuration'
        oxd_yaml['storage_configuration']['type'] = self.gluu_properties_fn
        oxd_yaml['storage_configuration']['connection'] = self.ox_ldap_properties if self.mappingLocations['default'] == 'ldap' else self.gluuCouchebaseProperties
        try:
            oxd_yaml.yaml_set_comment_before_after_key('server', '\nConnectors')
        except Exception:
            # Cosmetic comment insertion only; ignore if the ruamel API refuses.
            pass
        yml_str = ruamel.yaml.dump(oxd_yaml, Dumper=ruamel.yaml.RoundTripDumper)
        self.writeFile(oxd_server_yml_fn, yml_str)
    self.enable_service_at_start('oxd-server')

def install_casa(self):
    """Deploy the Casa web application and wire its jars into oxauth's classpath."""
    self.logIt("Installing Casa...")
    self.run(['chmod', 'g+w', '/opt/gluu/python/libs'])
    self.logIt("Copying casa.war into jetty webapps folder...")
    self.installJettyService(self.jetty_app_configuration['casa'])
    jettyServiceWebapps = os.path.join(self.jetty_base, 'casa', 'webapps')
    self.copyFile(os.path.join(self.distGluuFolder, 'casa.war'), jettyServiceWebapps)
    jettyServiceOxAuthCustomLibsPath = os.path.join(self.jetty_base, "oxauth", "custom/libs")
    self.copyFile(os.path.join(self.distGluuFolder, 'twilio-{0}.jar'.format(self.twilio_version)), jettyServiceOxAuthCustomLibsPath)
    self.copyFile(os.path.join(self.distGluuFolder, 'jsmpp-{}.jar'.format(self.jsmmp_version)), jettyServiceOxAuthCustomLibsPath)
    self.run([self.cmd_chown, '-R', 'jetty:jetty', jettyServiceOxAuthCustomLibsPath])
    # Make necessary Directories for Casa
    for path in ('/opt/gluu/jetty/casa/static/', '/opt/gluu/jetty/casa/plugins'):
        if not os.path.exists(path):
            self.run(['mkdir', '-p', path])
            self.run(['chown', '-R', 'jetty:jetty', path])
    #Adding twilio jar path to oxauth.xml
    oxauth_xml_fn = '/opt/gluu/jetty/oxauth/webapps/oxauth.xml'
    if os.path.exists(oxauth_xml_fn):

        class CommentedTreeBuilder(ElementTree.TreeBuilder):
            # Keeps XML comments when re-serializing oxauth.xml (ElementTree drops them by default).
            def comment(self, data):
                self.start(ElementTree.Comment, {})
                self.data(data)
                self.end(ElementTree.Comment)

        parser = ElementTree.XMLParser(target=CommentedTreeBuilder())
        tree = ElementTree.parse(oxauth_xml_fn, parser)
        root = tree.getroot()
        xml_headers = '<?xml version="1.0" encoding="ISO-8859-1"?>\n<!DOCTYPE Configure PUBLIC "-//Jetty//Configure//EN" "http://www.eclipse.org/jetty/configure_9_0.dtd">\n\n'
        # Locate (or create) the <Set name="extraClasspath"> element.
        for element in root:
            if element.tag == 'Set' and element.attrib.get('name') == 'extraClasspath':
                break
        else:
            element = ElementTree.SubElement(root, 'Set', name='extraClasspath')
            element.text = ''
        extraClasspath_list = element.text.split(',')
        # Drop empties and any stale twilio/jsmpp entries before re-adding current versions.
        # NOTE: raw strings fix the invalid '\.' escape in the original patterns (same matches).
        for ecp in extraClasspath_list[:]:
            if (not ecp) or re.search(r'twilio-(.*)\.jar', ecp) or re.search(r'jsmpp-(.*)\.jar', ecp):
                extraClasspath_list.remove(ecp)
        extraClasspath_list.append('./custom/libs/twilio-{}.jar'.format(self.twilio_version))
        extraClasspath_list.append('./custom/libs/jsmpp-{}.jar'.format(self.jsmmp_version))
        element.text = ','.join(extraClasspath_list)
        self.writeFile(oxauth_xml_fn, xml_headers+ElementTree.tostring(root).decode('utf-8'))
    pylib_folder = os.path.join(self.gluuOptPythonFolder, 'libs')
    for script_fn in glob.glob(os.path.join(self.staticFolder, 'casa/scripts/*.*')):
        self.run(['cp', script_fn, pylib_folder])
    self.enable_service_at_start('casa')

def parse_url(self, url):
    """Return ``(hostname, port)`` parsed from *url*; port is None when absent."""
    o = urlparse(url)
    return o.hostname, o.port

def install_gluu_radius_base(self):
    """Prepare Radius client/keys configuration and render its LDIF templates."""
    if not self.gluu_radius_client_id:
        self.gluu_radius_client_id = '1701.' + str(uuid.uuid4())
    source_dir = os.path.join(self.staticFolder, 'radius')
    conf_dir = os.path.join(self.gluuBaseFolder, 'conf/radius/')
    self.createDirs(conf_dir)
    self.radius_jwt_pass = self.getPW()
    radius_jwt_pass = self.obscure(self.radius_jwt_pass)
    radius_jks_fn = os.path.join(self.certFolder, 'gluu-radius.jks')
    self.raidus_client_jwks = self.gen_openid_jwks_jks_keys(radius_jks_fn, self.radius_jwt_pass)
    # The keytool output is not quite valid JSON; strip quotes and stray commas first.
    raidus_client_jwks = ''.join(self.raidus_client_jwks).replace('\'','').replace(',,',',').replace('{,','{')
    raidus_client_jwks = json.loads(raidus_client_jwks)
    # FIX: restore the redacted assignment — templates receive the obscured JWT password.
    self.templateRenderingDict['radius_jwt_pass'] = radius_jwt_pass
    raidus_client_jwks_json = json.dumps(raidus_client_jwks, indent=2)
    # NOTE(review): base64.encodestring is deprecated (removed in Python 3.9);
    # confirm target interpreter before switching to base64.encodebytes.
    self.templateRenderingDict['gluu_ro_client_base64_jwks'] = base64.encodestring(raidus_client_jwks_json.encode('utf-8')).decode('utf-8').replace(' ','').replace('\n','')
    for k in raidus_client_jwks['keys']:
        if k.get('alg') == 'RS512':
            self.templateRenderingDict['radius_jwt_keyId'] = k['kid']
    self.gluu_ro_pw = self.getPW()
    self.gluu_ro_encoded_pw = self.obscure(self.gluu_ro_pw)
    scripts_dir = os.path.join(source_dir,'scripts')
    # Embed the Super Gluu RO scripts as base64 for template substitution.
    for scriptFile, scriptName in ( ('super_gluu_ro_session.py', 'super_gluu_ro_session_script'),
                        ('super_gluu_ro.py','super_gluu_ro_script'),
                        ):
        scriptFilePath = os.path.join(scripts_dir, scriptFile)
        base64ScriptFile = self.generate_base64_file(scriptFilePath, 1)
        self.templateRenderingDict[scriptName] = base64ScriptFile
    for tmp_ in ('gluu_radius_base.ldif', 'gluu_radius_clients.ldif', 'gluu_radius_server.ldif'):
        tmp_fn = os.path.join(source_dir, 'templates', tmp_)
# NOTE(review): the statements below continue install_gluu_radius_base() from the
# previous chunk (the for-loop over the radius LDIF templates ends here).
self.renderTemplateInOut(tmp_fn, os.path.join(source_dir, 'templates'), self.outputFolder)
self.renderTemplateInOut('gluu-radius.properties', os.path.join(source_dir, 'etc/gluu/conf/radius/'), conf_dir)
ldif_file_clients = os.path.join(self.outputFolder, 'gluu_radius_clients.ldif')
ldif_file_base = os.path.join(self.outputFolder, 'gluu_radius_base.ldif')
# Import into whichever backend holds the default mapping (OpenDJ or Couchbase).
if self.mappingLocations['default'] == 'ldap':
    self.import_ldif_opendj([ldif_file_base, ldif_file_clients])
else:
    self.import_ldif_couchebase([ldif_file_base, ldif_file_clients])
if self.installGluuRadius:
    self.install_gluu_radius()

def install_gluu_radius(self):
    """Install the Super Gluu Radius server: libs, LDIF data, config files,
    service registration, and ownership/permission fix-ups."""
    self.pbar.progress("radius", "Installing Gluu components: Radius", False)
    radius_libs = os.path.join(self.distGluuFolder, 'gluu-radius-libs.zip')
    radius_jar = os.path.join(self.distGluuFolder, 'super-gluu-radius-server.jar')
    conf_dir = os.path.join(self.gluuBaseFolder, 'conf/radius/')
    ldif_file_server = os.path.join(self.outputFolder, 'gluu_radius_server.ldif')
    source_dir = os.path.join(self.staticFolder, 'radius')
    logs_dir = os.path.join(self.radius_dir,'logs')
    if not os.path.exists(logs_dir):
        self.run([self.cmd_mkdir, '-p', logs_dir])
    self.run(['unzip', '-n', '-q', radius_libs, '-d', self.radius_dir ])
    self.copyFile(radius_jar, self.radius_dir)
    if self.mappingLocations['default'] == 'ldap':
        # OpenDJ needs the radius schema loaded before the server entry.
        schema_ldif = os.path.join(source_dir, 'schema/98-radius.ldif')
        self.import_ldif_opendj([schema_ldif])
        self.import_ldif_opendj([ldif_file_server])
    else:
        self.import_ldif_couchebase([ldif_file_server])
    self.copyFile(os.path.join(source_dir, 'etc/default/gluu-radius'), self.osDefault)
    self.copyFile(os.path.join(source_dir, 'etc/gluu/conf/radius/gluu-radius-logging.xml'), conf_dir)
    self.copyFile(os.path.join(source_dir, 'scripts/gluu_common.py'), os.path.join(self.gluuOptPythonFolder, 'libs'))
    self.copyFile(os.path.join(source_dir, 'etc/init.d/gluu-radius'), '/etc/init.d')
    self.run([self.cmd_chmod, '+x', '/etc/init.d/gluu-radius'])
    # Ubuntu 16 uses SysV init registration; everything else gets a systemd unit.
    if self.os_type+self.os_version == 'ubuntu16':
        self.run(['update-rc.d', 'gluu-radius', 'defaults'])
    else:
        self.copyFile(os.path.join(source_dir, 'systemd/gluu-radius.service'), '/usr/lib/systemd/system')
        self.run([self.systemctl, 'daemon-reload'])
    #create empty gluu-radius.private-key.pem
    gluu_radius_private_key_fn = os.path.join(self.certFolder, 'gluu-radius.private-key.pem')
    self.writeFile(gluu_radius_private_key_fn, '')
    self.run([self.cmd_chown, '-R', 'radius:gluu', self.radius_dir])
    self.run([self.cmd_chown, '-R', 'root:gluu', conf_dir])
    self.run([self.cmd_chown, 'root:gluu', os.path.join(self.gluuOptPythonFolder, 'libs/gluu_common.py')])
    self.run([self.cmd_chown, 'radius:gluu', os.path.join(self.certFolder, 'gluu-radius.jks')])
    self.run([self.cmd_chown, 'radius:gluu', os.path.join(self.certFolder, 'gluu-radius.private-key.pem')])
    self.run([self.cmd_chmod, '755', self.radius_dir])
    # Key material is group-readable only.
    self.run([self.cmd_chmod, '660', os.path.join(self.certFolder, 'gluu-radius.jks')])
    self.run([self.cmd_chmod, '660', os.path.join(self.certFolder, 'gluu-radius.private-key.pem')])
    self.enable_service_at_start('gluu-radius')

def post_install_tasks(self):
    """Install the daily license renewer cron job and the show_version helper."""
    super_gluu_lisence_renewer_fn = os.path.join(self.staticFolder, 'scripts', 'super_gluu_license_renewer.py')
    target_fn = '/etc/cron.daily/super_gluu_lisence_renewer'
    self.run(['cp', '-f', super_gluu_lisence_renewer_fn, target_fn])
    self.run(['chown', 'root:root', target_fn])
    self.run(['chmod', '+x', target_fn])
    cron_service = 'cron'
    # RHEL-family distributions name the daemon 'crond'.
    if self.os_type in ['centos', 'red', 'fedora']:
        cron_service = 'crond'
    self.run_service_command(cron_service, 'restart')
    print_version_fn = os.path.join(self.install_dir, 'pylib', 'printVersion.py')
    show_version_fn = os.path.join(self.gluuOptBinFolder, 'show_version.py')
    self.run(['cp', '-f', print_version_fn, show_version_fn])
    self.run(['chmod', '+x', show_version_fn])

def do_installation(self, queue=None):
    """Run the full installation sequence, reporting progress to *queue* if given.

    NOTE(review): this method continues past this chunk (its except handler and
    the remaining steps follow in the next chunk).
    """
    try:
        self.thread_queue = queue
        self.pbar = ProgressBar(cols=tty_columns, queue=self.thread_queue)
        self.pbar.progress("gluu", "Configuring system")
        self.configureSystem()
        self.pbar.progress("download", "Downloading War files")
        self.downloadWarFiles()
        self.pbar.progress("gluu", "Calculating application memory")
        self.calculate_selected_aplications_memory()
        self.pbar.progress("java", "Installing JRE")
        self.installJRE()
        self.pbar.progress("jetty", "Installing Jetty")
        self.installJetty()
        self.pbar.progress("jython", "Installing Jython")
        self.installJython()
        self.pbar.progress("node", "Installing Node")
        self.installNode()
        self.pbar.progress("gluu", "Making salt")
        self.make_salt()
        self.pbar.progress("gluu", "Making oxauth salt")
        self.make_oxauth_salt()
        self.pbar.progress("scripts", "Copying scripts")
        self.copy_scripts()
        self.pbar.progress("gluu", "Encoding passwords")
        self.encode_passwords()
        self.pbar.progress("gluu", "Encoding test passwords")
        self.encode_test_passwords()
        if self.installPassport:
            self.generate_passport_configuration()
        self.pbar.progress("gluu", "Installing Gluu base")
        self.install_gluu_base()
        self.pbar.progress("gluu", "Preparing base64 extention scripts")
        self.prepare_base64_extension_scripts()
        self.pbar.progress("gluu", "Rendering templates")
        self.render_templates()
        self.pbar.progress("gluu", "Generating crypto")
        self.generate_crypto()
        self.pbar.progress("gluu","Generating oxauth openid keys")
        self.generate_oxauth_openid_keys()
        self.pbar.progress("gluu", "Generating base64 configuration")
        self.generate_base64_configuration()
        self.pbar.progress("gluu", "Rendering configuratipn template")
        self.render_configuration_template()
        self.pbar.progress("gluu", "Updating hostname")
        self.update_hostname()
        self.pbar.progress("gluu", "Setting ulimits")
        self.set_ulimits()
        self.pbar.progress("gluu", "Copying output")
        self.copy_output()
        self.pbar.progress("gluu", "Setting up init scripts")
        self.setup_init_scripts()
        self.pbar.progress("node", "Rendering node templates")
        self.render_node_templates()
        self.pbar.progress("gluu", "Installing Gluu components")
        self.install_gluu_components()
self.pbar.progress("gluu", "Rendering test templates") self.render_test_templates() self.pbar.progress("gluu", "Copying static") self.copy_static() self.fix_systemd_script() self.pbar.progress("gluu", "Setting ownerships") self.set_ownership() self.pbar.progress("gluu", "Setting permissions") self.set_permissions() self.pbar.progress("gluu", "Starting services") self.start_services() self.pbar.progress("gluu", "Saving properties") self.save_properties() if setupOptions['loadTestData']: self.pbar.progress("gluu", "Loading test data", False) self.load_test_data() if 'importLDIFDir' in list(setupOptions.keys()): self.pbar.progress("gluu", "Importing LDIF files") self.render_custom_templates(setupOptions['importLDIFDir']) self.import_custom_ldif(setupOptions['importLDIFDir']) self.deleteLdapPw() self.post_install_tasks() self.pbar.progress("gluu", "Completed") if not self.thread_queue: print() self.print_post_messages() except: if self.thread_queue: self.thread_queue.put((ERROR, "", str(traceback.format_exc()))) else: installObject.logIt("***** Error caught in main loop *****", True) installObject.logIt(traceback.format_exc(), True) print("***** Error caught in main loop *****") print(traceback.format_exc()) def print_post_messages(self): print() for m in self.post_messages: print(m) ############################ Main Loop ################################################# file_max = int(open("/proc/sys/fs/file-max").read().strip()) current_mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES') current_mem_size = round(current_mem_bytes / (1024.**3), 1) #in GB current_number_of_cpu = multiprocessing.cpu_count() disk_st = os.statvfs('/') available_disk_space = disk_st.f_bavail * disk_st.f_frsize / (1024 * 1024 *1024) def resource_checkings(): if file_max < 64000: print(("{0}Maximum number of files that can be opened on this computer is " "less than 64000. 
Please increase number of file-max on the " "host system and re-run setup.py{1}".format(gluu_utils.colors.DANGER, gluu_utils.colors.ENDC))) sys.exit(1) if current_mem_size < suggested_mem_size: print(("{0}Warning: RAM size was determined to be {1:0.1f} GB. This is less " "than the suggested RAM size of {2} GB.{3}").format(gluu_utils.colors.WARNING, current_mem_size, suggested_mem_size, gluu_utils.colors.ENDC)) result = input("Proceed anyways? [Y|n] ") if result and result[0].lower() == 'n': sys.exit() if current_number_of_cpu < suggested_number_of_cpu: print(("{0}Warning: Available CPU Units found was {1}. " "This is less than the required amount of {2} CPU Units.{3}".format( gluu_utils.colors.WARNING, current_number_of_cpu, suggested_number_of_cpu, gluu_utils.colors.ENDC))) result = input("Proceed anyways? [Y|n] ") if result and result[0].lower() == 'n': sys.exit() if available_disk_space < suggested_free_disk_space: print(("{0}Warning: Available free disk space was determined to be {1} " "GB. This is less than the
<filename>cryoorigami/origamiem.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date    : 2018-11-09 11:27:03
# @Author  : <NAME> (<EMAIL>)
# @Link    : http://example.org
# @Version : $Id$

import os
import re
import glob
import yaml
import numpy as np
import pandas as pd
import cryoorigami.utilities as util
import mrcfile
import subprocess
import sys
import scipy.ndimage
import cryoorigami.parallelem as parallelem
import multiprocessing
import shutil
import sqlite3
import matplotlib.pyplot as py
import cryoorigami.barcode as barcode
import xml.etree.ElementTree as ET

from collections import Counter
from mpl_toolkits.mplot3d import Axes3D
from shutil import copyfile
from matplotlib.colors import LogNorm
from matplotlib.ticker import FormatStrFormatter
from skimage import feature


class Relion:
    ''' Placeholder for Relion-related state; currently only holds a name. '''

    def __init__(self):
        self.name = None


class Project:
    ''' Project File

    Container for an EM-processing project: STAR-file handles for particles,
    reference classes and micrographs, the associated MRC objects/paths,
    pixel sizes and other bookkeeping shared by the processing methods.
    '''

    def __init__(self, name='EMProject'):
        ''' Initialize all project state to empty defaults. '''
        self.name = name

        # STAR objects (populated later by the read_* methods)
        self.particle_star = None
        self.ref_class_star = None
        self.micrograph_star = None
        self.class_ids = None

        # Particle props
        self.particle_diameter_A = None   # particle diameter in Angstrom
        self.particle_radius_pix = None   # derived radius in pixels

        # Input files
        self.particle_star_file = None
        self.ref_class_star_file = None
        self.micrograph_star_file = None

        # Output files
        self.particle_out_file = None
        self.ref_class_out_file = None

        # Pixel sizes (Angstrom per pixel)
        self.mic_apix = None
        self.particle_apix = None
        self.ref_apix = None

        # Micrograph files
        self.first_mic_file = None
        self.first_mic_mrc = None

        # MRC files and objects
        self.particle_mrc = None
        self.ref_class_mrc = None
        self.consensus_class_mrc = None
        self.particle_mrc_file = None
        self.ref_class_mrc_file = None
        self.consensus_class_mrc_file = None

        # Metadata file (NOTE(review): assigned again further down in __init__)
        self.metadata = None
        self.metadata_file = None

        # Micrograph dimensions in pixels
        self.mic_NX = 0
        self.mic_NY = 0
        self.mic_NZ = 0

        # Cryosparc objects
        self.particle_cs = None
        self.ref_class_cs = None

        # Cryosparc files
        self.blob_cs_file = None
        self.ref_class_cs_file = None
        self.passthrough_cs_file = None
self.original_star_file = None # Additional data frames self.particle_data_props = pd.DataFrame(columns=['insideFrame']) # Alignment references self.ref_align_star_file = None self.ref_align_mrc_file = None # Alignment mask file self.mask_align_mrc_file = None self.mask_subtract_mrc_file = None # First particle and class mrc files self.first_particle_mrc = None self.first_particle_mrc_file = None self.first_ref_class_mrc = None self.first_ref_class_mrc_file = None # Low and highpass filters self.highpass_angstrom = None self.lowpass_angstrom = None # Relion code self.relion_refine_exe = 'relion_refine' self.relion_refine_args = [] self.relion_norm_exe = 'relion_preprocess' self.relion_norm_args = [] self.relion_image_handler_exe = 'relion_image_handler' self.relion_flip_args = [] self.relion_noflip_args = [] # Particles and class2D models self.particles = [] self.class2Ds = [] self.class3Ds = [] # Star files to merge self.particle_star_files = [] self.other_star = None # Metadata file self.metadata = None self.metadata_file = None # Geometrical parameters self.direction = None self.diff_tilt = None self.diff_psi = None self.diff_rot = None def copy_micrographs(self, dest='Mics'): self.particle_star.copy_micrographs(self.output_directory+'/'+dest) def write_metadata(self): ''' Write metadata ''' if self.metadata is not None and self.metadata_file is not None: self.metadata.to_csv(self.metadata_file, header=True, index=False, sep='\t') def set_particle_num(self, num=None): ''' Set particle number ''' if num is not None and self.particle_star is not None: self.particle_star.data_block = self.particle_star.data_block.loc[:num, :] def get_numptcls(self): # Print particle number info if self.particle_star is not None: num_ptcls = self.particle_star.data_block.shape[0] print('Number of particles: %d' % (num_ptcls)) return num_ptcls def read_ptcl_mrc_paths(self): ''' Read ptcl mrc paths ''' self.particle_star.read_mrc_paths() def group_ptcls(method='defocus'): ''' Grouping 
        method '''
        # Sort particles using defocus or intensity scale

    def pick_random_set(self, num_ptcls, rand_seed=1):
        ''' Pick random subset of the particles '''
        self.particle_star.pick_random_set(num_ptcls, rand_seed)

    def invert_psi(self):
        ''' Invert psi angle '''
        if self.particle_star.has_label('rlnAnglePsi'):
            self.particle_star.data_block['rlnAnglePsi'] *= -1.0

    def invert_angles(self):
        ''' Invert psi/rot/tilt angles '''
        if self.particle_star.has_label('rlnAnglePsi'):
            self.particle_star.data_block['rlnAnglePsi'] *= -1.0
        if self.particle_star.has_label('rlnAngleRot'):
            self.particle_star.data_block['rlnAngleRot'] *= -1.0
        if self.particle_star.has_label('rlnAngleTilt'):
            self.particle_star.data_block['rlnAngleTilt'] *= -1.0

    def invert_origin(self):
        ''' Invert origin (x/y offsets) '''
        if self.particle_star.has_label('rlnOriginX'):
            self.particle_star.data_block['rlnOriginX'] *= -1.0
        if self.particle_star.has_label('rlnOriginY'):
            self.particle_star.data_block['rlnOriginY'] *= -1.0

    def set_highpass_filter(self, hp=None):
        ''' Set highpass filter cutoff (Angstrom) '''
        if hp is not None:
            self.highpass_angstrom = hp

    def set_lowpass_filter(self, lp=None):
        ''' Set lowpass filter cutoff (Angstrom) '''
        if lp is not None:
            self.lowpass_angstrom = lp

    def set_particle_radius(self):
        ''' Set particle radius from particle diameter in Angstrom '''
        if self.particle_diameter_A is not None and self.particle_apix is not None:
            # radius [pix] = diameter [A] / (2 * apix [A/pix]), floored to int
            self.particle_radius_pix = int(self.particle_diameter_A//(2*self.particle_apix))

    def select_by_barcode(self, barcode_list):
        ''' Select by barcode '''
        self.particle_star.select_by_barcode(barcode_list)

    def delete_by_barcode(self, barcode_list):
        ''' Delete by barcode '''
        self.particle_star.delete_by_barcode(barcode_list)

    def apply_barcode(self, offsetrot):
        ''' Apply barcode on particle star '''
        new_data_block = barcode.Frame_angle(self.particle_star.get_data_block(), offsetrot)
        self.particle_star.set_data_block(new_data_block)

    def append_particle_barcode(self, barcode={}):
        ''' Append particle barcode

        NOTE(review): mutable default argument, and the parameter shadows the
        module-level 'barcode' import -- consider renaming and defaulting to None.
        '''
        if self.particle_star is not None:
            self.particle_star.append_barcode(barcode)

    def set_particle_barcode(self, barcode={}):
        ''' Set particle barcode

        NOTE(review): same mutable-default/shadowing concern as append_particle_barcode.
        '''
        if self.particle_star is not None:
            self.particle_star.set_barcode(barcode)

    def rename_columns(self, column_params):
        ''' Rename columns '''
        self.particle_star.rename_columns(column_params)

    def flipX_particles(self):
        ''' Flip particles in star file '''
        self.particle_star.flipX()

    def check_particle_pos(self):
        ''' Check location of all particles; record results in particle_data_props. '''
        num_ptcls = self.particle_star.get_numRows()
        ptcl_list = np.arange(num_ptcls)
        ptcl_pos_list = []
        for ptcl in ptcl_list:
            isInside = self.particle_star.is_particle_inside(ptcl, self.mic_apix, self.mic_NX, self.mic_NY)
            ptcl_pos_list.append(isInside)
        ptcl_pos_list = np.array(ptcl_pos_list)
        self.particle_data_props['insideFrame'] = ptcl_pos_list

    def delete_outside_ptcls(self):
        ''' Delete particles outside the frame (requires check_particle_pos first) '''
        delete_ptcls = np.where(self.particle_data_props['insideFrame'] == False)[0]
        self.particle_star.delete_ptcls(delete_ptcls)

    def read_mic_header(self):
        ''' Read the header from first micrograph '''
        if self.micrograph_star is not None:
            if self.micrograph_star.has_label('rlnMicrographName'):
                self.first_mic_file = self.micrograph_star.data_block.loc[0, 'rlnMicrographName']
                self.first_mic_mrc = MRC(self.first_mic_file)
                self.set_mic_dimensions()

    def set_mic_dimensions(self):
        ''' Set micrograph dimensions from the first micrograph's header or image shape. '''
        if 'NX' in self.first_mic_mrc.header.dtype.names:
            self.mic_NX = self.first_mic_mrc.header['NX']
            self.mic_NY = self.first_mic_mrc.header['NY']
            self.mic_NZ = self.first_mic_mrc.header['NZ']
        elif len(self.first_mic_mrc.img3D.shape) == 3:
            self.mic_NZ, self.mic_NY, self.mic_NX = self.first_mic_mrc.img3D.shape
        else:
            # 2D image: no Z dimension available
            self.mic_NY, self.mic_NX = self.first_mic_mrc.img3D.shape

    def set_mic_apix(self, apix=1.82):
        ''' Set micrograph apix '''
        self.mic_apix = apix

    def set_particle_diameter(self, diameter):
        ''' Set particle diameter in Angstroms '''
        self.particle_diameter_A = diameter

    def set_particle_apix(self, apix=1.82):
        ''' Set particle apix '''
        self.particle_apix = apix

    def set_ref_class_apix(self, apix=1.82):
        ''' Set reference class apix

        NOTE(review): assigns self.ref_class_apix, while __init__ initializes
        self.ref_apix -- confirm the intended attribute name.
        '''
        self.ref_class_apix = apix

    def read_mic_apix(self):
        ''' Read and set micrograph apix '''
        self.micrograph_star.determine_star_apix()
        self.set_mic_apix(self.micrograph_star.get_star_apix())

    def read_particle_apix(self):
        ''' Read and set particle apix '''
        self.particle_star.determine_star_apix()
        self.set_particle_apix(self.particle_star.get_star_apix())

    def read_ref_apix(self):
        ''' Read ref apix '''
        self.ref_class_star.determine_star_apix()
        self.set_ref_class_apix(self.ref_class_star.get_star_apix())

    def read_class_refs(self, file, new_classids=False):
        ''' Read class refs '''
        self.ref_class_star_file = os.path.abspath(file)
        self.ref_class_star = Star(file)
        # Generate new classids from particle numbers in image names
        if new_classids:
            self.ref_class_star.num2className()

    def read_particle_stars(self, files):
        ''' Read a batch of star files to process

        The first .star file becomes the primary particle star; the rest are
        read into other_star and merged in.
        NOTE(review): only the last extra file is kept in other_star before
        the single merge_star call, and other_star is None when exactly one
        file matches -- confirm Star.merge_star handles both cases.
        '''
        # Count number of files
        file_counter = 0
        for file in files:
            glob_files = glob.glob(file)
            # Iterate over glob files
            for glob_file in glob_files:
                # Get the file extension
                head, ext = os.path.splitext(glob_file)
                if os.path.isfile(glob_file) and ext == '.star':
                    # Read the first file
                    if file_counter == 0:
                        self.read_particles(glob_file)
                    else:
                        self.read_other_particles(glob_file)
                    # Update file counter
                    file_counter += 1
        # Merge the star files
        self.particle_star.merge_star(self.other_star)

    def read_particles(self, file):
        ''' Read particle star '''
        self.particle_star_file = os.path.abspath(file)
        self.particle_star = Star(file)

    def read_other_particles(self, file):
        ''' Read other star '''
        self.other_star_file = os.path.abspath(file)
        self.other_star = Star(file)

    def read_micrographs(self, file):
        ''' Read micrograph star file '''
        self.micrograph_star_file = os.path.abspath(file)
        self.micrograph_star = Star(file)

    def sort_micrographs(self, column='rlnDefocusU'):
        ''' Sort
        micrographs '''
        self.micrograph_star.sort(column=column)

    def get_class_ids(self):
        ''' Get class names '''
        self.class_ids = self.particle_star.get_class_ids()

    def recenter_particles(self):
        ''' Recenter particles '''
        self.particle_star.determine_star_apix()
        self.particle_star.recenter2D(mic_apix=self.mic_apix)

    def copy_from_ref(self, columns=[], compare='img'):
        ''' Copy columns from reference star

        Rows are matched either by short image name ('img') or by
        micrograph name + coordinates.
        NOTE(review): mutable default argument columns=[] -- safe only as
        long as it is never mutated; consider defaulting to None.
        '''
        if compare == 'img':
            cmp_columns = ['shortImageName']
            # Create short Image name
            self.particle_star.create_shortImageName()
            self.ref_class_star.create_shortImageName()
        else:
            cmp_columns = ['rlnMicrographName', 'rlnCoordinateX', 'rlnCoordinateY']
        # Delete copy columns
        self.particle_star.delete_columns(columns)
        # Shrink reference star data block
        self.ref_class_star.keep_columns(columns+cmp_columns)
        # Merge the two data sets
        self.particle_star.merge(self.ref_class_star, cmp_columns)
        # Delete accesory columns
        if compare == 'img':
            self.particle_star.delete_shortImageName()

    def copy_columns(self, column_params):
        ''' Copy from one column to another new column in particle star file '''
        if column_params is not None:
            for from_column, to_column in column_params.items():
                self.particle_star.copy_column2column(from_column, to_column)

    def reset_offsets(self):
        ''' Make all offsets 0 '''
        offset_columns = ['rlnOriginX', 'rlnOriginY', 'rlnAnglePsi', 'rlnAngleTilt', 'rlnAngleRot']
        for column in offset_columns:
            self.particle_star.set_column(column, 0)

    def add_columns(self, column_params=None):
        ''' Add new columns '''
        if column_params is not None:
            for label, value in column_params.items():
                self.particle_star.set_column(label, value)

    def delete_columns(self, column_params=None):
        ''' Delete columns (NOTE(review): only the keys are used; values ignored) '''
        if column_params is not None:
            for label, value in column_params.items():
                self.particle_star.delete_column(label)

    def reset_priors(self):
        ''' Delete prior columns '''
        prior_columns = ['rlnOriginXPrior', 'rlnOriginYPrior', 'rlnAnglePsiPrior', 'rlnAngleRotPrior', 'rlnAngleTiltPrior']
        for label in prior_columns:
            self.particle_star.delete_column(label)

    def toggle_flip(self):
        ''' Toggle the rlnIsFlip flag (0<->1); create it set to 1 if absent. '''
        if self.particle_star:
            if self.particle_star.has_label('rlnIsFlip'):
                flipON = self.particle_star.data_block['rlnIsFlip'] == 1
                flipOFF = self.particle_star.data_block['rlnIsFlip'] == 0
                self.particle_star.data_block.loc[flipON, 'rlnIsFlip'] = 0
                self.particle_star.data_block.loc[flipOFF, 'rlnIsFlip'] = 1
            else:
                self.particle_star.set_column('rlnIsFlip', 1)

    def transform_particles(self, final_offset=[0, 0], com_offset=False, rotate_psi=0):
        ''' Transform particle star file based on the class star file

        NOTE(review): mutable default final_offset=[0, 0]; com_offset and
        rotate_psi are not used in the portion of the body visible here --
        confirm their use further down.
        '''
        if self.particle_star is None:
            print('No transformation due to missing particle data')
            return 0
        # Center-of-mass of the consensus class overrides the given offset
        if self.consensus_class_mrc is not None:
            final_offset = self.consensus_class_mrc.determine_com(img_num=0)
        if self.ref_class_star is not None:
            # Ref data block
            ref_data_block = self.ref_class_star.get_data_block()
            # Iterate through every class
            for i in range(ref_data_block.shape[0]):
                # Get class id
                class_id = ref_data_block['rlnClassNumber'][i]
                # Get rotangle
                if self.ref_class_star.has_label('rlnAnglePsi'):
                    rot_angle = ref_data_block['rlnAnglePsi'][i]
                else:
                    rot_angle = 0.0
<reponame>pdn4kd/isochoric-expander<gh_stars>0
'''Plots of relative errors for the 5p fits. Both general and zoomed in on the most important features. Ideally also a 2-parameter plot for a vs m.'''
import numpy as np
import matplotlib.pyplot as plt

# Structured array of per-planet true values and 5-parameter fit results.
stars = np.genfromtxt("planetfits_matched.csv", delimiter=",", names=True, dtype=None)
#nplanets = [(1,3), (1,4), (1,6), (4,4), (5,5), (6,6)]
nplanets = [(1,4)]
for minplanets, maxplanets in nplanets:
    # --- Systems where the 5p fit is favored ("Yes"): true value, fit mid,
    # fit min and fit max for period, semi-amplitude K and eccentricity,
    # plus planet mass and total (star+planet) mass in solar units.
    # (genfromtxt yields bytes, hence the b"Yes"/b"No"/b"Marginal" comparisons.)
    f5p_yes_per = [star["per"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_yes_per = [star["per_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_yes_per = [star["per_min_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_yes_per = [star["per_max_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_yes_k = [star["K"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_yes_k = [star["K_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_yes_k = [star["K_min_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_yes_k = [star["K_max_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_yes_e = [star["e"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_yes_e = [star["e_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_yes_e = [star["e_min_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_yes_e = [star["e_max_5p"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_yes_m = [star["PlanetMass"] for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    # 332946.07832806994 = Earth masses per solar mass (planet mass converted to solar units)
    f5pstar_yes_m = [(star["StarMass"]+star["PlanetMass"]/332946.07832806994) for star in stars if ((star["5p_Favored"] == b"Yes") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    # --- Same quantities for systems where the 5p fit is NOT favored ("No").
    f5p_no_per = [star["per"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_no_per = [star["per_mid_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_no_per = [star["per_min_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_no_per = [star["per_max_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_no_k= [star["K"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_no_k = [star["K_mid_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_no_k = [star["K_min_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_no_k = [star["K_max_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_no_e= [star["e"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_no_e = [star["e_mid_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_no_e = [star["e_min_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_no_e = [star["e_max_5p"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_no_m = [star["PlanetMass"] for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pstar_no_m = [(star["StarMass"]+star["PlanetMass"]/332946.07832806994) for star in stars if ((star["5p_Favored"] == b"No") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    # --- Same quantities for "Marginal" systems.
    f5p_mar_per = [star["per"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_mar_per = [star["per_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_mar_per = [star["per_min_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_mar_per = [star["per_max_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_mar_k = [star["K"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_mar_k = [star["K_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_mar_k = [star["K_min_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_mar_k = [star["K_max_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_mar_e = [star["e"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pfit_mar_e = [star["e_mid_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmin_mar_e = [star["e_min_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pmax_mar_e = [star["e_max_5p"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5p_mar_m = [star["PlanetMass"] for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    f5pstar_mar_m = [(star["StarMass"]+star["PlanetMass"]/332946.07832806994) for star in stars if ((star["5p_Favored"] == b"Marginal") and (star["num"] >= minplanets) and (star["num"] <= maxplanets))]
    # Relative error (Δ) and asymmetric error bars (errp/errm) for the "Yes"
    # group; the *a* variants for eccentricity are absolute (undivided) errors.
    # NOTE(review): the relative-error division uses the true value, so a true
    # eccentricity of 0 would divide by zero here.
    f5pΔ_yes_per = np.zeros(len(f5pfit_yes_per))
    f5perrp_yes_per = np.zeros(len(f5pfit_yes_per))
    f5perrm_yes_per = np.zeros(len(f5pfit_yes_per))
    f5pΔ_yes_k = np.zeros(len(f5pfit_yes_k))
    f5perrp_yes_k = np.zeros(len(f5pfit_yes_k))
    f5perrm_yes_k = np.zeros(len(f5pfit_yes_k))
    f5pΔ_yes_e = np.zeros(len(f5pfit_yes_e))
    f5perrp_yes_e = np.zeros(len(f5pfit_yes_e))
    f5perrm_yes_e = np.zeros(len(f5pfit_yes_e))
    f5pΔa_yes_e = np.zeros(len(f5pfit_yes_e))
    f5perrap_yes_e = np.zeros(len(f5pfit_yes_e))
    f5perram_yes_e = np.zeros(len(f5pfit_yes_e))
    for j in np.arange(0, len(f5pfit_yes_per)):
        f5pΔ_yes_per[j] = (f5pfit_yes_per[j] - f5p_yes_per[j]) / (f5p_yes_per[j])
        f5perrp_yes_per[j] = (f5pmax_yes_per[j] - f5pfit_yes_per[j]) / (f5p_yes_per[j])
        f5perrm_yes_per[j] = (f5pfit_yes_per[j] - f5pmin_yes_per[j]) / (f5p_yes_per[j])
        f5pΔ_yes_k[j] = (f5pfit_yes_k[j] - f5p_yes_k[j]) / (f5p_yes_k[j])
        f5perrp_yes_k[j] = (f5pmax_yes_k[j] - f5pfit_yes_k[j]) / (f5p_yes_k[j])
        f5perrm_yes_k[j] = (f5pfit_yes_k[j] - f5pmin_yes_k[j]) / (f5p_yes_k[j])
        f5pΔ_yes_e[j] = (f5pfit_yes_e[j] - f5p_yes_e[j]) / (f5p_yes_e[j])
        f5perrp_yes_e[j] = (f5pmax_yes_e[j] - f5pfit_yes_e[j]) / (f5p_yes_e[j])
        f5perrm_yes_e[j] = (f5pfit_yes_e[j] - f5pmin_yes_e[j]) / (f5p_yes_e[j])
        f5pΔa_yes_e[j] = (f5pfit_yes_e[j] - f5p_yes_e[j])
        f5perrap_yes_e[j] = (f5pmax_yes_e[j] - f5pfit_yes_e[j])
        f5perram_yes_e[j] = (f5pfit_yes_e[j] - f5pmin_yes_e[j])
    # Same error arrays for the "No" group.
    f5pΔ_no_per = np.zeros(len(f5pfit_no_per))
    f5perrp_no_per = np.zeros(len(f5pfit_no_per))
    f5perrm_no_per = np.zeros(len(f5pfit_no_per))
    f5pΔ_no_k = np.zeros(len(f5pfit_no_k))
    f5perrp_no_k = np.zeros(len(f5pfit_no_k))
    f5perrm_no_k = np.zeros(len(f5pfit_no_k))
    f5pΔ_no_e = np.zeros(len(f5pfit_no_e))
    f5perrp_no_e = np.zeros(len(f5pfit_no_e))
    f5perrm_no_e = np.zeros(len(f5pfit_no_e))
    f5pΔa_no_e = np.zeros(len(f5pfit_no_e))
    f5perrap_no_e = np.zeros(len(f5pfit_no_e))
    f5perram_no_e = np.zeros(len(f5pfit_no_e))
    for j in np.arange(0, len(f5pfit_no_per)):
        f5pΔ_no_per[j] = (f5pfit_no_per[j] - f5p_no_per[j]) / (f5p_no_per[j])
        f5perrp_no_per[j] = (f5pmax_no_per[j] - f5pfit_no_per[j]) / (f5p_no_per[j])
        f5perrm_no_per[j] = (f5pfit_no_per[j] - f5pmin_no_per[j]) / (f5p_no_per[j])
        f5pΔ_no_k[j] = (f5pfit_no_k[j] - f5p_no_k[j]) / (f5p_no_k[j])
        f5perrp_no_k[j] = (f5pmax_no_k[j] - f5pfit_no_k[j]) / (f5p_no_k[j])
        f5perrm_no_k[j] = (f5pfit_no_k[j] - f5pmin_no_k[j]) / (f5p_no_k[j])
        f5pΔ_no_e[j] = (f5pfit_no_e[j] - f5p_no_e[j]) / (f5p_no_e[j])
        f5perrp_no_e[j] = (f5pmax_no_e[j] - f5pfit_no_e[j]) / (f5p_no_e[j])
        f5perrm_no_e[j] = (f5pfit_no_e[j] - f5pmin_no_e[j]) / (f5p_no_e[j])
        f5pΔa_no_e[j] = (f5pfit_no_e[j] - f5p_no_e[j])
        f5perrap_no_e[j] = (f5pmax_no_e[j] - f5pfit_no_e[j])
        f5perram_no_e[j] = (f5pfit_no_e[j] - f5pmin_no_e[j])
    # Same error arrays for the "Marginal" group.
    f5pΔ_mar_per = np.zeros(len(f5pfit_mar_per))
    f5perrp_mar_per = np.zeros(len(f5pfit_mar_per))
    f5perrm_mar_per = np.zeros(len(f5pfit_mar_per))
    f5pΔ_mar_k = np.zeros(len(f5pfit_mar_k))
    f5perrp_mar_k = np.zeros(len(f5pfit_mar_k))
    f5perrm_mar_k = np.zeros(len(f5pfit_mar_k))
    f5pΔ_mar_e = np.zeros(len(f5pfit_mar_e))
    f5perrp_mar_e = np.zeros(len(f5pfit_mar_e))
    f5perrm_mar_e = np.zeros(len(f5pfit_mar_e))
    f5pΔa_mar_e = np.zeros(len(f5pfit_mar_e))
    f5perrap_mar_e = np.zeros(len(f5pfit_mar_e))
    f5perram_mar_e = np.zeros(len(f5pfit_mar_e))
    for j in np.arange(0, len(f5pfit_mar_per)):
        f5pΔ_mar_per[j] = (f5pfit_mar_per[j] - f5p_mar_per[j]) / (f5p_mar_per[j])
        f5perrp_mar_per[j] = (f5pmax_mar_per[j] - f5pfit_mar_per[j]) / (f5p_mar_per[j])
        f5perrm_mar_per[j] = (f5pfit_mar_per[j] - f5pmin_mar_per[j]) / (f5p_mar_per[j])
        f5pΔ_mar_k[j] = (f5pfit_mar_k[j] - f5p_mar_k[j]) / (f5p_mar_k[j])
        f5perrp_mar_k[j] = (f5pmax_mar_k[j] - f5pfit_mar_k[j]) / (f5p_mar_k[j])
        f5perrm_mar_k[j] = (f5pfit_mar_k[j] - f5pmin_mar_k[j]) / (f5p_mar_k[j])
        f5pΔ_mar_e[j] = (f5pfit_mar_e[j] - f5p_mar_e[j]) / (f5p_mar_e[j])
        f5perrp_mar_e[j] = (f5pmax_mar_e[j] - f5pfit_mar_e[j]) / (f5p_mar_e[j])
        f5perrm_mar_e[j] = (f5pfit_mar_e[j] - f5pmin_mar_e[j]) / (f5p_mar_e[j])
        f5pΔa_mar_e[j] = (f5pfit_mar_e[j] - f5p_mar_e[j])
        f5perrap_mar_e[j] = (f5pmax_mar_e[j] - f5pfit_mar_e[j])
        f5perram_mar_e[j] = (f5pfit_mar_e[j] - f5pmin_mar_e[j])
    # Relative error vs true period: blue = 5p favored, red = marginal, black = not favored.
    # NOTE(review): the plt.savefig calls below are commented out, so the
    # 'filename' assignments are currently unused.
    fig, f5pp = plt.subplots()
    f5pp.errorbar(f5p_yes_per, f5pΔ_yes_per, yerr=[f5perrm_yes_per, f5perrp_yes_per], color="#0000FF", fmt='o')
    f5pp.errorbar(f5p_mar_per, f5pΔ_mar_per, yerr=[f5perrm_mar_per, f5perrp_mar_per], color="#FF0000", fmt='o')
    f5pp.errorbar(f5p_no_per, f5pΔ_no_per, yerr=[f5perrm_no_per, f5perrp_no_per], color="#000000", fmt='o')
    f5pp.set_xscale('log')
    f5pp.set_ylabel("Relative Error")
    f5pp.set_xlabel("Period (Days)")
    f5pp.set_title("Fitting: Period, K, Time of Conjunction, Ecc (max 0.5) ("+str(minplanets)+"-"+str(maxplanets)+" planet systems)")
    #f5ppk.legend(loc=2)
    filename = "5pp_error_" + str(minplanets) + str(maxplanets) + ".png"
    #plt.savefig(filename)
    # Relative error vs true semi-amplitude K, zoomed in y.
    fig, f5pk = plt.subplots()
    f5pk.errorbar(f5p_yes_k, f5pΔ_yes_k, yerr=[f5perrm_yes_k, f5perrp_yes_k], color="#0000FF", fmt='o')
    f5pk.errorbar(f5p_mar_k, f5pΔ_mar_k, yerr=[f5perrm_mar_k, f5perrp_mar_k], color="#FF0000", fmt='o')
    f5pk.errorbar(f5p_no_k, f5pΔ_no_k, yerr=[f5perrm_no_k, f5perrp_no_k], color="#000000", fmt='o')
    f5pk.set_xscale('log')
    f5pk.set_ylabel("Relative Error")
    f5pk.set_xlabel("Semi-amplitude (m/s)")
    f5pk.set_ylim(-1.0,1.5)
    #f5pk.set_xlim(6e-2,1e2)
    f5pk.set_title("Fitting: Period, K, Time of Conjunction, Ecc (max 0.5) ("+str(minplanets)+"-"+str(maxplanets)+" planet systems)")
    #f5pkk.legend(loc=2)
    filename = "5pk_error_zoom_" + str(minplanets) + str(maxplanets) + ".png"
    #plt.savefig(filename)
    # Relative error vs true eccentricity, zoomed in x and y.
    fig, f5pe = plt.subplots()
    f5pe.errorbar(f5p_yes_e, f5pΔ_yes_e, yerr=[f5perrm_yes_e, f5perrp_yes_e], color="#0000FF", fmt='o')
    f5pe.errorbar(f5p_mar_e, f5pΔ_mar_e, yerr=[f5perrm_mar_e, f5perrp_mar_e], color="#FF0000", fmt='o')
    f5pe.errorbar(f5p_no_e, f5pΔ_no_e, yerr=[f5perrm_no_e, f5perrp_no_e], color="#000000", fmt='o')
    f5pe.set_xscale('log')
    f5pe.set_ylabel("Relative Error")
    f5pe.set_xlabel("Eccentricity")
    f5pe.set_xlim(4e-2,0.6)
    f5pe.set_ylim(-1.2,2)
    f5pe.set_title("Fitting: Period, K, Time of Conjunction, Ecc (max 0.5) ("+str(minplanets)+"-"+str(maxplanets)+" planet systems)")
    #f5pek.legend(loc=2)
    filename = "5pe_error_zoom" + str(minplanets) + str(maxplanets) + ".png"
    #plt.savefig(filename)
    # Absolute (undivided) eccentricity error vs true eccentricity.
    fig, f5pea = plt.subplots()
    f5pea.errorbar(f5p_yes_e, f5pΔa_yes_e, yerr=[f5perram_yes_e, f5perrap_yes_e], color="#0000FF", fmt='o')
    f5pea.errorbar(f5p_mar_e, f5pΔa_mar_e, yerr=[f5perram_mar_e, f5perrap_mar_e], color="#FF0000", fmt='o')
    f5pea.errorbar(f5p_no_e, f5pΔa_no_e, yerr=[f5perram_no_e, f5perrap_no_e], color="#000000", fmt='o')
    f5pea.set_xscale('log')
    f5pea.set_ylabel("Error")
    f5pea.set_xlabel("Eccentricity")
    #f5pea.set_xlim(4e-2,0.6)
    #f5pea.set_ylim(-0.5,0.5)
    f5pea.set_title("Fitting: Period, K, Time of Conjunction, Ecc (max 0.5) ("+str(minplanets)+"-"+str(maxplanets)+" planet systems)")
    #f5peak.legend(loc=2)
    filename = "5pe_error_abs_log" + str(minplanets) + str(maxplanets) + ".png"
    #plt.savefig(filename)
    '''
    # Getting mass involves consderably more potential error
    # msini = 
\frac{K}{K_0} \sqrt{1-e^2} (m+M)^{2/3} P^{1/3} # Since period and eccentricity are also being fit, we would need to do an error propogation calculation to get proper error pars on mass. We are ignoring
= utils.random_unicode() pub = utils.random_unicode() priv = utils.random_unicode() ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"} rgn = "FOO" clt = fakes.FakeClient() ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity) ep._get_client = Mock(return_value=clt) ident.services[svc].endpoints = {region: ep} ret = ident.get_client(svc, region, public=False) self.assertEqual(ret, clt) ep._get_client.assert_called_once_with(cached=True, public=False, client_class=None) @patch("pyrax.client_class_for_service") def test_get_client_no_cache(self, mock_ccfs): ident = self.identity ident.authenticated = True svc = "fake" region = utils.random_unicode() pub = utils.random_unicode() priv = utils.random_unicode() ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"} rgn = "FOO" clt_class = fakes.FakeClient ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity) mock_ccfs.return_value = clt_class ident.services[svc].endpoints = {region: ep} ret = ident.get_client(svc, region, cached=False) self.assertTrue(isinstance(ret, clt_class)) mock_ccfs.assert_called_once_with(svc) def test_get_client_no_client(self): ident = self.identity ident.authenticated = True svc = "fake" region = utils.random_unicode() pub = utils.random_unicode() priv = utils.random_unicode() ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"} rgn = "FOO" ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity) ep._get_client = Mock(return_value=None) ident.services[svc].endpoints = {region: ep} self.assertRaises(exc.NoSuchClient, ident.get_client, svc, region) def test_set_credentials(self): for cls in self.id_classes.values(): ident = cls() ident.authenticate = Mock() ident.set_credentials(self.username, self.password, authenticate=True) self.assertEqual(ident.username, self.username) self.assertEqual(ident.password, <PASSWORD>) self.assertIsNone(ident.token) self.assertIsNone(ident._creds_file) ident.authenticate.assert_called_once_with() def 
test_set_credential_file(self):
        # Credentials read from an INI-style file must populate username and
        # password, for both the 'api_key' and 'password' option spellings;
        # malformed files must raise InvalidCredentialFile.
        ident = self.rax_identity_class()
        user = "fakeuser"
        # Use percent signs in key to ensure it doesn't get interpolated.
        key = "fake%api%key"
        ident.authenticate = Mock()
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as ff:
                ff.write("[rackspace_cloud]\n")
                ff.write("username = %s\n" % user)
                ff.write("api_key = %s\n" % key)
            ident.set_credential_file(tmpname, authenticate=True)
            self.assertEqual(ident.username, user)
            self.assertEqual(ident.password, key)
        # Using 'password' instead of 'api_key'
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as ff:
                ff.write("[rackspace_cloud]\n")
                ff.write("username = %s\n" % user)
                # NOTE(review): "<PASSWORD>" below looks like a redaction
                # placeholder for the original format string — confirm the
                # intended literal before relying on this test.
                ff.write("password = %<PASSWORD>" % key)
            ident.set_credential_file(tmpname)
            self.assertEqual(ident.username, user)
            self.assertEqual(ident.password, key)
        # File doesn't exist
        self.assertRaises(exc.FileNotFound, ident.set_credential_file,
                "doesn't exist")
        # Missing section
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as ff:
                ff.write("user = x\n")
            self.assertRaises(exc.InvalidCredentialFile,
                    ident.set_credential_file, tmpname)
        # Incorrect section
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as ff:
                ff.write("[bad_section]\nusername = x\napi_key = y\n")
            self.assertRaises(exc.InvalidCredentialFile,
                    ident.set_credential_file, tmpname)
        # Incorrect option
        with utils.SelfDeletingTempfile() as tmpname:
            with open(tmpname, "wb") as ff:
                ff.write("[rackspace_cloud]\nuserbad = x\napi_key = y\n")
            self.assertRaises(exc.InvalidCredentialFile,
                    ident.set_credential_file, tmpname)

    def test_set_credential_file_keystone(self):
        # Same as above but for the KeystoneIdentity credential-file format.
        ident = pyrax.keystone_identity.KeystoneIdentity(
                username=self.username, password=self.password)
        user = "fakeuser"
        password = "<PASSWORD>"
        tenant_id = "faketenantid"
        with utils.SelfDeletingTempfile() as tmpname:
            # NOTE(review): `file()` is the Python 2 builtin; under Python 3
            # this would need to be open() — confirm the target runtime.
            with file(tmpname, "wb") as ff:
                ff.write("[keystone]\n")
                ff.write("username = %s\n" % user)
                ff.write("password = <PASSWORD>"
                        % password)
                ff.write("tenant_id = %s\n" % tenant_id)
            ident.set_credential_file(tmpname)
            self.assertEqual(ident.username, user)
            self.assertEqual(ident.password, password)

    def test_keyring_auth_no_keyring(self):
        # keyring_auth must fail cleanly when the keyring module is absent.
        ident = self.identity
        sav = pyrax.base_identity.keyring
        pyrax.base_identity.keyring = None
        self.assertRaises(exc.KeyringModuleNotInstalled, ident.keyring_auth)
        pyrax.base_identity.keyring = sav

    def test_keyring_auth_no_username(self):
        # No configured username -> KeyringUsernameMissing.
        ident = self.identity
        sav = pyrax.get_setting
        pyrax.get_setting = Mock(return_value=None)
        self.assertRaises(exc.KeyringUsernameMissing, ident.keyring_auth)
        pyrax.get_setting = sav

    def test_keyring_auth_no_password(self):
        # Keyring present but holds no password -> KeyringPasswordNotFound.
        ident = self.identity
        sav = pyrax.base_identity.keyring.get_password
        pyrax.base_identity.keyring.get_password = Mock(return_value=None)
        self.assertRaises(exc.KeyringPasswordNotFound, ident.keyring_auth,
                "fake")
        pyrax.base_identity.keyring.get_password = sav

    def test_keyring_auth_apikey(self):
        # With _creds_style == "apikey" the keyring value is passed as api_key.
        ident = self.identity
        ident.authenticate = Mock()
        sav = pyrax.base_identity.keyring.get_password
        pw = utils.random_unicode()
        pyrax.base_identity.keyring.get_password = Mock(return_value=pw)
        user = utils.random_unicode()
        ident._creds_style = "apikey"
        ident.keyring_auth(username=user)
        ident.authenticate.assert_called_once_with(username=user, api_key=pw)
        pyrax.base_identity.keyring.get_password = sav

    def test_keyring_auth_password(self):
        # With _creds_style == "password" the keyring value is passed as
        # password.
        ident = self.identity
        ident.authenticate = Mock()
        sav = pyrax.base_identity.keyring.get_password
        pw = utils.random_unicode()
        pyrax.base_identity.keyring.get_password = Mock(return_value=pw)
        user = utils.random_unicode()
        ident._creds_style = "password"
        ident.keyring_auth(username=user)
        ident.authenticate.assert_called_once_with(username=user, password=pw)
        pyrax.base_identity.keyring.get_password = sav

    def test_get_extensions(self):
        # get_extensions() unwraps the "extensions"/"values" response body.
        ident = self.identity
        v1 = utils.random_unicode()
        v2 = utils.random_unicode()
        resp_body = {"extensions": {"values": [v1, v2]}}
        ident.method_get = Mock(return_value=(None, resp_body))
        ret = ident.get_extensions()
        self.assertEqual(ret, [v1, v2])

    def test_get_credentials_rax(self):
        # Rackspace apikey-style credentials use the RAX-KSKEY extension dict.
        ident = self.rax_identity_class(username=self.username,
                api_key=self.password)
        ident._creds_style = "apikey"
        creds = ident._format_credentials()
        user = creds["auth"]["RAX-KSKEY:apiKeyCredentials"]["username"]
        key = creds["auth"]["RAX-KSKEY:apiKeyCredentials"]["apiKey"]
        self.assertEqual(self.username, user)
        self.assertEqual(self.password, key)

    def test_get_credentials_rax_password(self):
        # Rackspace password-style credentials use passwordCredentials.
        ident = self.rax_identity_class(username=self.username,
                password=self.password)
        ident._creds_style = "password"
        creds = ident._format_credentials()
        user = creds["auth"]["passwordCredentials"]["username"]
        key = creds["auth"]["passwordCredentials"]["password"]
        self.assertEqual(self.username, user)
        self.assertEqual(self.password, key)

    def test_get_credentials_keystone(self):
        # Keystone always formats passwordCredentials.
        ident = self.keystone_identity_class(username=self.username,
                password=self.password)
        creds = ident._format_credentials()
        user = creds["auth"]["passwordCredentials"]["username"]
        key = creds["auth"]["passwordCredentials"]["password"]
        self.assertEqual(self.username, user)
        self.assertEqual(self.password, key)

    def test_authenticate(self):
        # Successful authentication path for every identity class, with the
        # HTTP layer mocked out.
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_body = fakes.fake_identity_response
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        for cls in self.id_classes.values():
            ident = cls()
            if cls is self.keystone_identity_class:
                # Necessary for testing to avoid NotImplementedError.
                utils.add_method(ident, lambda self: "", "_get_auth_endpoint")
            ident.authenticate()
        pyrax.http.request = savrequest

    def test_authenticate_fail_creds(self):
        # HTTP 401 -> AuthenticationFailed.
        ident = self.rax_identity_class(username="BAD", password="<PASSWORD>")
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_resp.status_code = 401
        fake_body = fakes.fake_identity_response
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        self.assertRaises(exc.AuthenticationFailed, ident.authenticate)
        pyrax.http.request = savrequest

    def test_authenticate_fail_other(self):
        # HTTP 500 with a 'message' key -> InternalServerError.
        ident = self.rax_identity_class(username="BAD", password="BAD")
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_resp.status_code = 500
        fake_body = {u'unauthorized': {
                u'message': u'Username or api key is invalid',
                u'code': 500}}
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        self.assertRaises(exc.InternalServerError, ident.authenticate)
        pyrax.http.request = savrequest

    def test_authenticate_fail_no_message(self):
        # HTTP 500 without a 'message' key still -> InternalServerError.
        ident = self.rax_identity_class(username="BAD", password="<PASSWORD>")
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_resp.status_code = 500
        fake_body = {u'unauthorized': {
                u'bogus': u'Username or api key is invalid',
                u'code': 500}}
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        self.assertRaises(exc.InternalServerError, ident.authenticate)
        pyrax.http.request = savrequest

    def test_authenticate_fail_gt_299(self):
        # Non-5xx failure status (> 299) -> AuthenticationFailed.
        ident = self.rax_identity_class(username="BAD", password="<PASSWORD>")
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_resp.status_code = 444
        fake_body = {u'unauthorized': {
                u'message': u'Username or api key is invalid',
                u'code': 500}}
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        self.assertRaises(exc.AuthenticationFailed, ident.authenticate)
        pyrax.http.request = savrequest

    def test_authenticate_fail_gt_299ino_message(self):
        # Same as above, but body lacks a 'message' key.
        ident = self.rax_identity_class(username="BAD", password="<PASSWORD>")
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_resp.status_code = 444
        fake_body = {u'unauthorized': {
                u'bogus': u'Username or api key is invalid',
                u'code': 500}}
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        self.assertRaises(exc.AuthenticationFailed, ident.authenticate)
        pyrax.http.request = savrequest

    def test_authenticate_backwards_compatibility_connect_param(self):
        # authenticate() must still accept the legacy connect= parameter.
        savrequest = pyrax.http.request
        fake_resp = fakes.FakeIdentityResponse()
        fake_body = fakes.fake_identity_response
        pyrax.http.request = Mock(return_value=(fake_resp, fake_body))
        for cls in self.id_classes.values():
            ident = cls()
            if cls is self.keystone_identity_class:
                # Necessary for testing to avoid NotImplementedError.
                utils.add_method(ident, lambda self: "", "_get_auth_endpoint")
            ident.authenticate(connect=False)
        pyrax.http.request = savrequest

    def test_rax_endpoints(self):
        # The auth endpoint comes from the 'auth_endpoint' setting.
        ident = self.rax_identity_class()
        sav = pyrax.get_setting("auth_endpoint")
        fake_ep = utils.random_unicode()
        pyrax.set_setting("auth_endpoint", fake_ep)
        ep = ident._get_auth_endpoint()
        self.assertEqual(ep, fake_ep)
        pyrax.set_setting("auth_endpoint", sav)

    def test_auth_token(self):
        # auth_token property mirrors the stored token.
        for cls in self.id_classes.values():
            ident = cls()
            test_token = utils.random_unicode()
            ident.token = test_token
            self.assertEqual(ident.auth_token, test_token)

    def test_auth_endpoint(self):
        # auth_endpoint property getter delegates to _get_auth_endpoint().
        for cls in self.id_classes.values():
            ident = cls()
            test_ep = utils.random_unicode()
            ident._get_auth_endpoint = Mock(return_value=test_ep)
            self.assertEqual(ident.auth_endpoint, test_ep)

    def test_set_auth_endpoint(self):
        # auth_endpoint property setter stores to _auth_endpoint.
        for cls in self.id_classes.values():
            ident = cls()
            test_ep = utils.random_unicode()
            ident.auth_endpoint = test_ep
            self.assertEqual(ident._auth_endpoint, test_ep)

    def test_regions(self):
        # Parsing the fake service catalog yields exactly these regions.
        ident = self.base_identity_class()
        fake_resp = fakes.FakeIdentityResponse()
        ident._parse_response(fake_resp.json())
        expected = ("DFW", "ORD", "SYD", "FAKE")
        self.assertEqual(len(ident.regions), len(expected))
        for rgn in expected:
            self.assertTrue(rgn in ident.regions)

    def test_getattr_service(self):
        # Attribute access by service name returns that service's endpoints.
        ident = self.base_identity_class()
        ident.authenticated = True
        svc = self.service
        pub = utils.random_unicode()
        priv = utils.random_unicode()
        ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"}
        rgn = "FOO"
        ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity)
        self.service.endpoints = {rgn: ep}
        ident.services = {"fake": self.service}
        ret = ident.fake
        self.assertEqual(ret, self.service.endpoints)

    def test_getattr_region(self):
        # Attribute access by region name returns a service->endpoint map.
        ident = self.base_identity_class()
        ident.authenticated = True
        svc = self.service
        pub = utils.random_unicode()
        priv = utils.random_unicode()
        ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"}
        rgn = "FOO"
        ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity)
        self.service.endpoints = {rgn: ep}
        ident.services = {"fake": self.service}
        ret = ident.FOO
        self.assertEqual(ret, {"fake": ep})

    def test_getattr_fail(self):
        # Unknown attribute while authenticated -> AttributeError.
        ident = self.base_identity_class()
        ident.authenticated = True
        svc = self.service
        pub = utils.random_unicode()
        priv = utils.random_unicode()
        ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"}
        rgn = "FOO"
        ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity)
        self.service.endpoints = {rgn: ep}
        ident.services = {"fake": self.service}
        self.assertRaises(AttributeError, getattr, ident, "BAR")

    def test_getattr_not_authed(self):
        # Unknown attribute while NOT authenticated -> NotAuthenticated.
        ident = self.base_identity_class()
        ident.authenticated = False
        svc = self.service
        pub = utils.random_unicode()
        priv = utils.random_unicode()
        ep_dict = {"publicURL": pub, "privateURL": priv, "tenantId": "aa"}
        rgn = "FOO"
        ep = fakes.FakeEndpoint(ep_dict, svc, rgn, self.identity)
        self.service.endpoints = {rgn: ep}
        ident.services = {"fake": self.service}
        self.assertRaises(exc.NotAuthenticated, getattr, ident, "BAR")

    def test_http_methods(self):
        # Each method_* helper forwards its verb and arguments to _call().
        ident = self.base_identity_class()
        ident._call = Mock()
        uri = utils.random_unicode()
        dkv = utils.random_unicode()
        data = {dkv: dkv}
        hkv = utils.random_unicode()
        headers = {hkv: hkv}
        std_headers = True
        ident.method_get(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("GET", uri, False, data, headers,
                std_headers)
        ident.method_head(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("HEAD", uri, False, data, headers,
                std_headers)
        ident.method_post(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("POST", uri, False, data, headers,
                std_headers)
        ident.method_put(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("PUT", uri, False, data, headers,
                std_headers)
        ident.method_delete(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("DELETE", uri, False, data, headers,
                std_headers)
        ident.method_patch(uri, admin=False, data=data, headers=headers,
                std_headers=std_headers)
        ident._call.assert_called_with("PATCH", uri, False, data, headers,
                std_headers)

    def test_call(self):
        # _call() builds the outgoing request: header merging with and
        # without standard headers, admin flag, and no stray stdout output
        # despite http_log_debug being enabled.
        ident = self.base_identity_class()
        sav_req = pyrax.http.request
        pyrax.http.request = Mock()
        sav_debug = ident.http_log_debug
        ident.http_log_debug = True
        uri = "https://%s/%s" % (utils.random_ascii(), utils.random_ascii())
        sav_stdout = sys.stdout
        out = StringIO()
        sys.stdout = out
        utils.add_method(ident, lambda self: "", "_get_auth_endpoint")
        dkv = utils.random_ascii()
        data = {dkv: dkv}
        hkv = utils.random_ascii()
        headers = {hkv: hkv}
        for std_headers in (True, False):
            expected_headers = ident._standard_headers() if std_headers else {}
            expected_headers.update(headers)
            for admin in (True, False):
                ident.method_post(uri, data=data, headers=headers,
                        std_headers=std_headers, admin=admin)
                pyrax.http.request.assert_called_with("POST", uri, body=data,
                        headers=expected_headers)
                self.assertEqual(out.getvalue(), "")
                out.seek(0)
                out.truncate()
[{}] updated" .format(rsrc.id, subcloud_rsrc_id, ntpservers), extra=self.log_extra) def sync_snmp_trapdest(self, request, rsrc): switcher = { consts.OPERATION_TYPE_POST: self.snmp_trapdest_create, consts.OPERATION_TYPE_CREATE: self.snmp_trapdest_create, consts.OPERATION_TYPE_DELETE: self.snmp_trapdest_delete, } func = switcher[request.orch_job.operation_type] try: func(request, rsrc) except (keystone_exceptions.connection.ConnectTimeout, keystone_exceptions.ConnectFailure) as e: LOG.info("sync_snmp_trapdest: subcloud {} is not reachable [{}]" .format(self.subcloud_engine.subcloud.region_name, str(e)), extra=self.log_extra) raise exceptions.SyncRequestTimeout except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry def snmp_trapdest_create(self, request, rsrc): LOG.info("snmp_trapdest_create region {} resource_info={}".format( self.subcloud_engine.subcloud.region_name, request.orch_job.resource_info), extra=self.log_extra) resource_info_dict = jsonutils.loads(request.orch_job.resource_info) payload = resource_info_dict.get('payload') if not payload: payload = resource_info_dict s_os_client = sdk.OpenStackDriver(self.region_name) try: itrapdest = s_os_client.sysinv_client.snmp_trapdest_create( payload) itrapdest_id = itrapdest.uuid ip_address = itrapdest.ip_address except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("snmp_trapdest_create exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("snmp_trapdest_create error {}".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry # Now persist the subcloud resource to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, ip_address) LOG.info("SNMP trapdest {}:{} [{}/{}] created".format(rsrc.id, subcloud_rsrc_id, 
ip_address, itrapdest_id), extra=self.log_extra) return itrapdest def snmp_trapdest_delete(self, request, rsrc): subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id) if not subcloud_rsrc: return s_os_client = sdk.OpenStackDriver(self.region_name) try: s_os_client.sysinv_client.snmp_trapdest_delete( subcloud_rsrc.subcloud_resource_id) except exceptions.TrapDestNotFound: # SNMP trapdest already deleted in subcloud, carry on. LOG.info("SNMP trapdest not in subcloud, may be already deleted", extra=self.log_extra) except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("snmp_trapdest_delete exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("snmp_trapdest_delete error {}".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry subcloud_rsrc.delete() # Master Resource can be deleted only when all subcloud resources # are deleted along with corresponding orch_job and orch_requests. 
LOG.info("SNMP trapdest {}:{} [{}] deleted".format( rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id), extra=self.log_extra) def sync_snmp_community(self, request, rsrc): switcher = { consts.OPERATION_TYPE_POST: self.snmp_community_create, consts.OPERATION_TYPE_CREATE: self.snmp_community_create, consts.OPERATION_TYPE_DELETE: self.snmp_community_delete, } func = switcher[request.orch_job.operation_type] try: func(request, rsrc) except (keystone_exceptions.connection.ConnectTimeout, keystone_exceptions.ConnectFailure) as e: LOG.info("sync_snmp_community: subcloud {} is not reachable [{}]" .format(self.subcloud_engine.subcloud.region_name, str(e)), extra=self.log_extra) raise exceptions.SyncRequestTimeout except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry def snmp_community_create(self, request, rsrc): LOG.info("snmp_community_create region {} resource_info={}".format( self.subcloud_engine.subcloud.region_name, request.orch_job.resource_info), extra=self.log_extra) resource_info_dict = jsonutils.loads(request.orch_job.resource_info) payload = resource_info_dict.get('payload') if not payload: payload = resource_info_dict s_os_client = sdk.OpenStackDriver(self.region_name) try: icommunity = s_os_client.sysinv_client.snmp_community_create( payload) icommunity_id = icommunity.uuid community = icommunity.community except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("snmp_community_create exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("snmp_community_create error {}".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry # Now persist the subcloud resource to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, community) LOG.info("SNMP community 
{}:{} [{}/{}] created".format(rsrc.id, subcloud_rsrc_id, community, icommunity_id), extra=self.log_extra) return icommunity def snmp_community_delete(self, request, rsrc): subcloud_rsrc = self.get_db_subcloud_resource(rsrc.id) if not subcloud_rsrc: return s_os_client = sdk.OpenStackDriver(self.region_name) try: s_os_client.sysinv_client.snmp_community_delete( subcloud_rsrc.subcloud_resource_id) except exceptions.CommunityNotFound: # Community already deleted in subcloud, carry on. LOG.info("SNMP community not in subcloud, may be already deleted", extra=self.log_extra) except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("snmp_community_delete exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("snmp_community_delete error {}".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry subcloud_rsrc.delete() # Master Resource can be deleted only when all subcloud resources # are deleted along with corresponding orch_job and orch_requests. 
LOG.info("SNMP community {}:{} [{}] deleted".format( rsrc.id, subcloud_rsrc.id, subcloud_rsrc.subcloud_resource_id), extra=self.log_extra) def update_remotelogging(self, values): s_os_client = sdk.OpenStackDriver(self.region_name) try: iremotelogging = s_os_client.sysinv_client.update_remotelogging( values) return iremotelogging except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("update_remotelogging exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("update_remotelogging error {} region_name".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry def sync_remotelogging(self, request, rsrc): # The system is created with default remotelogging; thus there # is a prepopulated remotelogging entry. 
LOG.info("sync_remotelogging resource_info={}".format( request.orch_job.resource_info), extra=self.log_extra) remotelogging_dict = jsonutils.loads(request.orch_job.resource_info) payload = remotelogging_dict.get('payload') if not payload: LOG.info("sync_remotelogging No payload found in resource_info" "{}".format(request.orch_job.resource_info), extra=self.log_extra) return iremotelogging = self.update_remotelogging(payload) # Ensure subcloud resource is persisted to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, iremotelogging.uuid) LOG.info("remotelogging {}:{} [{}/{}] updated".format(rsrc.id, subcloud_rsrc_id, iremotelogging.ip_address, iremotelogging.uuid), extra=self.log_extra) def update_firewallrules(self, firewall_sig, firewallrules=None): s_os_client = sdk.OpenStackDriver(self.region_name) try: ifirewallrules = s_os_client.sysinv_client.update_firewallrules( firewall_sig, firewallrules=firewallrules) return ifirewallrules except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("update_firewallrules exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("update_firewallrules error {} region_name".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry def sync_firewallrules(self, request, rsrc): # The system is not created with default firewallrules LOG.info("sync_firewallrules resource_info={}".format( request.orch_job.resource_info), extra=self.log_extra) firewallrules_dict = jsonutils.loads(request.orch_job.resource_info) payload = firewallrules_dict.get('payload') # payload is the contents of the POST operation if not payload: LOG.info("sync_firewallrules No payload found in resource_info" 
"{}".format(request.orch_job.resource_info), extra=self.log_extra) return if isinstance(payload, dict): firewall_sig = payload.get('firewall_sig') else: firewall_sig = rsrc.master_id LOG.info("firewall_sig from master_id={}".format(firewall_sig)) ifirewallrules = None if firewall_sig: ifirewallrules = self.update_firewallrules(firewall_sig) else: firewall_sig = rsrc.master_id if firewall_sig and firewall_sig != self.FIREWALL_SIG_NULL: ifirewallrules = self.update_firewallrules( firewall_sig, firewallrules=payload) else: LOG.info("skipping firewall_sig={}".format(firewall_sig)) ifirewallrules_sig = None try: ifirewallrules_sig = \ ifirewallrules.get('firewallrules').get('firewall_sig') except Exception as e: LOG.warn("No ifirewallrules={} unknown e={}".format( ifirewallrules, e)) # Ensure subcloud resource is persisted to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, firewall_sig) LOG.info("firewallrules {} {} [{}/{}] updated".format(rsrc.id, subcloud_rsrc_id, ifirewallrules_sig, firewall_sig), extra=self.log_extra) def update_certificate(self, signature, certificate=None, data=None): s_os_client = sdk.OpenStackDriver(self.region_name) try: icertificate = s_os_client.sysinv_client.update_certificate( signature, certificate=certificate, data=data) return icertificate except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("update_certificate exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("update_certificate error {} region_name".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry @staticmethod def _decode_certificate_payload(certificate_dict): """Decode certificate from payload. 
params: certificate_dict returns: certificate, metadata """ certificate = None metadata = {} content_disposition = 'Content-Disposition' try: content_type = certificate_dict.get('content_type') payload = certificate_dict.get('payload') multipart_data = MultipartDecoder(payload, content_type) for part in multipart_data.parts: if ('name="passphrase"' in part.headers.get( content_disposition)): metadata.update({'passphrase': part.content}) elif ('name="mode"' in part.headers.get( content_disposition)): metadata.update({'mode': part.content}) elif ('name="file"' in part.headers.get( content_disposition)): certificate = part.content except Exception as e: LOG.warn("No certificate decode e={}".format(e)) LOG.info("_decode_certificate_payload metadata={}".format( metadata)) return certificate, metadata def sync_certificate(self, request, rsrc): LOG.info("sync_certificate resource_info={}".format( request.orch_job.resource_info), extra=self.log_extra) certificate_dict = jsonutils.loads(request.orch_job.resource_info) payload = certificate_dict.get('payload') if not payload: LOG.info("sync_certificate No payload found in resource_info" "{}".format(request.orch_job.resource_info), extra=self.log_extra) return if isinstance(payload, dict): signature = payload.get('signature') LOG.info("signature from dict={}".format(signature)) else: signature = rsrc.master_id LOG.info("signature from master_id={}".format(signature)) certificate, metadata = self._decode_certificate_payload( certificate_dict) isignature = None signature = rsrc.master_id if signature and signature != self.CERTIFICATE_SIG_NULL: icertificate = self.update_certificate( signature, certificate=certificate, data=metadata) cert_body = icertificate.get('certificates') if cert_body: isignature = cert_body.get('signature') else: LOG.info("skipping signature={}".format(signature)) # Ensure subcloud resource is persisted to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, signature) 
LOG.info("certificate {} {} [{}/{}] updated".format(rsrc.id, subcloud_rsrc_id, isignature, signature), extra=self.log_extra) def update_user(self, passwd_hash, root_sig, passwd_expiry_days): LOG.info("update_user={} {} {}".format( passwd_hash, root_sig, passwd_expiry_days), extra=self.log_extra) try: s_os_client = sdk.OpenStackDriver(self.region_name) iuser = s_os_client.sysinv_client.update_user(passwd_hash, root_sig, passwd_expiry_days) return iuser except (exceptions.ConnectionRefused, exceptions.NotAuthorized, exceptions.TimeOut): LOG.info("update_user exception Timeout", extra=self.log_extra) s_os_client.delete_region_clients(self.region_name) raise exceptions.SyncRequestTimeout except (AttributeError, TypeError) as e: LOG.info("update_user error {} region_name".format(e), extra=self.log_extra) s_os_client.delete_region_clients(self.region_name, clear_token=True) raise exceptions.SyncRequestFailedRetry except Exception as e: LOG.exception(e) raise exceptions.SyncRequestFailedRetry def sync_user(self, request, rsrc): # The system is populated with user entry for wrsroot. 
LOG.info("sync_user resource_info={}".format( request.orch_job.resource_info), extra=self.log_extra) user_dict = jsonutils.loads(request.orch_job.resource_info) payload = user_dict.get('payload') passwd_hash = None if type(payload) is list: for ipayload in payload: if ipayload.get('path') == '/passwd_hash': passwd_hash = ipayload.get('value') elif ipayload.get('path') == '/root_sig': root_sig = ipayload.get('value') elif ipayload.get('path') == '/passwd_expiry_days': passwd_expiry_days = ipayload.get('value') else: passwd_hash = payload.get('passwd_hash') root_sig = payload.get('root_sig') passwd_expiry_days = payload.get('passwd_expiry_days') LOG.info("sync_user from dict passwd_hash={} root_sig={} " "passwd_expiry_days={}".format( passwd_hash, root_sig, passwd_expiry_days), extra=self.log_extra) if not passwd_hash: LOG.info("sync_user no user update found in resource_info" "{}".format(request.orch_job.resource_info), extra=self.log_extra) return iuser = self.update_user(passwd_hash, root_sig, passwd_expiry_days) # Ensure subcloud resource is persisted to the DB for later subcloud_rsrc_id = self.persist_db_subcloud_resource( rsrc.id, iuser.uuid) LOG.info("User wrsroot {}:{} [{}] updated" .format(rsrc.id, subcloud_rsrc_id, passwd_hash), extra=self.log_extra) # SysInv Audit Related def get_master_resources(self, resource_type): os_client = sdk.OpenStackDriver(consts.CLOUD_0) if resource_type == consts.RESOURCE_TYPE_SYSINV_DNS: return [self.get_dns_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_NTP: return [self.get_ntp_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_SNMP_COMM: return self.get_snmp_community_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_SNMP_TRAPDEST: return self.get_snmp_trapdest_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_REMOTE_LOGGING: return [self.get_remotelogging_resource(os_client)] elif resource_type == 
consts.RESOURCE_TYPE_SYSINV_FIREWALL_RULES: return [self.get_firewallrules_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: return self.get_certificates_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_USER: return [self.get_user_resource(os_client)] else: LOG.error("Wrong resource type {}".format(resource_type), extra=self.log_extra) return None def get_subcloud_resources(self, resource_type): os_client = sdk.OpenStackDriver(self.region_name) if resource_type == consts.RESOURCE_TYPE_SYSINV_DNS: return [self.get_dns_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_NTP: return [self.get_ntp_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_SNMP_COMM: return self.get_snmp_community_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_SNMP_TRAPDEST: return self.get_snmp_trapdest_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_REMOTE_LOGGING: return [self.get_remotelogging_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_FIREWALL_RULES: return [self.get_firewallrules_resource(os_client)] elif resource_type == consts.RESOURCE_TYPE_SYSINV_CERTIFICATE: return self.get_certificates_resources(os_client) elif resource_type == consts.RESOURCE_TYPE_SYSINV_USER: return [self.get_user_resource(os_client)] else: LOG.error("Wrong resource type {}".format(resource_type), extra=self.log_extra) return None def get_dns_resource(self, os_client): try: idns = os_client.sysinv_client.get_dns() return idns except (keystone_exceptions.connection.ConnectTimeout, keystone_exceptions.ConnectFailure) as e:
<filename>fairensics/methods/disparate_impact.py<gh_stars>10-100
"""Wrapper and functions for DisparateImpact remover from fair-classification.

The base class _DisparateImpact implements predict function for both methods.
The classes AccurateDisparateImpact and FairDisparateImpact inherit from
_DisparateImpact and implement fit() functions with different input signatures
and algorithms for minimization.

Original code:
    https://github.com/mbilalzafar/fair-classification
"""
# NOTE(review): the "<filename>"/"<gh_stars>" tokens on the first line are
# dataset-scrape artifacts, not valid Python — confirm and remove upstream.
import warnings
from copy import deepcopy

import numpy as np
from aif360.algorithms import Transformer
from aif360.datasets.binary_label_dataset import BinaryLabelDataset
from scipy.optimize import minimize

from .fairness_warnings import FairnessBoundsWarning, DataSetSkewedWarning
from .utils import (
    add_intercept,
    get_protected_attributes_dict,
    get_one_hot_encoding,
    LossFunctions,
)
from ..fairensics_utils import get_unprotected_attributes


class _DisparateImpact(Transformer):
    """Base class for the two methods removing disparate impact.

    Holds the learned weight vector in self._params["w"] and implements
    prediction; subclasses implement fit().

    Example:
        https://github.com/nikikilbertus/fairensics/blob/master/examples/2_1_fair-classification-disparate-impact-example.ipynb
    """

    def __init__(self, loss_function, warn):
        """
        Args:
            loss_function (str): loss function string from utils.LossFunctions.
            warn (bool): if true, warnings are raised on certain bounds.
        """
        super(_DisparateImpact, self).__init__()
        self._warn = warn
        self._params = {}
        self._initialized = False
        self._loss_function = LossFunctions.get_loss_function(loss_function)

    def predict(self, dataset: BinaryLabelDataset):
        """Make predictions.

        Predictions are the sign of the dot product of the (intercept-
        augmented) unprotected features with the learned weights.

        Args:
            dataset: either AIF360 data set or np.ndarray.
                For AIF360 data sets, protected features will be ignored.
                For np.ndarray, only unprotected features should be included.

        Returns:
            Either AIF360 data set or np.ndarray if dataset is np.ndarray.

        Raises:
            ValueError: if fit() has not been called yet.
        """
        if not self._initialized:
            raise ValueError("Model not initialized. Run `fit` first.")

        # TODO: ok?
        if isinstance(dataset, np.ndarray):
            return np.sign(np.dot(add_intercept(dataset), self._params["w"]))

        dataset_new = dataset.copy(deepcopy=True)
        dataset_new.labels = np.sign(
            np.dot(
                add_intercept(get_unprotected_attributes(dataset)),
                self._params["w"],
            )
        )

        # Map the dataset labels to back to their original values.
        temp_labels = dataset.labels.copy()
        temp_labels[(dataset_new.labels == 1.0)] = dataset.favorable_label
        temp_labels[(dataset_new.labels == -1.0)] = dataset.unfavorable_label
        dataset_new.labels = temp_labels.copy()

        if self._warn:
            bound_warnings = FairnessBoundsWarning(dataset, dataset_new)
            bound_warnings.check_bounds()

        return dataset_new

    @staticmethod
    def _get_cov_thresh_dict(cov_thresh, protected_attribute_names):
        """Return dict with covariance threshold for each protected attribute.

        Each attribute gets the same threshold (cov_thresh).

        Args:
            cov_thresh (float): the covariance threshold.
            protected_attribute_names (list(str)): list of protected
                attribute names.

        Returns:
            sensitive_attrs_to_cov_thresh (dict): dict of form
                {"sensitive_attribute_name_1":cov_thresh, ...}.
        """
        sensitive_attrs_to_cov_thresh = {}
        for sens_attr_name in protected_attribute_names:
            sensitive_attrs_to_cov_thresh[sens_attr_name] = cov_thresh
        return sensitive_attrs_to_cov_thresh


class AccurateDisparateImpact(_DisparateImpact):
    """Minimize loss subject to fairness constraints.

    Loss "L" defines whether a logistic regression or a liner SVM is trained.

    Minimize
        L(w)

    Subject to
        cov(sensitive_attributes, true_labels, predictions)
            < sensitive_attrs_to_cov_thresh

    Where:
        predictions: the distance to the decision boundary
    """

    def __init__(self, loss_function=LossFunctions.NAME_LOG_REG, warn=True):
        super(AccurateDisparateImpact, self).__init__(
            loss_function=loss_function, warn=warn
        )

    def fit(
        self,
        dataset: BinaryLabelDataset,
        sensitive_attrs_to_cov_thresh=0,
        sensitive_attributes=None,
    ):
        """Fit the model.

        Args:
            dataset: AIF360 data set
            sensitive_attrs_to_cov_thresh (float or dict): dictionary as
                returned by _get_cov_thresh_dict(). If a single float is
                passed the dict is generated using the _get_cov_thresh_dict()
                method.
            sensitive_attributes (list(str)): names of protected attributes
                to apply constraints to.
        """
        if self._warn:
            dataset_warning = DataSetSkewedWarning(dataset)
            dataset_warning.check_dataset()

        # constraints are only applied to the selected sensitive attributes
        # if no list is provided, constraints are applied to all protected
        if sensitive_attributes is None:
            sensitive_attributes = dataset.protected_attribute_names

        # if sensitive_attrs_to_cov_thresh is not a dict, each sensitive
        # attribute gets the same threshold
        if not isinstance(sensitive_attrs_to_cov_thresh, dict):
            sensitive_attrs_to_cov_thresh = self._get_cov_thresh_dict(
                sensitive_attrs_to_cov_thresh,
                dataset.protected_attribute_names,
            )

        # fair-classification takes the protected attributes as dict
        protected_attributes_dict = get_protected_attributes_dict(
            dataset.protected_attribute_names, dataset.protected_attributes
        )

        # map labels to -1 and 1
        temp_labels = dataset.labels.copy()
        temp_labels[(dataset.labels == dataset.favorable_label)] = 1.0
        temp_labels[(dataset.labels == dataset.unfavorable_label)] = -1.0

        self._params["w"] = self._train_model_sub_to_fairness(
            add_intercept(get_unprotected_attributes(dataset)),
            temp_labels.ravel(),
            protected_attributes_dict,
            sensitive_attributes,
            sensitive_attrs_to_cov_thresh,
        )
        self._initialized = True
        return self

    def _train_model_sub_to_fairness(
        self,
        x,
        y,
        x_control,
        sensitive_attrs,
        sensitive_attrs_to_cov_thresh,
        max_iter=10000,
    ):
        """ Optimize the loss function under fairness constraints.

        Args:
            x (np.ndarray): 2D array of unprotected features and intercept.
            y (np.ndarray): 1D array of labels.
            x_control (dict): dict of protected attributes as returned by
                get_protected_attributes_dict().
            max_iter (int): maximum iterations for solver.
            sensitive_attrs, sensitive_attrs_to_cov_thresh: see fit() method.

        Returns:
            w (np.ndarray): 1D array of the learned weights.

        TODO: sensitive_attrs is redundant. sensitive_attrs_to_cov_thresh
            should only contain features for which constraints are applied.
        """
        constraints = self._get_fairness_constraint_list(
            x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh
        )

        # random initialization; SLSQP handles the inequality constraints
        x0 = np.random.rand(x.shape[1])
        w = minimize(
            fun=self._loss_function,
            x0=x0,
            args=(x, y),
            method="SLSQP",
            options={"maxiter": max_iter},
            constraints=constraints,
        )

        if not w.success:
            warnings.warn(
                "Optimization problem did not converge. "
                "Check the solution returned by the optimizer:"
            )
            print(w)

        return w.x

    def _get_fairness_constraint_list(
        self, x, y, x_control, sensitive_attrs, sensitive_attrs_to_cov_thresh
    ):
        """Get list of constraints for fairness.

        See fit method for details. One inequality constraint is added per
        binary attribute, and one per value of a categorical attribute.

        Returns:
            constraints (list(str)): fairness constraints in cvxpy format.
                https://www.cvxpy.org/api_reference/cvxpy.constraints.html#
        """
        constraints = []
        for attr in sensitive_attrs:

            attr_arr = x_control[attr]
            attr_arr_transformed, index_dict = get_one_hot_encoding(attr_arr)

            if index_dict is None:  # binary attribute
                thresh = sensitive_attrs_to_cov_thresh[attr]
                c = {
                    "type": "ineq",
                    "fun": self._test_sensitive_attr_constraint_cov,
                    "args": (x, y, attr_arr_transformed, thresh, False),
                }
                constraints.append(c)

            else:  # categorical attribute, need to set the cov threshs
                for attr_val, ind in index_dict.items():
                    attr_name = attr_val
                    thresh = sensitive_attrs_to_cov_thresh[attr][attr_name]
                    t = attr_arr_transformed[:, ind]
                    c = {
                        "type": "ineq",
                        "fun": self._test_sensitive_attr_constraint_cov,
                        "args": (x, y, t, thresh, False),
                    }
                    constraints.append(c)
        return constraints

    @staticmethod
    def _test_sensitive_attr_constraint_cov(
        model, x_arr, y_arr_dist_boundary, x_control, thresh, verbose
    ):
        """ The covariance is computed b/w the sensitive attr val and the
        distance from the boundary.
        If the model is None, we assume that the y_arr_dist_boundary
        contains the distance from the decision boundary.

        If the model is not None, we compute the dot product of model and
        x_arr. For the case of SVM, we pass the distance from the boundary
        because the intercept is internalized for the class and we have to
        compute the distance using the project function.

        Returns (thresh - |cov|): if the return value is >= 0 the constraint
        specified by the thresh parameter is satisfied; if it is < 0 the
        constraint is violated.
        """
        assert x_arr.shape[0] == x_control.shape[0]
        if len(x_control.shape) > 1:  # make sure we just have one column
            assert x_control.shape[1] == 1

        if model is None:
            arr = y_arr_dist_boundary  # simply the output labels
        else:
            arr = np.dot(
                model, x_arr.T
            )  # the sign of this is the output label

        arr = np.array(arr, dtype=np.float64)
        # Empirical covariance between the sensitive attribute and the
        # (signed) distance from the decision boundary.
        cov = np.dot(x_control - np.mean(x_control), arr) / len(x_control)

        # <0 if covariance > thresh -- condition is not satisfied
        ans = thresh - abs(cov)
        if verbose is True:
            print("Covariance is", cov)
            print("Diff is:", ans)
            print()
        return ans


class FairDisparateImpact(_DisparateImpact):
    """Minimize disparate impact subject to accuracy constraints.

    Loss "L" defines whether a logistic regression or a linear SVM is
    trained.

    Minimize
        cov(sensitive_attributes, predictions)

    Subject to
        L(w) <= (1-gamma)L(w*)

    Where
        L(w*): is the loss of the unconstrained classifier
        predictions: the distance to the decision boundary
    """

    def __init__(self, loss_function=LossFunctions.NAME_LOG_REG, warn=True):
        super(FairDisparateImpact, self).__init__(
            loss_function=loss_function, warn=warn
        )

    def fit(
        self,
        dataset: BinaryLabelDataset,
        sensitive_attributes=None,
        sep_constraint=False,
        gamma=0,
    ):
        """Fits the model.

        Args:
            dataset: AIF360 data set.
            sensitive_attributes (list(str)): names of protected attributes
                to apply constraints to.
            sep_constraint (bool): apply fine grained accuracy constraint.
gamma (float): trade off for accuracy for sep_constraint. """ if self._warn: dataset_warning = DataSetSkewedWarning(dataset) dataset_warning.check_dataset() # constraints are only applied to the selected sensitive attributes # if no list is provided, constraints for all protected attributes if sensitive_attributes is None: sensitive_attributes = dataset.protected_attribute_names # fair-classification takes the protected attributes as dict protected_attributes_dict = get_protected_attributes_dict( dataset.protected_attribute_names, dataset.protected_attributes ) # map labels to -1 and 1 temp_labels = dataset.labels.copy() temp_labels[(dataset.labels == dataset.favorable_label)] = 1.0 temp_labels[(dataset.labels == dataset.unfavorable_label)] = -1.0 self._params["w"] = self._train_model_sub_to_acc( add_intercept(get_unprotected_attributes(dataset)), temp_labels.ravel(), protected_attributes_dict, sensitive_attributes, sep_constraint, gamma, ) self._initialized = True return self def _train_model_sub_to_acc( self, x, y, x_control, sensitive_attrs, sep_constraint, gamma=None, max_iter=10000, ): """Optimize fairness subject to accuracy constraints. WARNING: Only first protected attribute is considered as constraint. All others are ignored. Args: x (np.ndarray): 2D array of unprotected features and intercept. y (np.ndarray): 1D array of labels. x_control (dict): dict of protected attributes as returned by get_protected_attributes_dict(). max_iter (int): maximum number of iterations for solver sep_constraint, sensitive_attrs, gamma: see fit() method Returns: w (np.ndarray): 1D, the learned weight vector for the classifier. 
""" def cross_cov_abs_optm_func(weight_vec, x_in, x_control_in_arr): cross_cov = x_control_in_arr - np.mean(x_control_in_arr) cross_cov *= np.dot(weight_vec, x_in.T) return float(abs(sum(cross_cov))) / float(x_in.shape[0]) x0 = np.random.rand(x.shape[1]) # get the initial loss without constraints w = minimize( fun=self._loss_function, x0=x0, args=(x, y), method="SLSQP", options={"maxiter": max_iter}, ) old_w = deepcopy(w.x) constraints = self._get_accuracy_constraint_list( x, y, x_control, w, gamma, sep_constraint, sensitive_attrs ) if len(x_control) > 1: warnings.warn( "Only the first protected attribute is considered " "with this constraint." ) # TODO: only the first protected attribute is passed # optimize for fairness under the unconstrained accuracy loss w
<reponame>koningrobot/blocks<gh_stars>0 import itertools import unittest from collections import OrderedDict import numpy import theano from numpy.testing import assert_allclose, assert_raises from theano import tensor from theano.gof.graph import is_same_graph from blocks.utils import is_shared_variable from blocks.bricks.base import application from blocks.bricks import Tanh from blocks.bricks.recurrent import ( recurrent, BaseRecurrent, GatedRecurrent, SimpleRecurrent, Bidirectional, LSTM, RecurrentStack, RECURRENTSTACK_SEPARATOR) from blocks.initialization import Constant, IsotropicGaussian, Orthogonal from blocks.filter import get_application_call, VariableFilter from blocks.graph import ComputationGraph from blocks.roles import INITIAL_STATE class RecurrentWrapperTestClass(BaseRecurrent): def __init__(self, dim, ** kwargs): super(RecurrentWrapperTestClass, self).__init__(self, ** kwargs) self.dim = dim def get_dim(self, name): if name in ['inputs', 'states', 'outputs', 'states_2', 'outputs_2']: return self.dim if name == 'mask': return 0 return super(RecurrentWrapperTestClass, self).get_dim(name) @recurrent(sequences=['inputs', 'mask'], states=['states', 'states_2'], outputs=['outputs', 'states_2', 'outputs_2', 'states'], contexts=[]) def apply(self, inputs=None, states=None, states_2=None, mask=None): next_states = states + inputs next_states_2 = states_2 + .5 if mask: next_states = (mask[:, None] * next_states + (1 - mask[:, None]) * states) outputs = 10 * next_states outputs_2 = 10 * next_states_2 return outputs, next_states_2, outputs_2, next_states class TestRecurrentWrapper(unittest.TestCase): def setUp(self): self.recurrent_example = RecurrentWrapperTestClass(dim=1) def test(self): X = tensor.tensor3('X') out, H2, out_2, H = self.recurrent_example.apply( inputs=X, mask=None) x_val = numpy.ones((5, 1, 1), dtype=theano.config.floatX) h = H.eval({X: x_val}) h2 = H2.eval({X: x_val}) out_eval = out.eval({X: x_val}) out_2_eval = out_2.eval({X: x_val}) # This 
also implicitly tests that the initial states are zeros assert_allclose(h, x_val.cumsum(axis=0)) assert_allclose(h2, .5 * (numpy.arange(5).reshape((5, 1, 1)) + 1)) assert_allclose(h * 10, out_eval) assert_allclose(h2 * 10, out_2_eval) class RecurrentBrickWithBugInInitialStates(BaseRecurrent): @recurrent(sequences=[], contexts=[], states=['states'], outputs=['states']) def apply(self, states): return states @recurrent(sequences=[], contexts=[], states=['states2'], outputs=['states2']) def apply2(self, states): return states def get_dim(self, name): return 100 def test_bug_in_initial_states(): def do(): brick = RecurrentBrickWithBugInInitialStates() brick.apply2(n_steps=3, batch_size=5) assert_raises(KeyError, do) class TestSimpleRecurrent(unittest.TestCase): def setUp(self): self.simple = SimpleRecurrent(dim=3, weights_init=Constant(2), activation=Tanh()) self.simple.initialize() def test_one_step(self): h0 = tensor.matrix('h0') x = tensor.matrix('x') mask = tensor.vector('mask') h1 = self.simple.apply(x, h0, mask=mask, iterate=False) next_h = theano.function(inputs=[h0, x, mask], outputs=[h1]) h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]], dtype=theano.config.floatX) x_val = 0.1 * numpy.array([[1, 2, 3], [4, 5, 6]], dtype=theano.config.floatX) mask_val = numpy.array([1, 0]).astype(theano.config.floatX) h1_val = numpy.tanh(h0_val.dot(2 * numpy.ones((3, 3))) + x_val) h1_val = mask_val[:, None] * h1_val + (1 - mask_val[:, None]) * h0_val assert_allclose(h1_val, next_h(h0_val, x_val, mask_val)[0]) def test_many_steps(self): x = tensor.tensor3('x') mask = tensor.matrix('mask') h = self.simple.apply(x, mask=mask, iterate=True) calc_h = theano.function(inputs=[x, mask], outputs=[h]) x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))), dtype=theano.config.floatX) x_val = numpy.ones((24, 4, 3), dtype=theano.config.floatX) * x_val[..., None] mask_val = numpy.ones((24, 4), dtype=theano.config.floatX) mask_val[12:24, 3] = 0 h_val = numpy.zeros((25, 4, 3), 
dtype=theano.config.floatX) for i in range(1, 25): h_val[i] = numpy.tanh(h_val[i - 1].dot( 2 * numpy.ones((3, 3))) + x_val[i - 1]) h_val[i] = (mask_val[i - 1, :, None] * h_val[i] + (1 - mask_val[i - 1, :, None]) * h_val[i - 1]) h_val = h_val[1:] assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04) # Also test that initial state is a parameter initial_state, = VariableFilter(roles=[INITIAL_STATE])( ComputationGraph(h)) assert is_shared_variable(initial_state) assert initial_state.name == 'initial_state' class TestLSTM(unittest.TestCase): def setUp(self): self.lstm = LSTM(dim=3, weights_init=Constant(2), biases_init=Constant(0)) self.lstm.initialize() def test_one_step(self): h0 = tensor.matrix('h0') c0 = tensor.matrix('c0') x = tensor.matrix('x') h1, c1 = self.lstm.apply(x, h0, c0, iterate=False) next_h = theano.function(inputs=[x, h0, c0], outputs=[h1]) h0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]], dtype=theano.config.floatX) c0_val = 0.1 * numpy.array([[1, 1, 0], [0, 1, 1]], dtype=theano.config.floatX) x_val = 0.1 * numpy.array([range(12), range(12, 24)], dtype=theano.config.floatX) W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX) # omitting biases because they are zero activation = numpy.dot(h0_val, W_state_val) + x_val def sigmoid(x): return 1. / (1. 
+ numpy.exp(-x)) i_t = sigmoid(activation[:, :3] + c0_val * W_cell_to_in) f_t = sigmoid(activation[:, 3:6] + c0_val * W_cell_to_forget) next_cells = f_t * c0_val + i_t * numpy.tanh(activation[:, 6:9]) o_t = sigmoid(activation[:, 9:12] + next_cells * W_cell_to_out) h1_val = o_t * numpy.tanh(next_cells) assert_allclose(h1_val, next_h(x_val, h0_val, c0_val)[0], rtol=1e-6) def test_many_steps(self): x = tensor.tensor3('x') mask = tensor.matrix('mask') h, c = self.lstm.apply(x, mask=mask, iterate=True) calc_h = theano.function(inputs=[x, mask], outputs=[h]) x_val = (0.1 * numpy.asarray( list(itertools.islice(itertools.permutations(range(12)), 0, 24)), dtype=theano.config.floatX)) x_val = numpy.ones((24, 4, 12), dtype=theano.config.floatX) * x_val[:, None, :] mask_val = numpy.ones((24, 4), dtype=theano.config.floatX) mask_val[12:24, 3] = 0 h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX) c_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX) W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX) def sigmoid(x): return 1. / (1. 
+ numpy.exp(-x)) for i in range(1, 25): activation = numpy.dot(h_val[i-1], W_state_val) + x_val[i-1] i_t = sigmoid(activation[:, :3] + c_val[i-1] * W_cell_to_in) f_t = sigmoid(activation[:, 3:6] + c_val[i-1] * W_cell_to_forget) c_val[i] = f_t * c_val[i-1] + i_t * numpy.tanh(activation[:, 6:9]) o_t = sigmoid(activation[:, 9:12] + c_val[i] * W_cell_to_out) h_val[i] = o_t * numpy.tanh(c_val[i]) h_val[i] = (mask_val[i - 1, :, None] * h_val[i] + (1 - mask_val[i - 1, :, None]) * h_val[i - 1]) c_val[i] = (mask_val[i - 1, :, None] * c_val[i] + (1 - mask_val[i - 1, :, None]) * c_val[i - 1]) h_val = h_val[1:] assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04) # Also test that initial state is a parameter initial1, initial2 = VariableFilter(roles=[INITIAL_STATE])( ComputationGraph(h)) assert is_shared_variable(initial1) assert is_shared_variable(initial2) assert {initial1.name, initial2.name} == { 'initial_state', 'initial_cells'} class TestRecurrentStack(unittest.TestCase): def setUp(self): depth = 4 self.depth = depth dim = 3 # don't change, hardwired in the code transitions = [LSTM(dim=dim) for _ in range(depth)] self.stack0 = RecurrentStack(transitions, weights_init=Constant(2), biases_init=Constant(0)) self.stack0.initialize() self.stack2 = RecurrentStack(transitions, weights_init=Constant(2), biases_init=Constant(0), skip_connections=True) self.stack2.initialize() def do_one_step(self, stack, skip_connections=False, low_memory=False): depth = self.depth # batch=2 h0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth, dtype=theano.config.floatX) c0_val = 0.1 * numpy.array([[[1, 1, 0], [0, 1, 1]]] * depth, dtype=theano.config.floatX) x_val = 0.1 * numpy.array([range(12), range(12, 24)], dtype=theano.config.floatX) # we will use same weights on all layers W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX) 
W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX) kwargs = OrderedDict() for d in range(depth): if d > 0: suffix = RECURRENTSTACK_SEPARATOR + str(d) else: suffix = '' if d == 0 or skip_connections: kwargs['inputs' + suffix] = tensor.matrix('inputs' + suffix) kwargs['inputs' + suffix].tag.test_value = x_val kwargs['states' + suffix] = tensor.matrix('states' + suffix) kwargs['states' + suffix].tag.test_value = h0_val[d] kwargs['cells' + suffix] = tensor.matrix('cells' + suffix) kwargs['cells' + suffix].tag.test_value = c0_val[d] results = stack.apply(iterate=False, low_memory=low_memory, **kwargs) next_h = theano.function(inputs=list(kwargs.values()), outputs=results) def sigmoid(x): return 1. / (1. + numpy.exp(-x)) h1_val = [] x_v = x_val args_val = [] for d in range(depth): if d == 0 or skip_connections: args_val.append(x_val) h0_v = h0_val[d] args_val.append(h0_v) c0_v = c0_val[d] args_val.append(c0_v) # omitting biases because they are zero activation = numpy.dot(h0_v, W_state_val) + x_v if skip_connections and d > 0: activation += x_val i_t = sigmoid(activation[:, :3] + c0_v * W_cell_to_in) f_t = sigmoid(activation[:, 3:6] + c0_v * W_cell_to_forget) next_cells = f_t * c0_v + i_t * numpy.tanh(activation[:, 6:9]) o_t = sigmoid(activation[:, 9:12] + next_cells * W_cell_to_out) h1_v = o_t * numpy.tanh(next_cells) # current layer output state transformed to input of next x_v = numpy.dot(h1_v, W_state2x_val) h1_val.append(h1_v) res = next_h(*args_val) for d in range(depth): assert_allclose(h1_val[d], res[d * 2], rtol=1e-6) def test_one_step(self): self.do_one_step(self.stack0) self.do_one_step(self.stack0, low_memory=True) self.do_one_step(self.stack2, skip_connections=True) self.do_one_step(self.stack2, skip_connections=True, low_memory=True) def do_many_steps(self, stack, skip_connections=False, low_memory=False): depth = self.depth # 24 steps # 4 batch examples # 12 dimensions per 
step x_val = (0.1 * numpy.asarray( list(itertools.islice(itertools.permutations(range(12)), 0, 24)), dtype=theano.config.floatX)) x_val = numpy.ones((24, 4, 12), dtype=theano.config.floatX) * x_val[:, None, :] # mask the last third of steps mask_val = numpy.ones((24, 4), dtype=theano.config.floatX) mask_val[12:24, 3] = 0 # unroll all states and cells for all steps and also initial value h_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX) c_val = numpy.zeros((depth, 25, 4, 3), dtype=theano.config.floatX) # we will use same weights on all layers W_state2x_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_state_val = 2 * numpy.ones((3, 12), dtype=theano.config.floatX) W_cell_to_in = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_out = 2 * numpy.ones((3,), dtype=theano.config.floatX) W_cell_to_forget = 2 * numpy.ones((3,), dtype=theano.config.floatX) kwargs = OrderedDict() for d in range(depth): if d > 0: suffix = RECURRENTSTACK_SEPARATOR + str(d)
tools we need self.portal = DummySite(id='portal').__of__(self.app) cpm = self._makeOne() sm = getSiteManager() sm.registerUtility(cpm, ICachingPolicyManager) sm.registerUtility(DummyTool(), IMembershipTool) sm.registerUtility(DummyTool(), ITypesTool) # This is a FSPageTemplate that will be used as the View for # our content objects. It doesn't matter what it returns. path = os.path.join(self.skin_path_name, 'testPT2.pt') self.portal._setObject('dummy_view', FSPageTemplate('dummy_view', path)) uf = self.app.acl_users password = b'<PASSWORD>' uf.userFolderAddUser(portal_owner, password, ['Manager'], []) user = uf.getUserById(portal_owner) if not hasattr(user, 'aq_base'): user = user.__of__(uf) newSecurityManager(None, user) owner_auth = b'%s:%s' % (portal_owner, password) self.auth_header = 'Basic %s' % base64_encode(owner_auth) self.portal._setObject('doc1', DummyContent('doc1')) self.portal._setObject('doc2', DummyContent('doc2')) self.portal._setObject('doc3', DummyContent('doc3')) self.portal.doc1.modified_date = now self.portal.doc2.modified_date = now self.portal.doc3.modified_date = now # This policy only applies to doc1. It will not emit any ETag header # but it enables If-modified-since handling. cpm.addPolicy(policy_id='policy_no_etag', predicate='python:object.getId()=="doc1"', mtime_func='', max_age_secs=0, no_cache=0, no_store=0, must_revalidate=0, vary='', etag_func='', enable_304s=1) # This policy only applies to doc2. It will emit an ETag with # the constant value "abc" and also enable if-modified-since handling. cpm.addPolicy(policy_id='policy_etag', predicate='python:object.getId()=="doc2"', mtime_func='', max_age_secs=0, no_cache=0, no_store=0, must_revalidate=0, vary='', etag_func='string:abc', enable_304s=1) # This policy only applies to doc3. Etags with constant values of # "abc" are emitted, but if-modified-since handling is turned off. 
        cpm.addPolicy(policy_id='policy_disabled',
                      predicate='python:object.getId()=="doc3"',
                      mtime_func='',
                      max_age_secs=0,
                      no_cache=0,
                      no_store=0,
                      must_revalidate=0,
                      vary='',
                      etag_func='string:abc',
                      enable_304s=0)

    def tearDown(self):
        FSDVTest.tearDown(self)
        SecurityTest.tearDown(self)

    def _cleanup(self):
        # Clean up request and response
        req = self.portal.REQUEST
        for header in ('IF_MODIFIED_SINCE', 'HTTP_AUTHORIZATION',
                       'IF_NONE_MATCH'):
            if req.environ.get(header, None) is not None:
                del req.environ[header]
        req.RESPONSE.setStatus(200)

    def testUnconditionalGET(self):
        # In this case the Request does not specify any if-modified-since
        # value to take into account, thereby completely circumventing any
        # if-modified-since handling. This must not produce a response status
        # of 304, regardless of any other headers.
        self.portal.doc1()
        response = self.portal.REQUEST.RESPONSE
        self.assertEqual(response.getStatus(), 200)

    def testConditionalGETNoETag(self):
        yesterday = DateTime() - 1
        doc1 = self.portal.doc1
        request = doc1.REQUEST
        response = request.RESPONSE

        # If doc1 has been modified since yesterday (which it has), we want
        # the full rendering.
        request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(yesterday)
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc1()
        self.assertEqual(response.getStatus(), 200)

        self._cleanup()

        # If doc1 has not been modified since its creation (which it hasn't),
        # we do not need the full rendering. This must return a 304 response.
        request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc1.modified_date)
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc1()
        self.assertEqual(response.getStatus(), 304)
        self.assertNotEqual(response.getHeader('cache-control'), None)

        self._cleanup()

        # ETag handling is not enabled in the policy for doc1, so asking for
        # one will not produce any matches. We get the full rendering.
request.environ['IF_NONE_MATCH'] = '"123"' request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc1() self.assertEqual(response.getStatus(), 200) self._cleanup() # We are asking for an ETag as well as modifications after doc2 has # been created. Both won't match and wwe get the full rendering. request.environ['IF_NONE_MATCH'] = '"123"' request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc1.modified_date) request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc1() self.assertEqual(response.getStatus(), 200) self._cleanup() def testConditionalGETETag(self): yesterday = DateTime() - 1 doc2 = self.portal.doc2 request = doc2.REQUEST response = request.RESPONSE # Has doc2 been modified since yesterday? Yes it has, so we get the # full rendering. request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(yesterday) request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc2() self.assertEqual(response.getStatus(), 200) self._cleanup() # If doc2 has not been modified since its creation (which it hasn't), # we would get a 304 here. However, the policy for doc2 also expects # to get an ETag in the request, which we are not setting here. So # the policy fails and we get a full rendering. request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc2.modified_date) request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc2() self.assertEqual(response.getStatus(), 200) self._cleanup() # Now we are setting an ETag in our request, but an ETag that does not # match the policy's expected value. The policy fails and we get the # full rendering. request.environ['IF_NONE_MATCH'] = '"123"' request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc2() self.assertEqual(response.getStatus(), 200) self._cleanup() # Here we provide the correct and matching ETag value, and we don't # specify any if-modified-since condition. This is enough for our # policy to trigger 304. 
        request.environ['IF_NONE_MATCH'] = '"abc"'
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc2()
        self.assertEqual(response.getStatus(), 304)
        self.assertNotEqual(response.getHeader('cache-control'), None)

        self._cleanup()

        # We specify an ETag and a modification time condition that does not
        # match, so we get the full rendering.
        request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc2.modified_date)
        request.environ['IF_NONE_MATCH'] = '"123"'
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc2()
        self.assertEqual(response.getStatus(), 200)

        self._cleanup()

        # We hand in a matching modified time condition which is supposed to
        # trigger full rendering. This will lead the ETag condition to be
        # overridden.
        request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(yesterday)
        request.environ['IF_NONE_MATCH'] = '"abc"'
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc2()
        self.assertEqual(response.getStatus(), 200)

        self._cleanup()

        # Now we pass an ETag that matches the policy and a modified time
        # condition that is not fulfilled. It is safe to serve a 304.
        request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc2.modified_date)
        request.environ['IF_NONE_MATCH'] = '"abc"'
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc2()
        self.assertEqual(response.getStatus(), 304)
        self.assertNotEqual(response.getHeader('cache-control'), None)

        self._cleanup()

    def testConditionalGETDisabled(self):
        doc3 = self.portal.doc3
        request = doc3.REQUEST
        response = request.RESPONSE

        # Our policy disables any 304-handling, so even though the ETag
        # matches the policy, we will get the full rendering.
        request.environ['IF_NONE_MATCH'] = '"abc"'
        request.environ['HTTP_AUTHORIZATION'] = self.auth_header
        doc3()
        self.assertEqual(response.getStatus(), 200)

        self._cleanup()

        # Now both the ETag and the modified condition would trigger a 304
        # response *if* 304-handling was enabled. It is not in our policy, so
        # we get the full rendering again.
request.environ['IF_MODIFIED_SINCE'] = rfc1123_date(doc3.modified_date) request.environ['IF_NONE_MATCH'] = '"abc"' request.environ['HTTP_AUTHORIZATION'] = self.auth_header doc3() self.assertEqual(response.getStatus(), 200) self._cleanup() class FSObjMaker(FSDVTest): def _makeFSPageTemplate(self, id, filename): path = path_join(self.skin_path_name, filename) return FSPageTemplate(id, path) def _makeFSDTMLMethod(self, id, filename): path = path_join(self.skin_path_name, filename) return FSDTMLMethod(id, path) class NestedTemplateTests(TransactionalTest, FSObjMaker): layer = TraversingZCMLLayer def _getTargetClass(self): from ..CachingPolicyManager import CachingPolicyManager return CachingPolicyManager def _makeOne(self, *args, **kw): return self._getTargetClass()(*args, **kw) def setUp(self): from ..interfaces import ICachingPolicyManager FSObjMaker.setUp(self) TransactionalTest.setUp(self) # Create a fake portal and the tools we need self.portal = DummySite(id='portal').__of__(self.app) self.cpm = self._makeOne() sm = getSiteManager() sm.registerUtility(self.cpm, ICachingPolicyManager) sm.registerUtility(DummyTool(), IMembershipTool) sm.registerUtility(DummyTool(), ITypesTool) def tearDown(self): TransactionalTest.tearDown(self) FSObjMaker.tearDown(self) def test_subtemplate_cpm_1(self): # test that subtemplates dont call the cpm # set up site portal = self.portal now = DateTime() cpm = self.cpm cpm.addPolicy(policy_id='policy_op2', predicate='python:view=="output_page_2"', mtime_func='', max_age_secs=100, no_cache=0, no_store=0, must_revalidate=0, vary='doc1', etag_func='', s_max_age_secs=100, ) content = DummyContent(id='content', view_id='output_page_1') content.modified_date = now portal._setObject('content', content) output_page_1 = self._makeFSPageTemplate('output_page_1', 'output_page_1.zpt') output_page_2 = self._makeFSPageTemplate('output_page_2', 'output_page_2.zpt') portal._setObject('output_page_1', output_page_1) portal._setObject('output_page_2', 
output_page_2) portal.content() # no headers should be added by the CPM if all is well headers = {x.lower() for x in self.RESPONSE.headers} self.assertFalse('x-cache-headers-set-by' in headers) self.assertFalse('vary' in headers) def test_subtemplate_cpm_2(self): # test that calling content from a template doesnt call the cpm # just calling an FSDTMLMethod directly from another template does # not activate the bug because RESPONSE is not passed in portal = self.portal now = DateTime() cpm = self.cpm cpm.addPolicy(policy_id='policy_op4', predicate='python:view=="output_page_4"', mtime_func='', max_age_secs=100, no_cache=0, no_store=0, must_revalidate=0, vary='doc1', etag_func='', s_max_age_secs=100, ) content = DummyContent(id='content', view_id='output_page_3') content.modified_date = now portal._setObject('content', content) content2 = DummyContent(id='content2', view_id='output_page_4') content2.modified_date = now portal._setObject('content2', content2) output_page_3 = self._makeFSDTMLMethod('output_page_3', 'output_page_3.dtml') output_page_4 = self._makeFSDTMLMethod('output_page_4', 'output_page_4.dtml') portal._setObject('output_page_4', output_page_4) portal._setObject('output_page_3', output_page_3) # call the content portal.content() # no headers should be added by the CPM if all is well headers = {x.lower() for x in self.RESPONSE.headers} self.assertFalse('x-cache-headers-set-by' in headers) self.assertFalse('vary' in headers) def test_subtemplate_cpm_3(self): # test a bigger mix of zpt templates # set up site portal = self.portal now = DateTime() cpm = self.cpm cpm.addPolicy(policy_id='policy_nv1', predicate='python:view=="nested_view_1"', mtime_func='', max_age_secs=100, no_cache=0, no_store=0, must_revalidate=0, vary='doc1', etag_func='', s_max_age_secs=100, ) doc1 = DummyContent(id='doc1', view_id='nested_view') doc1.modified_date = now portal._setObject('doc1', doc1) doc2 = DummyContent(id='doc2', view_id='nested_view_1') doc2.modified_date = now 
portal._setObject('doc2', doc2) doc3 = DummyContent(id='doc3', view_id='nested_view_2') doc3.modified_date = now portal._setObject('doc3', doc3) nested_view = self._makeFSPageTemplate('nested_view', 'nested_view.zpt') nested_view_1 = self._makeFSPageTemplate('nested_view_1', 'nested_view_1.zpt') nested_view_2 = self._makeFSPageTemplate('nested_view_2', 'nested_view_2.zpt') portal._setObject('nested_view', nested_view) portal._setObject('nested_view_1', nested_view_1) portal._setObject('nested_view_2', nested_view_2) portal.doc1() # no headers should be added by the CPM if all is well headers = {x.lower() for x in self.RESPONSE.headers} self.assertFalse('x-cache-headers-set-by' in headers) self.assertFalse('vary' in headers) def test_mixed_subtemplate_cpm(self): # test a mix of zpt and dtml templates # set up site now = DateTime() portal = self.portal cpm = self.cpm cpm.addPolicy(policy_id='policy_nv1', predicate='python:view=="nested_view_1"', mtime_func='', max_age_secs=100, no_cache=0, no_store=0, must_revalidate=0, vary='doc1', etag_func='', s_max_age_secs=100, ) doc1 = DummyContent(id='doc1', view_id='nested_view', modified_date=now) portal._setObject('doc1', doc1) doc2 = DummyContent(id='doc2', view_id='nested_view_1', modified_date=now) portal._setObject('doc2', doc2) doc3 = DummyContent(id='doc3', view_id='nested_view_2', modified_date=now) portal._setObject('doc3', doc3) nested_view = self._makeFSPageTemplate('nested_view', 'nested_view.zpt') nested_view_1 = self._makeFSPageTemplate('nested_view_1', 'nested_view_1.zpt') nested_view_2 = self._makeFSDTMLMethod('nested_view_2', 'nested_view_2.dtml') portal._setObject('nested_view', nested_view) portal._setObject('nested_view_1', nested_view_1) portal._setObject('nested_view_2', nested_view_2) portal.doc1() # no headers should be added by
    def get(self, force=False, *args):
        # Fetch the property's content from the repository (delegates to
        # WikibasePage.get) and cache the declared datatype.
        if force or not hasattr(self, '_content'):
            WikibasePage.get(self, force=force, *args)
        self.type = self._content['datatype']

    def getType(self):
        """
        Returns the type that this item uses
        Examples: item, commons media file, StringValue, NumericalValue
        """
        # Lazily resolved via the repository; cached on the instance.
        if not hasattr(self, 'type'):
            self.type = self.repo.getPropertyType(self)
        return self.type


class QueryPage(WikibasePage):
    """
    For future usage, not implemented yet
    """
    def __init__(self, site, title):
        # Query pages live in namespace 122 and must have a title whose
        # id portion starts with 'u'.
        WikibasePage.__init__(self, site, title, ns=122)
        self.id = self.title(withNamespace=False).lower()
        if not self.id.startswith(u'u'):
            raise ValueError(u"'%s' is not a query page!" % self.title())


class Claim(PropertyPage):
    """
    Claims are standard claims as well as references.
    """
    def __init__(self, site, pid, snak=None, hash=None, isReference=False,
                 isQualifier=False):
        """
        Defined by the "snak" value, supplemented by site + pid

        @param site: repository the claim is on
        @param pid: property id, with "P" prefix
        @param snak: snak identifier for claim
        @param hash: hash identifier for references
        @param isReference: whether specified claim is a reference
        @param isQualifier: whether specified claim is a qualifier
        """
        PropertyPage.__init__(self, site, 'Property:' + pid)
        self.snak = snak
        self.hash = hash
        self.isReference = isReference
        self.isQualifier = isQualifier
        # A claim is exactly one of: main claim, reference, qualifier.
        if self.isQualifier and self.isReference:
            raise ValueError(u'Claim cannot be both a qualifier and reference.')
        self.sources = []
        self.qualifiers = {}
        self.target = None
        self.snaktype = 'value'
        self.on_item = None  # The item it's on

    @staticmethod
    def fromJSON(site, data):
        """
        Creates the claim object from JSON returned
        in the API call.
        """
        claim = Claim(site, data['mainsnak']['property'])
        # Distinguish main claims ('id'), references ('hash') and
        # qualifiers (neither) by which key the API response carries.
        if 'id' in data:
            claim.snak = data['id']
        elif 'hash' in data:
            claim.isReference = True
            claim.hash = data['hash']
        else:
            claim.isQualifier = True
        claim.snaktype = data['mainsnak']['snaktype']
        # Only snaktype 'value' carries a datavalue to deserialize.
        if claim.getSnakType() == 'value':
            if claim.getType() == 'wikibase-item':
                claim.target = ItemPage(site, 'Q' +
                                        str(data['mainsnak']['datavalue']['value']['numeric-id']))
            elif claim.getType() == 'commonsMedia':
                claim.target = ImagePage(site.image_repository(),
                                         'File:' + data['mainsnak']['datavalue']['value'])
            elif claim.getType() == 'globecoordinate':
                claim.target = pywikibot.Coordinate.fromWikibase(data['mainsnak']['datavalue']['value'], site)
            else:
                # This covers string type
                claim.target = data['mainsnak']['datavalue']['value']
        if 'references' in data:
            for source in data['references']:
                claim.sources.append(Claim.referenceFromJSON(site, source))
        if 'qualifiers' in data:
            # Qualifiers are grouped per property id; keep that grouping.
            for prop in data['qualifiers']:
                for qualifier in data['qualifiers'][prop]:
                    qual = Claim.qualifierFromJSON(site, qualifier)
                    if prop in claim.qualifiers:
                        claim.qualifiers[prop].append(qual)
                    else:
                        claim.qualifiers[prop] = [qual]
        return claim

    @staticmethod
    def referenceFromJSON(site, data):
        """
        Reference objects are represented a
        bit differently, and require some
        more handling.
        """
        # NOTE(review): dict.values()[0] is Python 2 only (dict_values is
        # not indexable on Python 3) -- consistent with the rest of this
        # module, but confirm the supported interpreter before porting.
        mainsnak = data['snaks'].values()[0][0]
        wrap = {'mainsnak': mainsnak, 'hash': data['hash']}
        return Claim.fromJSON(site, wrap)

    @staticmethod
    def qualifierFromJSON(site, data):
        """
        Qualifier objects are represented a bit
        differently like references, but I'm not
        sure if this even requires it's own function.
        """
        # Wrap the bare snak so fromJSON can treat it uniformly.
        wrap = {'mainsnak': data}
        return Claim.fromJSON(site, wrap)

    def setTarget(self, value):
        """
        Sets the target to the passed value.
        There should be checks to ensure type compliance
        """
        # Map Wikibase datatypes to the Python types accepted as targets.
        types = {'wikibase-item': ItemPage,
                 'string': basestring,
                 'commonsMedia': ImagePage,
                 'globecoordinate': pywikibot.Coordinate,
                 }
        if self.getType() in types:
            if not isinstance(value, types[self.getType()]):
                raise ValueError("%s is not type %s."
                                 % (value, str(types[self.getType()])))
        self.target = value

    def changeTarget(self, value=None, snaktype='value', **kwargs):
        """
        This actually saves the new target.
        """
        if value:
            self.setTarget(value)
        data = self.repo.changeClaimTarget(self, snaktype=snaktype,
                                           **kwargs)
        # TODO: Re-create the entire item from JSON, not just id
        self.snak = data['claim']['id']

    def getTarget(self):
        """
        Returns object that the property is associated with.
        None is returned if no target is set
        """
        return self.target

    def getSnakType(self):
        """
        Returns the "snaktype"
        Can be "value", "somevalue" or "novalue"
        """
        return self.snaktype

    def setSnakType(self, value):
        # Reject anything outside the three valid Wikibase snak types.
        if value in ['value', 'somevalue', 'novalue']:
            self.snaktype = value
        else:
            raise ValueError("snaktype must be 'value', 'somevalue', or 'novalue'.")

    def changeSnakType(self, value=None, **kwargs):
        """
        This actually saves the new snakvalue.
        TODO: Is this function really needed?
        """
        if value:
            self.setSnakType(value)
        self.changeTarget(snaktype=self.getSnakType(), **kwargs)

    def getSources(self):
        """
        Returns a list of Claims
        """
        return self.sources

    def addSource(self, source, **kwargs):
        """
        source is a Claim.
        adds it as a reference.
        """
        data = self.repo.editSource(self, source, new=True, **kwargs)
        # The API hands back the reference hash and the new page revision;
        # record both so local state matches the server.
        source.hash = data['reference']['hash']
        self.on_item.lastrevid = data['pageinfo']['lastrevid']
        self.sources.append(source)

    def _formatDataValue(self):
        """
        Format the target into the proper JSON datavalue that Wikibase wants
        """
        if self.getType() == 'wikibase-item':
            value = {'entity-type': 'item',
                     'numeric-id': self.getTarget().getID(numeric=True)}
        elif self.getType() == 'string':
            value = self.getTarget()
        elif self.getType() == 'commonsMedia':
            value = self.getTarget().title(withNamespace=False)
        elif self.getType() == 'globecoordinate':
            value = self.getTarget().toWikibase()
        else:
            raise NotImplementedError('%s datatype is not supported yet.'
                                      % self.getType())
        return value


class Revision(object):
    """A structure holding information about a single revision of a Page."""

    def __init__(self, revid, timestamp, user, anon=False, comment=u"",
                 text=None, minor=False):
        """All parameters correspond to object attributes (e.g., revid
        parameter is stored as self.revid)

        @param revid: Revision id number
        @type revid: int
        @param text: Revision wikitext.
        @type text: unicode, or None if text not yet retrieved
        @param timestamp: Revision time stamp
        @type timestamp: pywikibot.Timestamp
        @param user: user who edited this revision
        @type user: unicode
        @param anon: user is unregistered
        @type anon: bool
        @param comment: edit comment text
        @type comment: unicode
        @param minor: edit flagged as minor
        @type minor: bool
        """
        self.revid = revid
        self.text = text
        self.timestamp = timestamp
        self.user = user
        self.anon = anon
        self.comment = comment
        self.minor = minor
u'|&[A-Za-z0-9\x80-\xff]+;' u'|&#[0-9]+;' u'|&#x[0-9A-Fa-f]+;' ) def __init__(self, text, source=None, defaultNamespace=0): """Constructor @param text: the link text (everything appearing between [[ and ]] on a wiki page) @type text: unicode @param source: the Site on which the link was found (not necessarily the site to which the link refers) @type source: Site @param defaultNamespace: a namespace to use if the link does not contain one (defaults to 0) @type defaultNamespace: int """ assert source is None or isinstance(source, pywikibot.site.BaseSite), \ "source parameter should be a Site object" self._text = text self._source = source or pywikibot.Site() self._defaultns = defaultNamespace # preprocess text (these changes aren't site-dependent) # First remove anchor, which is stored unchanged, if there is one if u"|" in self._text: self._text, self._anchor = self._text.split(u"|", 1) else: self._anchor = None # Clean up the name, it can come from anywhere. # Convert HTML entities to unicode t = html2unicode(self._text) # Convert URL-encoded characters to unicode t = url2unicode(t, site=self._source) # Normalize unicode string to a NFC (composed) format to allow # proper string comparisons. According to # http://svn.wikimedia.org/viewvc/mediawiki/branches/REL1_6/phase3/includes/normal/UtfNormal.php?view=markup # the mediawiki code normalizes everything to NFC, not NFKC # (which might result in information loss). t = unicodedata.normalize('NFC', t) # This code was adapted from Title.php : secureAndSplit() # if u'\ufffd' in t: raise pywikibot.Error("Title contains illegal char (\\uFFFD)") # Replace underscores by spaces t = t.replace(u"_", u" ") # replace multiple spaces with a single space while u" " in t: t = t.replace(u" ", u" ") # Strip spaces at both ends t = t.strip() # Remove left-to-right and right-to-left markers. 
t = t.replace(u"\u200e", u"").replace(u"\u200f", u"") self._text = t def __repr__(self): return "pywikibot.page.Link(%r, %r)" % (self.title, self.site) def parse_site(self): """Parse only enough text to determine which site the link points to. This method does not parse anything after the first ":"; links with multiple interwiki prefixes (such as "wikt:fr:Parlais") need to be re-parsed on the first linked wiki to get the actual site. @return: tuple of (familyname, languagecode) for the linked site. """ t = self._text fam = self._source.family code = self._source.code while u":" in t: # Initial colon if t.startswith(u":"): # remove the colon but continue processing # remove any subsequent whitespace t = t.lstrip(u":").lstrip(u" ") continue prefix = t[ :t.index(u":")].lower() # part of text before : ns = self._source.ns_index(prefix) if ns: # The prefix is a namespace in the source wiki return (fam.name, code) if prefix in fam.langs: # prefix is a language code within the source wiki family return (fam.name, prefix) known = fam.get_known_families(site=self._source) if prefix in known: if known[prefix]
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This module contains all functions necessary to run the pycovid module
"""

__version__ = '0.0.1'
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__license__ = 'GPL'
__status__ = 'Development'
__date__ = '08/17/2020'


def commit_to_repo(repo, message=None, log=None):
    '''
    This function commits to the git repository active branch.

    Parameters
    ----------
    repo: obj, gitpython
        gitpython object containing the git repository data
    message: str, optional
        commit summary; when None an automated message carrying the
        current timestamp is used
    log: list, optional
        list of log lines to append status messages to; a fresh list is
        created when None

    Returns
    -------
    list
        the log with the outcome of the commit/push appended
    '''
    import os
    import sys

    # Point GitPython at the git executable and make sure its directory is
    # first in PATH *before* `import git` runs, because the path is
    # checked at import time.
    # NOTE(review): this path is Windows-specific -- confirm deployment
    # target before reuse.
    git_dir = r"C:\Program Files\Git\cmd"
    git_bin = os.path.join(git_dir, "git")
    os.putenv("GIT_PYTHON_GIT_EXECUTABLE", git_bin)
    os.environ.putenv("GIT_PYTHON_GIT_EXECUTABLE", git_bin)
    sys.path = [git_dir] + sys.path
    os.environ["PATH"] = os.pathsep.join([git_dir]) + os.pathsep + os.environ["PATH"]

    # Only import git now, because that's when the path is checked!
    import git  # noqa: F401 -- imported for its import-time git lookup
    from datetime import datetime

    # Creating commit information for repo index:
    now_str = datetime.strftime(datetime.now(), format='%Y-%m-%d %Hh%Mm')
    # Fixed: compare against None with `is not None`, not `!=`.
    summary = message if message is not None else "automated update {}".format(now_str)
    if log is None:
        log = []
    try:
        repo.git.add(update=True)
        repo.index.commit(summary)
        origin = repo.remote(name='origin')
        origin.push()
        log.append('----\n')
        log.append('Commit process succesfull\n')
        log.append('----\n')
    except Exception:
        # Best-effort: failures are reported via the log rather than
        # raised. Fixed: was a bare `except:` which also swallowed
        # KeyboardInterrupt / SystemExit.
        log.append('----\n')
        log.append('Not able to commit. Please check git information\n')
        log.append('----\n')
    return log


def get_date_modified(file_path):
    '''
    DataFrame object with last modified date information.

    This function returns a DataFrame with the files in the directory
    given by file_path as index and their last modified date as column

    Parameters
    ----------
    file_path : str
        local directory where the files are located

    Returns
    -------
    pandas.DataFrame
        one row per *.csv file found (recursively), column
        'last_modified' holding a ctime-formatted string
    '''
    from pandas import DataFrame, Series
    from os import walk, path
    from time import ctime

    lm_dict = {}
    # Recursive walk; only *.csv files are of interest here.
    for root, dirs, files in walk(file_path):
        for item in files:
            if '.csv' in item:
                lm_dict[item] = ctime(path.getmtime(path.join(root, item)))
    return DataFrame(Series(lm_dict), columns=['last_modified'])


def repo_info(repo, log=None):
    '''
    This function returns the information of the git repository.

    This algorithm is a direct adaptation of the one presented at:
    https://www.fullstackpython.com/blog/first-steps-gitpython.html

    Parameters
    ----------
    repo: obj, gitpython
        repository object to describe
    log: list, optional
        log lines list to append to; created when None

    Returns
    -------
    list
        the log with repository description lines appended
    '''
    import os

    # Repo object used to programmatically interact with Git repositories
    repo_path = os.getenv('GIT_REPO_PATH')
    if log is None:
        log = []
    # check that the repository loaded correctly
    if not repo.bare:
        log.append('Repo at {} successfully loaded.\n'.format(repo_path))
        log.append('Repo local path: {}\n'.format(repo.git.working_dir))
        log.append('Repo description: {}\n'.format(repo.description))
        log.append('Repo active branch: {}\n'.format(repo.active_branch))
        for remote in repo.remotes:
            log.append('Remote named "{}" with URL "{}"\n'.format(remote, remote.url))
        log.append('Last commit for repo: {}.\n'.format(str(repo.head.commit.hexsha)))
        # take the last commit then print some information
        commits = list(repo.iter_commits('master'))[:1]
        for commit in commits:
            log.append('----\n')
            log.append('commit: {}\n'.format(str(commit.hexsha)))
            log.append("\"{}\" by {} ({})\n".format(commit.summary,
                                                    commit.author.name,
                                                    commit.author.email))
            log.append(str(commit.authored_datetime) + '\n')
            log.append(str("count: {} and size: {}".format(commit.count(),
                                                           commit.size)) + '\n')
    return log


def country_mapping_function(country, mapping=None):
    '''
    Map a raw country name to its canonical name.

    Parameters
    ----------
    country: str
        country name as found in the raw data
    mapping: dict, optional
        mapping of raw -> canonical names. When omitted, the module-level
        ``country_mapping_dict`` is used for backward compatibility.
        NOTE(review): that global is never defined at module level in
        this file (raw_data_formatter builds it locally), so the
        no-argument call raises NameError -- confirm intended usage.

    Returns
    -------
    str
        the mapped name, or ``country`` unchanged when no mapping entry
        exists
    '''
    if mapping is None:
        mapping = country_mapping_dict
    # dict.get with default replaces the original if/else membership test.
    return mapping.get(country, country)
def raw_data_formatter(file_list, file_dir):
    '''
    Build one raw DataFrame from the daily John Hopkins CSV reports.

    Parameters
    ----------
    file_list: iterable of str
        file names shaped '%m-%d-%Y.csv'; the date is parsed from the name
    file_dir: str
        directory containing the files

    Returns
    -------
    pandas.DataFrame
        concatenated, column-normalized raw data with 'Date', recomputed
        'Active' and a 'Mortality rate in %' column
    '''
    import pandas as pd
    from datetime import datetime
    from os import path
    df = pd.DataFrame()
    for arquivo in file_list:
        file = path.join(file_dir, arquivo)
        # The report date is encoded in the file name, not in the data.
        date = datetime.strptime(arquivo.split(sep='.')[0], '%m-%d-%Y')
        df_arquivo = pd.read_csv(file)
        df_arquivo['Date'] = date
        df = pd.concat([df, df_arquivo])
    # Merging the data from columns with same content but different
    # headers (newer reports renamed several columns):
    Country = df.Country_Region
    Province = df.Province_State
    Last_Update = df.Last_Update
    Latitude = df.Lat
    Longitude = df.Long_
    df_aux = pd.DataFrame({'Country/Region': Country, 'Province/State': Province,
                           'Last Update': Last_Update, 'Latitude': Latitude,
                           'Longitude': Longitude})
    df = df.combine_first(df_aux)
    # Dropping columns that won't be used:
    df.drop(axis=1, labels=['Country_Region', 'Province_State', 'Last_Update',
                            'FIPS', 'Combined_Key', 'Long_', 'Lat', 'Admin2',
                            'Incidence_Rate', 'Case-Fatality_Ratio',
                            'Last Update'], inplace=True)
    # Replacing NaN values on numeric data with 0:
    new_values = {'Deaths': 0, 'Active': 0, 'Recovered': 0,
                  'Confirmed': 0, 'Latitude': 0, 'Longitude': 0}
    df.fillna(value=new_values, inplace=True)
    # Replacing NaN values on non numeric data with '-':
    df.fillna(value='-', inplace=True)
    # mapping the countries correctly:
    # NOTE(review): 'label_map.csv' is a cwd-relative path -- running from
    # another directory will fail; confirm this is always launched from
    # the project root.
    label_map = pd.read_csv('label_map.csv', header=None, index_col=0)
    country_mapping_dict = label_map.loc[label_map[2] == 'country'][1].to_dict()
    province_mapping_dict = label_map.loc[label_map[2] == 'province'][1].to_dict()
    df['Country/Region'] = df['Country/Region'].transform(
        lambda x: country_mapping_dict[x] if x in country_mapping_dict.keys() else x)
    df['Province/State'] = df['Province/State'].transform(
        lambda x: province_mapping_dict[x] if x in province_mapping_dict.keys() else x)
    # Establishing number of active cases as the difference between
    # Confirmed cases and Death cases:
    df['Active'] = df['Confirmed'] - df['Deaths'] - df['Recovered']
    # Calculating Mortality rate as the ratio between Deaths and
    # Confirmed cases for each day:
    df['Mortality rate in %'] = (df['Deaths'] / df['Confirmed'] * 100).fillna(value=0)
    return df


def world_data_formatter(df):
    '''
    Creates the world data report from the raw data dataframe.

    This function works along the raw_data as returned by the
    raw_data_formatter function. Changes in raw_data_formatter affect
    directly this function. It creates all columns necessary for analysis
    with Tableau from the John Hopkins Data Science Center and it returns
    a new DataFrame object with calculated columns.

    Parameters
    ----------
    df: obj, DataFrame
        the raw data DataFrame as returned by the raw_data_formatter
        function.
    '''
    import pandas as pd
    from numpy import inf, NaN, where
    from datetime import datetime
    # Aggregate provinces into country totals, one row per country/date.
    df_by_country = df.groupby(['Country/Region', 'Date']).sum().reset_index()
    # Calculating the number of days since the 1st case:
    df_by_country['Days_since_1st_case'] = df_by_country['Date']
    countries = df_by_country['Country/Region'].unique()
    for country in countries:
        idx = where(df_by_country['Country/Region'] == country)
        # Earliest date for this country = day of the first case.
        first_date = pd.to_datetime(df_by_country['Date'].loc[min(idx[0])])
        for index in idx[0]:
            date_diff = (pd.to_datetime(df_by_country.at[index, 'Days_since_1st_case'])
                         - first_date).days
            df_by_country.at[index, 'Days_since_1st_case'] = date_diff
    # columns over which the calculations will be performed
    root_columns = ['Active', 'Confirmed', 'Deaths', 'Recovered']
    # creating columns of daily percentage of increase in values:
    # NOTE(review): shift()/pct_change() below operate over the whole
    # frame, so the first row of each country diffs against the previous
    # country's last row (province_data_formatter uses a grouped diff
    # instead) -- confirm whether that boundary artifact is acceptable.
    for col in root_columns:
        col_daily_inc = col + "_daily_%inc_by_country"
        col_new_cases = col + '_new_cases'
        col_new_cases_inc = col + '_new_cases_inc_rate'
        col_new_cases_inc_speed = col + '_new_cases_inc_rate_speed'
        df_by_country[col_new_cases] = (df_by_country[col]
                                        - df_by_country[col].shift(periods=1)
                                        ).fillna(value=0)
        df_by_country[col_daily_inc] = df_by_country[col].pct_change().replace([inf, NaN], 0) * 100
        # 1st derivative of column datas. It represents the rate of
        # change in new cases:
        df_by_country[col_new_cases_inc] = (df_by_country[col_new_cases]
                                            - df_by_country[col_new_cases].shift(periods=1)
                                            ).fillna(value=0)
        # 2nd derivative of column datas. It represents the acceleration
        # of the increase rate of the new cases:
        df_by_country[col_new_cases_inc_speed] = (df_by_country[col_new_cases_inc]
                                                  - df_by_country[col_new_cases_inc].shift(periods=1)
                                                  ).fillna(value=0)
    return df_by_country


def province_data_formatter(df):
    '''
    Creates the world data report from the raw data dataframe.

    This function works along the raw_data as returned by the
    raw_data_formatter function. Changes in raw_data_formatter affect
    directly this function. It creates all columns necessary for analysis
    with Power BI from the John Hopkins Data Science Center and it
    returns a new DataFrame object with calculated columns.

    Parameters
    ----------
    raw_data: obj, DataFrame
        the raw data DataFrame as returned by the raw_data_formatter
        function.
    '''
    from pandas import concat
    columns = ['Province/State', 'Country/Region', 'Date', 'Confirmed',
               'Active', 'Recovered', 'Deaths']
    # One row per province/date.
    df = df[columns].groupby(['Province/State', 'Country/Region', 'Date']).sum().reset_index()
    columns = ['Confirmed', 'Active', 'Recovered', 'Deaths']
    new_cases = [item + ' new cases' for item in columns]
    # Grouped diff: new cases never cross a province boundary.
    df[new_cases] = df.groupby('Province/State')[columns].diff().fillna(value=0)
    columns_mov_avg = columns.copy()
    columns_mov_avg.extend(new_cases)
    # 3-, 7- and 15-day moving averages for every base and new-cases column.
    mov_avg = [3, 7, 15]
    df_province = df.copy()
    for day in mov_avg:
        new_columns = [item + ' {}-day mov avg'.format(day) for item in columns_mov_avg]
        df_aux = df.groupby('Province/State').rolling(day).mean().fillna(value=0).reset_index()
        df_aux.drop(['Province/State', 'level_1'], axis=1, inplace=True)
        df_aux.columns = new_columns
        df_province = concat([df_province, df_aux], axis=1)
    return df_province


def flourish_racing_bars(df, parameters, initial_date, file_dir, file_name='racing_bars'):
    '''
    With this function it is possible to generate the dataset as used by
    Florish Studio @ https://app.flourish.studio/@psycho.presley

    Parameters
    ----------
    df: obj, DataFrame
        pandas DataFrame with the data to be used. The DataFrame must
        have been generated by the world_data_formatter function
        presented in the pycovidfunc.py module
    parameters: str, array-like
        list with the columns of df to be used. Each column will generate
        one separate and independent file to be used in Flourish studio
    initial_date: str
        string of the date in the YYYY-MM-DD format to be the first date
        to be considered in the final file
    file_dir: str
        string of the root dir where the flourish data must be saved
    file_name: str
        the name of the *.csv file to be created
    '''
    from pandas import DataFrame, concat
    from os import path
    print('--------------------------')
    print('Creating files for the flourish racing bars chart')
    try:
        countries = df['Country/Region'].unique().tolist()
        for item in parameters:
            print('creating the {} cases file'.format(item))
            columns = ['Country/Region', 'Date', item]
            flourish = DataFrame()
            for country in countries:
                # Pivot each country to one row of dates -> values.
                df_aux = df[columns].loc[df['Country/Region'] == country]
                df_aux = df_aux.pivot(index='Country/Region', columns='Date',
                                      values=item)
                # Interpolate small gaps (up to 3 consecutive dates).
                flourish = concat([flourish, df_aux]).interpolate(method='linear', limit=3)
            flourish.fillna(method='bfill', inplace=True)
            file = path.join(file_dir, file_name + '_' + item + '.csv')
            flourish.loc[:, initial_date:].to_csv(file)
        print('Files created succesfully!')
    except:
        # NOTE(review): bare except hides the actual failure reason;
        # consider `except Exception as e` and printing/logging `e`.
        print('Process aborted! No files for flourish studio were created.')
    finally:
        print('End execution of the flourish racing bars chart function.')
        print('--------------------------')
The DataFrame must have been generated by the world_data_formatter function presented in the pycovidfunc.py module seats: str, array-like list with the columns of df to be used as seats. Each column represents one seat tab in the Flourish studio parliament chart region_mapping_chart: dict dictionary with the countries as keys and their region as values for region mapping file_dir: str string of the root dir where the flourish data must be saved places: int desired
#!/usr/bin/python # # Copyright 2017 University of Southern California # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Client for generating pyramidal tiles. """ import os import subprocess import json import urlparse import sys import traceback import time import shutil import smtplib from email.mime.text import MIMEText import socket import czifile from dateutil.parser import parse from bioformats import BioformatsClient from lxml.etree import XMLSyntaxError from socket import gaierror, EAI_AGAIN from deriva.core import PollingErmrestCatalog, HatracStore, urlquote mail_footer = 'Do not reply to this message. This is an automated message generated by the system, which does not receive email messages.' class PyramidalClient (object): """Network client for generating pyramidal tiles. 
    ## Derived from the ermrest iobox service client
    def __init__(self, **kwargs):
        # All configuration arrives through keyword arguments; missing
        # keys simply become None via dict.get.
        self.metadata = kwargs.get("metadata")
        self.baseuri = kwargs.get("baseuri")
        # Split the base URI into scheme / host / optional port / path.
        o = urlparse.urlparse(self.baseuri)
        self.scheme = o[0]
        host_port = o[1].split(":")
        self.host = host_port[0]
        self.path = o.path
        self.port = None
        if len(host_port) > 1:
            self.port = host_port[1]
        self.dzi = kwargs.get("dzi")
        self.thumbnails = kwargs.get("thumbnails")
        self.czi2dzi = kwargs.get("czi2dzi")
        self.viewer = kwargs.get("viewer")
        self.czirules = kwargs.get("czirules")
        self.showinf = kwargs.get("showinf")
        self.data_scratch = kwargs.get("data_scratch")
        self.cookie = kwargs.get("cookie")
        # Hatrac object store and polling ERMrest catalog share the
        # same authentication cookie.
        self.store = HatracStore(
            self.scheme,
            self.host,
            {'cookie': self.cookie}
        )
        self.catalog = PollingErmrestCatalog(
            self.scheme,
            self.host,
            self.path.split('/')[-1],
            {'cookie': self.cookie}
        )
        self.mail_server = kwargs.get("mail_server")
        self.mail_sender = kwargs.get("mail_sender")
        self.mail_receiver = kwargs.get("mail_receiver")
        self.logger = kwargs.get("logger")
        self.logger.debug('Client initialized.')

    """
    Send email notification
    """
    def sendMail(self, subject, text):
        # Mailing is optional: silently skip unless all three settings
        # (server, sender, receiver) are configured.
        if self.mail_server and self.mail_sender and self.mail_receiver:
            retry = 0
            ready = False
            while not ready:
                try:
                    msg = MIMEText('%s\n\n%s' % (text, mail_footer), 'plain')
                    msg['Subject'] = subject
                    msg['From'] = self.mail_sender
                    msg['To'] = self.mail_receiver
                    s = smtplib.SMTP(self.mail_server)
                    s.sendmail(self.mail_sender, self.mail_receiver.split(','), msg.as_string())
                    s.quit()
                    self.logger.debug('Sent email notification.')
                    ready = True
                except socket.gaierror as e:
                    # Transient DNS failure: retry up to 10 times with a
                    # 100s pause; any other gaierror aborts immediately.
                    if e.errno == socket.EAI_AGAIN:
                        time.sleep(100)
                        retry = retry + 1
                        ready = retry > 10
                    else:
                        ready = True
                    if ready:
                        # NOTE(review): logs the current exception info on
                        # the give-up path; relies on sys.exc_info() still
                        # holding the gaierror -- confirm intent.
                        et, ev, tb = sys.exc_info()
                        self.logger.error('got exception "%s"' % str(ev))
                        self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                except:
                    # Any other failure: log and give up (no retry).
                    et, ev, tb = sys.exc_info()
                    self.logger.error('got exception "%s"' % str(ev))
                    self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                    ready = True

    """
    Start the process for generating pyramidal tiles
    """
    def start(self):
        # Top-level entry point: run the processing loop, and on any
        # unexpected failure log, notify by mail and re-raise.
        try:
            self.processHistologicalImages()
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: unexpected exception', '%s\nThe process might have been stopped\n' % str(traceback.format_exception(et, ev, tb)))
            raise

    def processHistologicalImages(self):
        """
        Query for detecting new slides - the most recently first
        """
        # Slides with a file but no pyramid yet, in progress or status
        # unset, newest (RCT) first.
        url = '/entity/Histological_Images:HE_Slide/!File_Bytes::null::&Pyramid_URL::null::/Processing_Status=in%%20progress;Processing_Status::null::@sort(%s::desc::)' % (urlquote('RCT'))
        resp = self.catalog.get(url)
        resp.raise_for_status()
        slides = resp.json()
        slideids = []
        for slide in slides:
            slideids.append((slide['ID'], slide['Filename'], slide['File_URL'], slide['RCT'], slide['File_MD5'], slide['Name'], slide['RID']))
        self.logger.debug('Processing %d HistologicalImages slides(s).' % (len(slideids)))
        for slideId, filename, file_url, creation_time, md5, name, rid in slideids:
            self.logger.debug('Generating pyramidal tiles for the file "%s"' % (filename))

            """
            Extract the file from hatrac
            """
            f = self.getHatracFile(filename, file_url)
            if f == None:
                # Download failed; skip this slide (already reported).
                continue

            """
            Create the directory for the tiles
            """
            # Tiles are grouped by creation year and file MD5.
            year = parse(creation_time).strftime("%Y")
            outdir = '%s/%s/%s' % (self.dzi, year, md5)
            if not os.path.exists(outdir):
                os.makedirs(outdir)

            """
            Convert the file to DZI
            """
            returncode = self.convert2dzi(f, outdir)
            if returncode != 0:
                """
                Update the slide table with the failure result.
                """
                self.updateAttributes('Histological_Images:HE_Slide',
                                      rid,
                                      ["Thumbnail", "Processing_Status"],
                                      {'RID': rid,
                                       'Thumbnail': '/thumbnails/generic/generic_genetic.png',
                                       'Processing_Status': 'czi2dzi error'
                                       })
                continue

            """
            Generate the thumbnail
            """
            thumbnail, urls = self.writeThumbnailImage(f, year, md5)
            if thumbnail == None:
                """
                Update the slide table with the failure result.
                """
                self.updateAttributes('Histological_Images:HE_Slide',
                                      rid,
                                      ["Thumbnail", "Processing_Status"],
                                      {'RID': rid,
                                       'Thumbnail': '/thumbnails/generic/generic_genetic.png',
                                       'Processing_Status': 'DZI failure'
                                       })
                continue

            """
            Extract the metadata
            """
            self.logger.debug('Extracting metadata for filename "%s"' % (filename))
            bioformatsClient = BioformatsClient(showinf=self.showinf, \
                                                czirules=self.czirules, \
                                                cziFile=f, \
                                                logger=self.logger)
            try:
                metadata = bioformatsClient.getMetadata()
                if metadata == None:
                    metadata = {}
                self.logger.debug('Metadata: "%s"' % str(metadata))
                # NOTE(review): 'temp.xml' is cwd-relative -- presumably a
                # side product of BioformatsClient; confirm.
                os.remove('temp.xml')
            except XMLSyntaxError:
                # Malformed XML from showinf: report and continue with
                # empty metadata rather than failing the slide.
                et, ev, tb = sys.exc_info()
                self.logger.error('got unexpected exception "%s"' % str(ev))
                self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
                self.sendMail('FAILURE Tiles: XMLSyntaxError', '%s\n' % str(traceback.format_exception(et, ev, tb)))
                metadata = {}
            # Scratch copy no longer needed once tiles + metadata exist.
            os.remove(f)

            """
            Update the slide table with the success result.
            """
            self.updateAttributes('Histological_Images:HE_Slide',
                                  rid,
                                  ["Thumbnail", "Pyramid_URL", "Processing_Status", "uri"],
                                  {'RID': rid,
                                   'Thumbnail': thumbnail,
                                   'Pyramid_URL': '/%s?%s' % (self.viewer, urls),
                                   'uri': '/%s?%s' % (self.viewer, urls),
                                   "Processing_Status": 'success'
                                   })
            self.logger.debug('SUCCEEDED created the tiles directory for the file "%s".' % (filename))

            """
            Update/Create the image entry with the metadata
            """
            obj = {}
            obj['ID'] = slideId
            obj['Name'] = name
            obj['url'] = '/chaise/viewer/#2/Histological_Images:HE_Slide/ID=%d' % slideId
            columns = ['ID', 'Name', 'url']
            # Copy over only the configured metadata columns present in
            # the extracted metadata.
            for col in self.metadata:
                if col in metadata and metadata[col] != None:
                    columns.append(col)
                    obj[col] = metadata[col]

            """
            Check if we have an update or create
            """
            rid = self.getRID('Histological_Images:HE_Image', 'ID=%d' % slideId)
            if rid != None:
                obj['RID'] = rid
                self.updateAttributes('Histological_Images:HE_Image',
                                      rid,
                                      columns,
                                      obj
                                      )
            else:
                self.createEntity('Histological_Images:HE_Image', obj)
            self.logger.debug('SUCCEEDED created the image entry for the file "%s".' % (filename))
        self.logger.debug('Ended HistologicalImages Slides Processing.')

    """
    Extract the file from hatrac
    """
    def getHatracFile(self, filename, file_url):
        # Download the named object from hatrac into the scratch area;
        # returns the local path, or None on any failure (reported by
        # log + mail).
        try:
            hatracFile = '%s/%s' % (self.data_scratch, filename)
            self.store.get_obj(file_url, destfilename=hatracFile)
            self.logger.debug('File "%s", %d bytes.' % (hatracFile, os.stat(hatracFile).st_size))
            return hatracFile
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: write thumbnail ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
            return None

    """
    Generate the thumbnail
    """
    def writeThumbnailImage(self, filename, year, md5):
        # Copy the top-level tile of the first channel as the thumbnail
        # and build the viewer query string listing every channel.
        # Returns (thumbnail_path, urls) or (None, None) on failure.
        try:
            scanDir = '%s/%s/%s' % (self.dzi, year, md5)
            channels = []
            for channel in os.listdir(scanDir):
                if os.path.isdir('%s%s%s' % (scanDir, os.sep, channel)):
                    channels.append(channel)
            outdir = '%s/%s' % (self.thumbnails, year)
            if not os.path.exists(outdir):
                os.makedirs(outdir)
            shutil.copyfile('%s/%s/%s/%s/0/0_0.jpg' % (self.dzi, year, md5, channels[0]), '%s/%s.jpg' % (outdir, md5))
            thumbnail = '/thumbnails/%s/%s.jpg' % (urlquote(year), urlquote(md5))
            urls = []
            for channel in channels:
                urls.append('url=/data/%s/%s/%s/ImageProperties.xml' % (year, md5, channel))
            return (thumbnail, '&'.join(urls))
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: write thumbnail ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
            # The downloaded source is removed on failure so a broken
            # file is not retried forever.
            os.remove(filename)
            return (None, None)

    """
    Convert the input file to DZI
    """
    def convert2dzi(self, filename, outdir):
        # Run the external czi2dzi converter from within the dzi root;
        # returns the converter's exit code (1 on unexpected exception).
        try:
            currentDirectory = os.getcwd()
            os.chdir(self.dzi)
            args = [self.czi2dzi, filename, outdir]
            p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdoutdata, stderrdata = p.communicate()
            returncode = p.returncode
            os.chdir(currentDirectory)
            if returncode != 0:
                self.logger.error('Can not convert czi to dzi for file "%s".\nstdoutdata: %s\nstderrdata: %s\n' % (filename, stdoutdata, stderrdata))
                self.sendMail('FAILURE Tiles', 'Can not convert czi to dzi for file "%s".\nstdoutdata: %s\nstderrdata: %s\n' % (filename, stdoutdata, stderrdata))
                # NOTE(review): removes the source file on converter
                # failure -- the caller also skips the slide; confirm the
                # file should not be kept for diagnosis.
                os.remove(filename)
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got unexpected exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: czi2dzi ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
            # NOTE(review): if the exception fires before Popen completes,
            # currentDirectory/stdoutdata/stderrdata may be unbound here,
            # raising a secondary NameError -- confirm.
            os.chdir(currentDirectory)
            self.logger.error('Can not generate pyramidal tiles for the file "%s".\nstdoutdata: %s\nstderrdata: %s\n' % (filename, stdoutdata, stderrdata))
            self.sendMail('FAILURE Tiles', 'Can not generate pyramidal tiles for the file "%s".\nstdoutdata: %s\nstderrdata: %s\n' % (filename, stdoutdata, stderrdata))
            returncode = 1
        return returncode

    """
    Update the ermrest attributes
    """
    def updateAttributes(self, path, rid, columns, row):
        """
        Update the ermrest attributes with the row values.
        """
        try:
            # attributegroup PUT keyed on RID, updating the given columns.
            columns = ','.join([urlquote(col) for col in columns])
            url = '/attributegroup/%s/RID;%s' % (path, columns)
            resp = self.catalog.put(
                url,
                json=[row]
            )
            resp.raise_for_status()
            self.logger.debug('SUCCEEDED updated the table "%s" for the RID "%s" with "%s".' % (path, rid, json.dumps(row, indent=4)))
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: reportFailure ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))

    """
    Insert a row in a table
    """
    def createEntity(self, path, row):
        """
        Insert the row in the table.
        """
        try:
            url = '/entity/%s' % (path)
            resp = self.catalog.post(
                url,
                json=[row]
            )
            resp.raise_for_status()
            self.logger.debug('SUCCEEDED created in the table "%s" the entry "%s".' % (path, json.dumps(row, indent=4)))
        except:
            et, ev, tb = sys.exc_info()
            self.logger.error('got exception "%s"' % str(ev))
            self.logger.error('%s' % str(traceback.format_exception(et, ev, tb)))
            self.sendMail('FAILURE Tiles: reportFailure ERROR', '%s\n' % str(traceback.format_exception(et, ev, tb)))
logging.warning("Sending of notification failed!") logging.warning("Exception type: %s" % (str(type(e)))) logging.warning(e) if not resp.ok: logging.error( "Sending of notification data for %s failed: (%d, %s)" % (model.installation_id, resp.status_code, resp.text) ) logging.error("Request body:") logging.error("%s" % (json.dumps(body))) else: logging.info("Appliance detection notification sent: %s" % (json.dumps(body))) else: logging.info("Notification: %s", json.dumps(body)) def _mqtt(self): """ Thread for data acquisition from the mqtt broker. """ def on_connect(client, userdata, flags, rc): print("MQTT connection returned " + mqtt.connack_string(rc)) def on_log(client, userdata, level, buf): print(buf) def on_disconnect(client, userdata, rc): logging.info("Disconnected With Result Code: %s " % rc) if rc != 0: logging.info('Unexpected MQTT disconnection. Will' 'auto-reconnect') retries = 1 success = False # Disable immediate clustering after reconnect # TODO: Is there a better way? # DELETE THIS # for m in self._models: # m._last_clustering_ts = datetime.datetime.now() while not success: try: broker = self._config['MQTT']['broker'] port = int(self._config['MQTT']['port']) logging.info("Trying to Reconnect") client.reinitialise(clean_session=True) client.connect(broker, port) success = True except Exception as e: wait = retries * 60 logging.info("Error in Retrying to Connect with Broker") logging.warning("Exception type: %s" % (str(type(e)))) logging.warning(e) time.sleep(wait) retries += 1 def on_message(client, userdata, message): x = message.topic.split('/') inst_id = x[1] if not self._accept_inst(inst_id): return msg = message.payload.decode('utf-8') # Convert message payload to pandas dataframe try: msg_d = json.loads(msg.replace('\'', '\"')) except Exception as e: logging.error("Exception occurred while decoding message:") logging.error(msg) logging.error(e) logging.error("Ignoring data") return data = pd.DataFrame(msg_d, index=[0]) 
data.rename(columns={"ts": "timestamp", "p": "active", "q": "reactive", "i": "current", "v": "voltage", "f": "frequency"}, inplace=True) data.set_index("timestamp", inplace=True, drop=True) data.index = pd.to_datetime(data.index, unit='s') data.index.name = None # Submit for processing by the model with self._model_lock[inst_id]: if self._recomputation_active[inst_id]: # If recomputation is active, keep data in a buffer for up # to MAX_REC_BUFFER entries logging.info('Recomputation is active, data in buffer') if self._recomputation_buffer[inst_id] is None: self._recomputation_buffer[inst_id] = data else: if len(self._recomputation_buffer[inst_id].index) > \ self.MAX_REC_BUFFER: logging.info('Max buffer size reached, ignoring') pass else: self._recomputation_buffer[inst_id].append(data) else: model = self._models[inst_id] if self._recomputation_buffer[inst_id] is not None: # There's data in the buffer, process these first logging.info('Processing buffer data') model.update(self._recomputation_buffer[inst_id]) self._recomputation_buffer[inst_id] = None model._reset() # TODO: ? 
# self._put_count[inst_id] = 1 else: logging.debug('NILM lock (MQTT message) %s %s' % (inst_id, datetime.datetime.now())) # Process the data model.update(data) self._put_count[inst_id] += 1 logging.debug('NILM unlock (MQTT message) %s %s' % (inst_id, datetime.datetime.now())) time.sleep(0.01) # Notify orchestrator for appliance detection if not self._recomputation_active[inst_id]: self._handle_notifications(self._models[inst_id]) # It is possible that _store_model cannot store, and keeps this to # True afterwards if self._put_count[inst_id] % self.STORE_PERIOD == 0: self._store_flag = True if self._store_flag: with self._model_lock[inst_id]: # Persistent storage self._store_model(inst_id) # Prepare the models (As it stands, it shouldn't do anything) logging.info('Loading models from database (MQTT) thread') for inst_id in self._inst_list: logging.info('Loading %s' % (inst_id)) self._load_or_create_model(inst_id) # Avoid clustering immediately after model load. # TODO: Better way to do this? 
# DELETE THIS # self._models[inst_id] = datetime.datetime.now() # Connect to MQTT ca = self._config['MQTT']['ca'] key = self._config['MQTT']['key'] crt = self._config['MQTT']['crt'] broker = self._config['MQTT']['broker'] port = int(self._config['MQTT']['port']) topic_prefix = self._config['MQTT']['topic_prefix'] # This causes problems with reconnections, for now just revert to an # empty id # if self._config['MQTT']['identity'] == "random": # # Option for random identity: # identity = "nilm" + str(int(np.random.rand() * 1000000)) # else: # identity = self._config['MQTT']['identity'] clean_session = self._config['MQTT'].getboolean('clean_session') # client = mqtt.Client(identity, clean_session=clean_session) client = mqtt.Client(clean_session=clean_session) client.tls_set(ca_certs=ca, keyfile=key, certfile=crt) client.tls_insecure_set(True) client.on_connect = on_connect # client.on_disconnect = on_disconnect client.on_log = on_log client.on_message = on_message client.connect(broker, port=port, keepalive=180) # Subscribe sub_list = [(topic_prefix + "/" + x, 2) for x in self._inst_list] client.subscribe(sub_list) # These timers may not be needed time.sleep(3) client.loop_forever() def _process_file(self): if len(self._inst_list) > 1: raise ValueError('Only one installation is supported in file mode') inst_id = self._inst_list[0] self._load_or_create_model(inst_id) model = self._models[inst_id] power = eeris.read_eeris(self._input_file_prefix, self._file_date_start, self._file_date_end) step = 3 # Hardcoded logging_step = 7200 start_ts = pd.Timestamp(self._file_date_start) power = power.loc[power.index > start_ts].dropna() end = power.shape[0] - power.shape[0] % step for i in range(0, end, step): data = power.iloc[i:i+step] model.update(data) if (i + step) % logging_step == 0: logging.debug('Processed %d seconds' % (i + step)) # Model storage if ((self._put_count[inst_id] // step) % self.STORE_PERIOD == 0): self._store_flag = True if self._store_flag: # Persistent 
storage self._store_model(inst_id) time.sleep(0.01) self._handle_notifications(model) self._put_count[inst_id] += step def _prepare_response_body(self, model): """ Wrapper function """ body = None if self._response == 'cenote': body = self._prepare_response_body_cenote(model) elif self._response == 'debug': body = self._prepare_response_body_debug(model) return body def _prepare_response_body_cenote(self, model): """ Prepare a response according to the specifications of Cenote. Check https://authecesofteng.github.io/cenote/ for more information. """ # Special case, when recomputation is active. inst_id = model.installation_id if self._recomputation_active[inst_id]: payload = [] app_d = {"_id": '000000000000000000000003', "name": "Recomputation in progress", "type": "recomputation", "active": 0.0, "reactive": 0.0} ts = dt.datetime.now().timestamp() * 1000 d = {"data": app_d, "timestamp": ts} payload.append(d) body_d = {"installation_id": str(inst_id), "payload": payload} body = json.dumps(body_d) return body # ts = dt.datetime.now().timestamp() * 1000 if model.last_processed_ts is not None: ts = model.last_processed_ts.timestamp() * 1000 else: payload = [] body_d = {"installation_id": str(inst_id), "payload": payload} return json.dumps(body_d) payload = [] # Insert background if model.is_background_overestimated(): app_d = {"_id": '000000000000000000000002', "name": "Other", "type": "residual", "active": model.running_avg_power[0], "reactive": model.running_avg_power[1]} d = {"data": app_d, "timestamp": ts} payload.append(d) else: app_d = {"_id": '000000000000000000000001', "name": "Background", "type": "background", "active": model.background_active, "reactive": 0.0} d = {"data": app_d, "timestamp": ts} payload.append(d) for i in range(len(model.live)): app = model.live[i] app_d = {"_id": app.appliance_id, "name": app.name, "type": app.category, "active": app.signature[0, 0], "reactive": app.signature[0, 1]} d = {"data": app_d, "timestamp": ts} payload.append(d) # 
We ignore residuals under 5 Watts. if model.residual_live[0] > 5.0 and model.background_active < 10000: app_d = {"_id": '000000000000000000000002', "name": "Other", "type": "residual", "active": model.residual_live[0], "reactive": model.residual_live[1]} d = {"data": app_d, "timestamp": ts} payload.append(d) body_d = {"installation_id": str(inst_id), "payload": payload} try: body = json.dumps(body_d) except (ValueError, TypeError): logging.error("Error while preparing response body:") logging.error(body_d['installation_id']) logging.error(body_d['payload']) for k in body_d['payload']: logging.error(body_d[k]) raise return body def _prepare_response_body_debug(self, model, lret=5): """ DO NOT USE. NEEDS REFACTORING Helper function to prepare response body. lret is the length of the returned _yest array (used for development/debugging, ignore it in production). """ return None # TODO Refactor according to new "live". live = model.live[['name', 'active', 'reactive']].to_json() ts = dt.datetime.now().strftime('%Y-%m-%dT%H:%M:%S%z') body = '''{ "timestamp": "%s", "appliances": %s, "edge_detected": %s, "edge_size": [%.2f, %.2f], "_yest": %s }''' % (ts, live, str(model.online_edge_detected).lower(), model.online_edge[0], model.online_edge[1], model._yest[-lret:].tolist()) return body # TODO: This is probably a better implementation, should replace current # after testing. def _store_model_NEXT_VERSION(self, inst_id): """ Helper function to store a model in the database. WARNING: This function assumes that the model is already loaded. Also, is NOT thread safe, never call it directly unless you know what you're doing. 
""" model = self._models[inst_id].deepcopy() model.clustering_thread = None model._lock = None modelstr = dill.dumps(model) upd = {'$set': { 'meterId': inst_id, 'lastUpdate': str(dt.datetime.now()), 'debugInstallation': True, 'modelHart': modelstr} } self._mdb.models.update_one({'meterId': inst_id}, upd) self._store_flag = False # TODO: This will probably be deprecated def _store_model(self, inst_id): """ Helper function to store a model in the database. WARNING: This function assumes that the model is already loaded. Also, is NOT thread safe, never call it directly unless you know what you're doing. """ model = self._models[inst_id] if model.is_clustering_active(): # Cannot store at this point logging.debug('Clustering thread active for %s, do not store' % (inst_id)) return modelstr = dill.dumps(model) upd = {'$set': { 'meterId': inst_id, 'lastUpdate': str(dt.datetime.now()), 'debugInstallation': True, 'modelHart': modelstr} } self._mdb.models.update_one({'meterId': inst_id}, upd) self._store_flag = False # logging.debug('Stored model for %s' % (inst_id)) def _recompute_model(self, inst_id, start_ts, end_ts, step=6 * 3600, warmup_period=2*3600, use_notifications=False): """ Recompute a model from data provided by a service. Variations of this routine can be created for different data sources. Parameters ---------- inst_id: str Installation id whose model we wan to recompute start_ts : int Start timestamp in seconds since UNIX epoch end_ts : int End timestamp in seconds since UNIX epoch step : int Step, in seconds to use for calculations warmup_period : int How many seconds to operate in non-batch mode with 3-second data frames, in order to prepare for the appropriate live operation. """ # TODO: Use
from copy import deepcopy import logging import sys import numpy as np import pytest from affine import Affine import rasterio from rasterio.enums import MergeAlg from rasterio.errors import WindowError, ShapeSkipWarning from rasterio.features import ( bounds, geometry_mask, geometry_window, is_valid_geom, rasterize, sieve, shapes) from .conftest import MockGeoInterface DEFAULT_SHAPE = (10, 10) def test_bounds_point(): g = {'type': 'Point', 'coordinates': [10, 10]} assert bounds(g) == (10, 10, 10, 10) assert bounds(MockGeoInterface(g)) == (10, 10, 10, 10) def test_bounds_line(): g = {'type': 'LineString', 'coordinates': [[0, 0], [10, 10]]} assert bounds(g) == (0, 0, 10, 10) assert bounds(MockGeoInterface(g)) == (0, 0, 10, 10) def test_bounds_ring(): g = {'type': 'LinearRing', 'coordinates': [[0, 0], [10, 10], [10, 0]]} assert bounds(g) == (0, 0, 10, 10) assert bounds(MockGeoInterface(g)) == (0, 0, 10, 10) def test_bounds_polygon(): g = {'type': 'Polygon', 'coordinates': [[[0, 0], [10, 10], [10, 0]]]} assert bounds(g) == (0, 0, 10, 10) assert bounds(MockGeoInterface(g)) == (0, 0, 10, 10) def test_bounds_z(): g = {'type': 'Point', 'coordinates': [10, 10, 10]} assert bounds(g) == (10, 10, 10, 10) assert bounds(MockGeoInterface(g)) == (10, 10, 10, 10) @pytest.mark.parametrize('geometry', [ {'type': 'Polygon'}, {'type': 'Polygon', 'not_coordinates': []}, {'type': 'bogus', 'not_coordinates': []}, { 'type': 'GeometryCollection', 'geometries': [ {'type': 'Point', 'coordinates': [1, 1]}, {'type': 'LineString', 'not_coordinates': [[-10, -20], [10, 20]]}, ] } ]) def test_bounds_invalid_obj(geometry): with pytest.raises(ValueError, match="geometry must be a GeoJSON-like geometry, GeometryCollection, or FeatureCollection"): bounds(geometry) def test_bounds_feature_collection(basic_featurecollection): fc = basic_featurecollection assert bounds(fc) == bounds(fc['features'][0]) == (2, 2, 4.25, 4.25) def test_bounds_geometry_collection(): gc = { 'type': 'GeometryCollection', 
'geometries': [ {'type': 'Point', 'coordinates': [1, 1]}, {'type': 'LineString', 'coordinates': [[-10, -20], [10, 20]]}, {'type': 'Polygon', 'coordinates': [[[5, 5], [25, 50], [25, 5]]]} ] } assert bounds(gc) == (-10, -20, 25, 50) assert bounds(MockGeoInterface(gc)) == (-10, -20, 25, 50) def test_bounds_existing_bbox(basic_featurecollection): """Test with existing bbox in geojson. Similar to that produced by rasterio. Values specifically modified here for testing, bboxes are not valid as written. """ fc = basic_featurecollection fc['bbox'] = [0, 10, 10, 20] fc['features'][0]['bbox'] = [0, 100, 10, 200] assert bounds(fc['features'][0]) == (0, 100, 10, 200) assert bounds(fc) == (0, 10, 10, 20) def test_geometry_mask(basic_geometry, basic_image_2x2): assert np.array_equal( basic_image_2x2 == 0, geometry_mask( [basic_geometry], out_shape=DEFAULT_SHAPE, transform=Affine.identity() ) ) def test_geometry_mask_invert(basic_geometry, basic_image_2x2): assert np.array_equal( basic_image_2x2, geometry_mask( [basic_geometry], out_shape=DEFAULT_SHAPE, transform=Affine.identity(), invert=True ) ) @pytest.mark.parametrize("geom", [{'type': 'Invalid'}, {'type': 'Point'}, {'type': 'Point', 'coordinates': []}]) def test_geometry_invalid_geom(geom): """An invalid geometry should fail""" with pytest.raises(ValueError) as exc_info, pytest.warns(ShapeSkipWarning): geometry_mask( [geom], out_shape=DEFAULT_SHAPE, transform=Affine.identity()) assert 'No valid geometry objects found for rasterize' in exc_info.value.args[0] def test_geometry_mask_invalid_shape(basic_geometry): """A width==0 or height==0 should fail with ValueError""" for shape in [(0, 0), (1, 0), (0, 1)]: with pytest.raises(ValueError) as exc_info: geometry_mask( [basic_geometry], out_shape=shape, transform=Affine.identity()) assert 'must be > 0' in exc_info.value.args[0] def test_geometry_mask_no_transform(basic_geometry): with pytest.raises(TypeError): geometry_mask( [basic_geometry], out_shape=DEFAULT_SHAPE, 
transform=None) def test_geometry_window(basic_image_file, basic_geometry): with rasterio.open(basic_image_file) as src: window = geometry_window(src, [basic_geometry], north_up=False) assert window.flatten() == (2, 2, 3, 3) def test_geometry_window_geo_interface(basic_image_file, basic_geometry): with rasterio.open(basic_image_file) as src: window = geometry_window(src, [MockGeoInterface(basic_geometry)], north_up=False) assert window.flatten() == (2, 2, 3, 3) def test_geometry_window_rotation(rotated_image_file, rotation_geometry): with rasterio.open(rotated_image_file) as src: window = geometry_window(src, [rotation_geometry], rotated=True) assert window.flatten() == (898, 439, 467, 399) def test_geometry_window_pixel_precision(basic_image_file): """Window offsets should be floor, width and height ceiling""" geom2 = { 'type': 'Polygon', 'coordinates': [[ (1.99999, 2), (1.99999, 4.0001), (4.0001, 4.0001), (4.0001, 2), (1.99999, 2) ]] } with rasterio.open(basic_image_file) as src: window = geometry_window(src, [geom2], north_up=False, pixel_precision=6) assert window.flatten() == (1, 2, 4, 3) def test_geometry_window_north_up(path_rgb_byte_tif): geometry = { 'type': 'Polygon', 'coordinates': [[ (200000, 2700000), (200000, 2750000), (250000, 2750000), (250000, 2700000), (200000, 2700000) ]] } with rasterio.open(path_rgb_byte_tif) as src: window = geometry_window(src, [geometry], north_up=True) assert window.flatten() == (326, 256, 168, 167) def test_geometry_window_pad(basic_image_file, basic_geometry): with rasterio.open(basic_image_file) as src: window = geometry_window(src, [basic_geometry], north_up=False, pad_x=0.5, pad_y=0.5) assert window.flatten() == (1, 1, 4, 4) def test_geometry_window_large_shapes(basic_image_file): geometry = { 'type': 'Polygon', 'coordinates': [[ (-2000, -2000), (-2000, 2000), (2000, 2000), (2000, -2000), (-2000, -2000) ]] } with rasterio.open(basic_image_file) as src: window = geometry_window(src, [geometry], north_up=False) assert 
window.flatten() == (0, 0, src.height, src.width) def test_geometry_window_no_overlap(path_rgb_byte_tif, basic_geometry): """Geometries that do not overlap raster raises WindowError""" with rasterio.open(path_rgb_byte_tif) as src: with pytest.raises(WindowError): geometry_window(src, [basic_geometry], north_up=False) def test_is_valid_geo_interface(geojson_point): """Properly formed Point object with geo interface is valid""" assert is_valid_geom(MockGeoInterface(geojson_point)) def test_is_valid_geom_point(geojson_point): """Properly formed GeoJSON Point is valid""" assert is_valid_geom(geojson_point) # Empty coordinates are invalid geojson_point['coordinates'] = [] assert not is_valid_geom(geojson_point) def test_is_valid_geom_multipoint(geojson_multipoint): """Properly formed GeoJSON MultiPoint is valid""" assert is_valid_geom(geojson_multipoint) # Empty iterable is invalid geom = deepcopy(geojson_multipoint) geom['coordinates'] = [] assert not is_valid_geom(geom) # Empty first coordinate is invalid geom = deepcopy(geojson_multipoint) geom['coordinates'] = [[]] def test_is_valid_geom_line(geojson_line): """Properly formed GeoJSON LineString is valid""" assert is_valid_geom(geojson_line) # Empty iterable is invalid geom = deepcopy(geojson_line) geom['coordinates'] = [] assert not is_valid_geom(geom) # Empty first coordinate is invalid geom = deepcopy(geojson_line) geom['coordinates'] = [[]] def test_is_valid_geom_multiline(geojson_line): """Properly formed GeoJSON MultiLineString is valid""" assert is_valid_geom(geojson_line) # Empty iterables are invalid geom = deepcopy(geojson_line) geom['coordinates'] = [] assert not is_valid_geom(geom) geom = deepcopy(geojson_line) geom['coordinates'] = [[]] assert not is_valid_geom(geom) # Empty first coordinate is invalid geom = deepcopy(geojson_line) geom['coordinates'] = [[[]]] assert not is_valid_geom(geom) def test_is_valid_geom_polygon(geojson_polygon): """Properly formed GeoJSON Polygon is valid""" assert 
is_valid_geom(geojson_polygon) # Empty iterables are invalid geom = deepcopy(geojson_polygon) geom['coordinates'] = [] assert not is_valid_geom(geom) geom = deepcopy(geojson_polygon) geom['coordinates'] = [[]] assert not is_valid_geom(geom) # Empty first coordinate is invalid geom = deepcopy(geojson_polygon) geom['coordinates'] = [[[]]] assert not is_valid_geom(geom) def test_is_valid_geom_ring(geojson_polygon): """Properly formed GeoJSON LinearRing is valid""" geojson_ring = deepcopy(geojson_polygon) geojson_ring['type'] = 'LinearRing' # take first ring from polygon as sample geojson_ring['coordinates'] = geojson_ring['coordinates'][0] assert is_valid_geom(geojson_ring) # Empty iterables are invalid geom = deepcopy(geojson_ring) geom['coordinates'] = [] assert not is_valid_geom(geom) geom = deepcopy(geojson_ring) geom['coordinates'] = [[]] assert not is_valid_geom(geom) def test_is_valid_geom_multipolygon(geojson_multipolygon): """Properly formed GeoJSON MultiPolygon is valid""" assert is_valid_geom(geojson_multipolygon) # Empty iterables are invalid geom = deepcopy(geojson_multipolygon) geom['coordinates'] = [] assert not is_valid_geom(geom) geom = deepcopy(geojson_multipolygon) geom['coordinates'] = [[]] assert not is_valid_geom(geom) geom = deepcopy(geojson_multipolygon) geom['coordinates'] = [[[]]] assert not is_valid_geom(geom) # Empty first coordinate is invalid geom = deepcopy(geojson_multipolygon) geom['coordinates'] = [[[[]]]] assert not is_valid_geom(geom) def test_is_valid_geom_geomcollection(geojson_geomcollection): """Properly formed GeoJSON GeometryCollection is valid""" assert is_valid_geom(geojson_geomcollection) # Empty GeometryCollection is invalid geom = deepcopy(geojson_geomcollection) geom['geometries'] = [] assert not is_valid_geom(geom) @pytest.mark.parametrize("geom", [None, 1, "foo", "type", ["type"], {"type": "Invalid"}, {"type": "Point"}]) def test_is_valid_geom_invalid_inputs(geom): """Improperly formed GeoJSON objects should fail""" 
assert not is_valid_geom(geom) def test_rasterize_point(geojson_point): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[2, 2] = 1 assert np.array_equal( rasterize([geojson_point], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_multipoint(geojson_multipoint): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[2, 2] = 1 expected[4, 4] = 1 assert np.array_equal( rasterize([geojson_multipoint], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_line(geojson_line): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[2, 2] = 1 expected[3, 3] = 1 expected[4, 4] = 1 assert np.array_equal( rasterize([geojson_line], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_multiline(geojson_multiline): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[2, 2] = 1 expected[3, 3] = 1 expected[4, 4] = 1 expected[0, 0:5] = 1 assert np.array_equal( rasterize([geojson_multiline], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_polygon(geojson_polygon, basic_image_2x2): assert np.array_equal( rasterize([geojson_polygon], out_shape=DEFAULT_SHAPE), basic_image_2x2 ) def test_rasterize_multipolygon(geojson_multipolygon): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[0:1, 0:1] = 1 expected[2:4, 2:4] = 1 assert np.array_equal( rasterize([geojson_multipolygon], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_geomcollection(geojson_geomcollection): expected = np.zeros(shape=DEFAULT_SHAPE, dtype='uint8') expected[0:1, 0:1] = 1 expected[2:4, 2:4] = 1 assert np.array_equal( rasterize([geojson_geomcollection], out_shape=DEFAULT_SHAPE), expected ) def test_rasterize_geo_interface(geojson_polygon, basic_image_2x2): assert np.array_equal( rasterize([MockGeoInterface(geojson_polygon)], out_shape=DEFAULT_SHAPE), basic_image_2x2 ) def test_rasterize_geomcollection_no_hole(): """ Make sure that bug reported in https://github.com/mapbox/rasterio/issues/1253 does not recur. 
GeometryCollections are flattened to individual parts, and should result in no holes where parts overlap. """ geomcollection = {'type': 'GeometryCollection', 'geometries': [ {'type': 'Polygon', 'coordinates': (((0, 0), (0, 5), (5, 5), (5, 0), (0, 0)),)}, {'type': 'Polygon', 'coordinates': (((2, 2), (2, 7), (7, 7), (7, 2), (2, 2)),)} ]} expected = rasterize(geomcollection['geometries'], out_shape=DEFAULT_SHAPE) assert np.array_equal( rasterize([geomcollection], out_shape=DEFAULT_SHAPE), expected ) @pytest.mark.parametrize("input", [ [{'type'}], [{'type': 'Invalid'}], [{'type': 'Point'}], [{'type': 'Point', 'coordinates': []}], [{'type': 'GeometryCollection', 'geometries': []}]]) def test_rasterize_invalid_geom(input): """Invalid GeoJSON should fail with exception""" with pytest.raises(ValueError), pytest.warns(ShapeSkipWarning): rasterize(input, out_shape=DEFAULT_SHAPE) def test_rasterize_skip_invalid_geom(geojson_polygon, basic_image_2x2): """Rasterize operation should succeed for at least one valid geometry and should skip any invalid or empty geometries with an error.""" with pytest.warns(UserWarning, match="Invalid or empty shape"): out = rasterize([geojson_polygon, {'type': 'Polygon', 'coordinates': []}], out_shape=DEFAULT_SHAPE) assert np.array_equal(out, basic_image_2x2) def test_rasterize_out_image(basic_geometry, basic_image_2x2): """Rasterize operation should succeed for an out image.""" out = np.zeros(DEFAULT_SHAPE) rasterize([basic_geometry], out=out) assert np.array_equal(basic_image_2x2, out) def test_rasterize_invalid_out_dtype(basic_geometry): """A non-supported data type for out should raise an exception.""" out = np.zeros(DEFAULT_SHAPE, dtype=np.int64) with pytest.raises(ValueError): rasterize([basic_geometry], out=out) def test_rasterize_shapes_out_dtype_mismatch(basic_geometry): """Shape
""" Set up the plot figures, axes, and items to be done for each frame. This module is imported by the plotting routines and then the function setplot is called to set the plot parameters. """ from __future__ import absolute_import from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from clawpack.geoclaw import topotools from six.moves import range import os,sys new_code = '../../new_python' if 'new_python' not in sys.path[0] + sys.path[1]: print('sys.path[0] = ',sys.path[0]) print('Adding %s to path' % new_code) sys.path.insert(0, new_code) sea_level = 3.1 # lake elevation, should agree with setrun # adjust these values if needed for different size tsunamis: ylim_transects = [-5,10] # y-axis limits for transect plots cmax_land = 40. # for color scale of land (greens) camplitude = 2. # for color scale on planview plots # make symmetric about sea_level: cmin = sea_level-camplitude cmax = sea_level+camplitude def surface_or_depth_lake(current_data): """ Return a masked array containing the surface elevation where the topo is below sea level or the water depth where the topo is above sea level. Mask out dry cells. Assumes sea level is at topo=0. Surface is eta = h+topo, assumed to be output as 4th column of fort.q files. Modified from visclaw.geoplot version to use sea_level """ drytol = getattr(current_data.user, 'drytol', 1e-3) q = current_data.q h = q[0,:,:] eta = q[3,:,:] topo = eta - h # With this version, the land is transparent. surface_or_depth = np.ma.masked_where(h <= drytol, np.where(topo<sea_level, eta, h)) try: # Use mask covering coarse regions if it's set: m = current_data.mask_coarse surface_or_depth = np.ma.masked_where(m, surface_or_depth) except: pass return surface_or_depth #-------------------------- def setplot(plotdata=None): #-------------------------- """ Specify what is to be plotted at each frame. Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData. Output: a modified version of plotdata. 
""" from clawpack.visclaw import colormaps, geoplot from numpy import linspace if plotdata is None: from clawpack.visclaw.data import ClawPlotData plotdata = ClawPlotData() plotdata.clearfigures() # clear any old figures,axes,items data plotdata.format = 'binary' def timeformat(t): from numpy import mod hours = int(t/3600.) tmin = mod(t,3600.) min = int(tmin/60.) sec = int(mod(tmin,60.)) timestr = '%s:%s:%s' % (hours,str(min).zfill(2),str(sec).zfill(2)) return timestr def title_hours(current_data): from pylab import title t = current_data.t timestr = timeformat(t) title('%s after earthquake' % timestr) #----------------------------------------- # Figure for surface #----------------------------------------- plotfigure = plotdata.new_plotfigure(name='Domain and transects', figno=0) plotfigure.kwargs = {'figsize':(11,7)} plotfigure.show = True # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes('pcolor') #plotaxes.axescmd = 'axes([.1,.4,.8,.5])' plotaxes.axescmd = 'axes([.1,.1,.4,.8])' plotaxes.title = 'Surface' #plotaxes.xlimits = [-122.4, -122.16] #plotaxes.ylimits = [47.4, 47.8] x1_tr1 = -122.29 x2_tr1 = -122.215 y1_tr1 = 47.57 y2_tr1 = 47.705 x1_tr2 = -122.21 x2_tr2 = -122.265 y1_tr2 = 47.4925 y2_tr2 = 47.545 def aa_transects(current_data): from pylab import ticklabel_format, xticks, plot, text, gca,cos,pi title_hours(current_data) ticklabel_format(useOffset=False) xticks(rotation=20) plot([x1_tr1, x2_tr1], [y1_tr1, y2_tr1], 'w') plot([x1_tr2, x2_tr2], [y1_tr2, y2_tr2], 'w') text(x2_tr1-0.01,y2_tr1+0.005,'Transect 1',color='w',fontsize=8) text(x1_tr2-0.01,y1_tr2-0.008,'Transect 2',color='w',fontsize=8) gca().set_aspect(1./cos(48*pi/180.)) #addgauges(current_data) plotaxes.afteraxes = aa_transects # Water plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') #plotitem.plot_var = geoplot.surface plotitem.plot_var = surface_or_depth_lake plotitem.pcolor_cmap = geoplot.tsunami_colormap plotitem.pcolor_cmin = cmin plotitem.pcolor_cmax = cmax 
plotitem.add_colorbar = True plotitem.amr_celledges_show = [0,0,0] plotitem.amr_patchedges_show = [0,0,0,0] plotitem.amr_data_show = [1,1,1,1,1,0,0] # Land plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') plotitem.plot_var = geoplot.land plotitem.pcolor_cmap = geoplot.land_colors plotitem.pcolor_cmin = 0.0 plotitem.pcolor_cmax = cmax_land plotitem.add_colorbar = False plotitem.amr_celledges_show = [0] plotitem.amr_patchedges_show = [0,0,0,0] plotitem.amr_data_show = [1,1,1,1,1,0,0] # add contour lines of bathy if desired: plotitem = plotaxes.new_plotitem(plot_type='2d_contour') #plotitem.show = False plotitem.plot_var = geoplot.topo plotitem.contour_levels = [sea_level] plotitem.amr_contour_colors = ['g'] # color on each level plotitem.kwargs = {'linestyles':'solid','linewidths':0.5} plotitem.amr_contour_show = [0,1,0,0] # only on finest level plotitem.celledges_show = 0 plotitem.patchedges_show = 0 #----------------------------------------- # Plots along transect: #----------------------------------------- eta1 = lambda q: q[3,:,:] B1 = lambda q: q[3,:,:]-q[0,:,:] def plot_xsec(current_data): import matplotlib.pyplot as plt import numpy import gridtools from clawpack.pyclaw import Solution framesoln = current_data.framesoln topo_color = [.8,1,.8] water_color = [.5,.5,1] plt.figure(0) # Transect 1: plt.axes([.55,.5,.4,.3]) xout = numpy.linspace(x1_tr1, x2_tr1, 1000) yout = numpy.linspace(y1_tr1, y2_tr1, 1000) eta = gridtools.grid_output_2d(framesoln, eta1, xout, yout) topo = gridtools.grid_output_2d(framesoln, B1, xout, yout) eta = numpy.where(eta>topo, eta, numpy.nan) plt.fill_between(yout, eta, topo, color=water_color) plt.fill_between(yout, topo, -10000, color=topo_color) plt.plot(yout, eta, 'b') plt.plot(yout, topo, 'g') plt.plot(yout, sea_level + 0*topo, 'k--') #plt.xlim(47.5,47.8) plt.ylim(ylim_transects) plt.ylabel('meters') plt.grid(True) timestr = timeformat(framesoln.t) plt.title('Elevation on Transect 1') # Transect 2: plt.axes([.55,.1,.4,.3]) 
xout = numpy.linspace(x1_tr2, x2_tr2, 1000) yout = numpy.linspace(y1_tr2, y2_tr2, 1000) eta = gridtools.grid_output_2d(framesoln, eta1, xout, yout) topo = gridtools.grid_output_2d(framesoln, B1, xout, yout) eta = numpy.where(eta>topo, eta, numpy.nan) topo_color = [.8,1,.8] water_color = [.5,.5,1] plt.fill_between(yout, eta, topo, color=water_color) plt.fill_between(yout, topo, -10000, color=topo_color) plt.plot(yout, eta, 'b') plt.plot(yout, topo, 'g') plt.plot(yout, sea_level + 0*topo, 'k--') #plt.xlim(47.5,47.8) plt.ylim(ylim_transects) plt.ylabel('meters') plt.grid(True) timestr = timeformat(framesoln.t) plt.title('Elevation on Transect 2') plotdata.afterframe = plot_xsec #----------------------------------------- # Figure for zoomed area #----------------------------------------- # To use, set the limits as desired and set `plotfigure.show = True` x1,x2,y1,y2 = [-122.23, -122.2, 47.69, 47.71] plotfigure = plotdata.new_plotfigure(name="zoomed area", figno=11) plotfigure.show = False plotfigure.kwargs = {'figsize': (8,7)} # Set up for axes in this figure: plotaxes = plotfigure.new_plotaxes() plotaxes.scaled = False plotaxes.xlimits = [x1, x2] plotaxes.ylimits = [y1, y2] def aa(current_data): from pylab import ticklabel_format, xticks, gca,cos,pi title_hours(current_data) ticklabel_format(useOffset=False) xticks(rotation=20) gca().set_aspect(1./cos(48*pi/180.)) plotaxes.afteraxes = aa # Water plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') #plotitem.plot_var = geoplot.surface plotitem.plot_var = surface_or_depth_lake plotitem.pcolor_cmap = geoplot.tsunami_colormap plotitem.pcolor_cmin = cmin plotitem.pcolor_cmax = cmax plotitem.add_colorbar = True plotitem.amr_celledges_show = [0,0,0] plotitem.patchedges_show = 0 # Land plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor') plotitem.plot_var = geoplot.land plotitem.pcolor_cmap = geoplot.land_colors plotitem.pcolor_cmin = 0.0 plotitem.pcolor_cmax = cmax_land plotitem.add_colorbar = False 
plotitem.amr_celledges_show = [0]
plotitem.patchedges_show = 0

#-----------------------------------------
# Figures for gauges
#-----------------------------------------

time_scale = 1./3600.   # gauge times are in seconds; plot in hours
time_label = 'hours'

plotfigure = plotdata.new_plotfigure(name='gauge depth', figno=300, \
                    type='each_gauge')
#plotfigure.clf_each_gauge = False

def setglimits_depth(current_data):
    """Afteraxes hook: set x/y limits and title for the gauge depth figure.

    Limits start from the first time the gauge is captured on its finest
    AMR level, so coarse-level transients are excluded from the plot window.
    """
    from pylab import xlim,ylim,title,argmax,show,array,ylabel
    gaugeno = current_data.gaugeno
    q = current_data.q
    depth = q[0,:]
    t = current_data.t
    g = current_data.plotdata.getgauge(gaugeno)
    level = g.level
    maxlevel = max(level)
    # find first occurrence of the max of levels used by
    # this gauge and set the limits based on that time
    argmax_level = argmax(level)
    # BUGFIX: array(t[argmax_level], t[-1]) passed t[-1] as numpy's dtype
    # argument, raising TypeError; the intent is a 2-element [tmin, tmax] array.
    xlim(time_scale*array([t[argmax_level], t[-1]]))
    ylabel('meters')
    min_depth = depth[argmax_level:].min()
    max_depth = depth[argmax_level:].max()
    ylim(min_depth-0.5, max_depth+0.5)
    title('Gauge %i : Flow Depth (h)\n' % gaugeno + \
          'max(h) = %7.3f, max(level) = %i' %(max_depth,maxlevel))
    #show()

# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.time_scale = time_scale
plotaxes.time_label = time_label

# Plot depth as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 0
plotitem.plotstyle = 'b-'

## Set the limits and the title in the function below
plotaxes.afteraxes = setglimits_depth

plotfigure = plotdata.new_plotfigure(name='gauge surface eta', figno=301, \
                    type='each_gauge')
#plotfigure.clf_each_gauge = False

def setglimits_eta(current_data):
    """Afteraxes hook: set x/y limits and title for the gauge eta figure."""
    from pylab import xlim,ylim,title,argmax,show,array,ylabel
    gaugeno = current_data.gaugeno
    q = current_data.q
    eta = q[3,:]   # q[3] is the surface elevation
    t = current_data.t
    g = current_data.plotdata.getgauge(gaugeno)
    level = g.level
    maxlevel = max(level)
    # find first occurrence of the max of levels used by
    # this gauge and set the limits based on that time
    argmax_level = argmax(level)   # first occurrence of it
    # BUGFIX: same array(a, b) dtype misuse as in setglimits_depth.
    xlim(time_scale*array([t[argmax_level], t[-1]]))
    ylabel('meters')
    min_eta = eta[argmax_level:].min()
    max_eta = eta[argmax_level:].max()
    ylim(min_eta-0.5,max_eta+0.5)
    title('Gauge %i : Surface Elevation (eta)\n' % gaugeno + \
          'max(eta) = %7.3f, max(level) = %i' %(max_eta,maxlevel))
    #show()

# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.time_scale = time_scale
plotaxes.time_label = time_label

# Plot surface (eta) as blue curve:
plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
plotitem.plot_var = 3
plotitem.plotstyle = 'b-'

## Set the limits and the title in the function below
plotaxes.afteraxes = setglimits_eta

plotfigure = plotdata.new_plotfigure(name='speed', figno=302, \
                    type='each_gauge')
#plotfigure.clf_each_gauge = False

def speed(current_data):
    """Return flow speed |hu,hv|/h at the gauge, zeroed on (near-)dry cells."""
    from numpy import sqrt, maximum, where
    q = current_data.q
    h = q[0,:]
    hu = q[1,:]
    hv = q[2,:]
    # Clamp the divisor at 1 mm of depth to avoid division by ~0,
    # then zero out speeds where the cell is effectively dry.
    s = sqrt(hu**2 + hv**2) / maximum(h,0.001)
    s = where(h > 0.001, s, 0.0)
    return s

def setglimits_speed(current_data):
    """Afteraxes hook: set x/y limits and title for the gauge speed figure."""
    from pylab import xlim,ylim,title,argmax,show,array,ylabel
    gaugeno = current_data.gaugeno
    s = speed(current_data)
    t = current_data.t
    g = current_data.plotdata.getgauge(gaugeno)
    level = g.level
    maxlevel = max(level)
    # find first occurrence of the max of levels used by
    # this gauge and set the limits based on that time
    argmax_level = argmax(level)   # first occurrence of it
    # BUGFIX: same array(a, b) dtype misuse as in setglimits_depth.
    xlim(time_scale*array([t[argmax_level], t[-1]]))
    ylabel('meters/sec')
    min_speed = s[argmax_level:].min()
    max_speed = s[argmax_level:].max()
    ylim(min_speed-0.5,max_speed+0.5)
    title('Gauge %i : Speed (s)\n' % gaugeno + \
          'max(s) = %7.3f, max(level) = %i' %(max_speed,maxlevel))
    #show()

# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
# NOTE(review): source is truncated mid-statement here — likely
# "plotaxes.time_scale = time_scale" was intended; confirm against upstream.
plotaxes.time_scale
# Repo: Ornamus/VillAInous — file: monte.py
# (BUGFIX: the line above was a raw dataset artifact "<reponame>...<filename>..."
# which is not valid Python; converted to a comment so the module parses.)
#
# This is a very simple Python 2.7 implementation of the Information Set Monte Carlo Tree Search algorithm.
# The function ISMCTS(rootstate, itermax, verbose = False) is towards the bottom of the code.
# It aims to have the clearest and simplest possible code, and for the sake of clarity, the code
# is orders of magnitude less efficient than it could be made, particularly by using a
# state.GetRandomMove() or state.DoRandomRollout() function.
#
# An example GameState classes for Knockout Whist is included to give some idea of how you
# can write your own GameState to use ISMCTS in your hidden information game.
#
# Written by <NAME>, <NAME>, <NAME> (University of York, UK) September 2012 - August 2013.
#
# Licence is granted to freely use and distribute for any sensible/legal purpose so long as this comment
# remains in any distributed code.
#
# For more information about Monte Carlo Tree Search check out our web site at www.mcts.ai
# Also read the article accompanying this code at ***URL HERE***

from math import *
import random, sys
from copy import deepcopy
from numpy import loadtxt
import numpy as np
from tensorflow.keras import datasets, layers, models
from keras.models import Sequential, load_model
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
import tensorflow as tf
import os
import time

# Force TensorFlow onto the CPU (hide all CUDA devices).
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"


class GameState:
    """ A state of the game, i.e. the game board. These are the only functions which are
        absolutely necessary to implement ISMCTS in any imperfect information game,
        although they could be enhanced and made quicker, for example by using a
        GetRandomMove() function to generate a random move during rollout.
        By convention the players are numbered 1, 2, ..., self.numberOfPlayers.
    """

    def __init__(self):
        self.numberOfPlayers = 2
        self.playerToMove = 1

    def GetNextPlayer(self, p):
        """ Return the player to the left of the specified player """
        return (p % self.numberOfPlayers) + 1

    def Clone(self):
        """ Create a deep clone of this game state. """
        st = GameState()
        st.playerToMove = self.playerToMove
        return st

    def CloneAndRandomize(self, observer):
        """ Create a deep clone of this game state, randomizing any information not visible to
            the specified observer player.
        """
        return self.Clone()

    def DoMove(self, move):
        """ Update a state by carrying out the given move.
            Must update playerToMove.
        """
        self.playerToMove = self.GetNextPlayer(self.playerToMove)

    def GetMoves(self):
        """ Get all possible moves from this state. """
        # BUGFIX: was "raise NotImplementedException()" — no such name exists in
        # Python, so subclass omissions surfaced as NameError instead of the
        # intended abstract-method error.
        raise NotImplementedError()

    def GetResult(self, player):
        """ Get the game result from the viewpoint of player. """
        # BUGFIX: same NotImplementedException -> NotImplementedError as above.
        raise NotImplementedError()

    def __repr__(self):
        """ Don't need this - but good style. """
        pass


class Node:
    """ A node in the game tree. Note wins is always from the viewpoint of playerJustMoved.
    """

    def __init__(self, move = None, parent = None, playerJustMoved = None):
        self.move = move             # the move that got us to this node - "None" for the root node
        self.parentNode = parent     # "None" for the root node
        self.childNodes = []
        self.wins = 0
        self.visits = 0
        self.avails = 1
        self.playerJustMoved = playerJustMoved   # the only part of the state that the Node needs later
        # NN stuff (neural-network guided search statistics)
        self.nn_w = 0
        self.nn_q = 0
        self.nn_value = 0
        self.target_value = 0
        self.nn_pred_prob = 0
        self.target_prob = 0

    def GetUntriedMoves(self, legalMoves):
        """ Return the elements of legalMoves for which this node does not have children.
        """
        # Find all moves for which this node *does* have children
        triedMoves = [child.move for child in self.childNodes]
        # Return all moves that are legal but have not been tried yet
        return [move for move in legalMoves if move not in triedMoves]

    def UCBSelectChild(self, legalMoves, exploration = 0.7):
        """ Use the UCB1 formula to select a child node, filtered by the given list of legal moves.
            exploration is a constant balancing between exploitation and exploration, with default value 0.7 (approximately sqrt(2) / 2)
        """
        # Filter the list of children by the list of legal moves
        legalChildren = [child for child in self.childNodes if child.move in legalMoves]
        # Get the child with the highest UCB score
        s = max(legalChildren, key = lambda c: float(c.wins)/float(c.visits) + exploration * sqrt(log(c.avails)/float(c.visits)))
        # Update availability counts -- it is easier to do this now than during backpropagation
        for child in legalChildren:
            child.avails += 1
        # Return the child selected above
        return s

    def NNSelectChild(self, legalMoves, exploration = 0.7):
        """ Select a child by NN action-value plus a prior-weighted exploration term
            (PUCT-style), filtered by the given list of legal moves.
        """
        # Filter the list of children by the list of legal moves
        legalChildren = [child for child in self.childNodes if child.move in legalMoves]
        child_visits_sum = 0
        for child in legalChildren:
            child_visits_sum += child.visits
        # action-value + exploration * prior_prob * [weird stuff]
        s = max(legalChildren, key = lambda c: c.nn_q + exploration * c.nn_pred_prob * sqrt(child_visits_sum)/(1+c.visits))
        # Update availability counts -- it is easier to do this now than during backpropagation
        for child in legalChildren:
            child.avails += 1
        # Return the child selected above
        return s

    def AddChild(self, m, p):
        """ Add a new child node for the move m.
            Return the added child node
        """
        n = Node(move = m, parent = self, playerJustMoved = p)
        self.childNodes.append(n)
        return n

    def Update(self, terminalState):
        """ Update this node - increment the visit count by one, and increase the
            win count by the result of terminalState for self.playerJustMoved.
        """
        self.visits += 1
        if self.playerJustMoved is not None:
            self.wins += terminalState.GetResult(self.playerJustMoved)

    def __repr__(self):
        #if self.nn_q == 0 and self.nn_value == 0 and self.nn_w == 0 and self.target_prob == 0:
        #return "[M:%s W/V/A: %4i/%4i/%4i]" % (self.move, self.wins, self.visits, self.avails)
        #else:
        return f"[M:{self.move} W/Q/V/P: {self.nn_value:4.1f} / {self.nn_q:4.1f} / {self.visits:4} / {self.nn_pred_prob:4.1f}"

    def TreeToString(self, indent):
        """ Represent the tree as a string, for debugging purposes.
        """
        s = self.IndentString(indent) + str(self)
        for c in self.childNodes:
            s += c.TreeToString(indent+1)
        return s

    def IndentString(self,indent):
        s = "\n"
        for i in range (1,indent+1):
            s += "| "
        return s

    def ChildrenToString(self):
        s = ""
        for c in self.childNodes:
            s += str(c) + "\n"
        return s


def get_model(model_type, train=False):
    """Build/train (train=True) or load (train=False) a Connect-4 value model.

    model_type 0 -> flat dense network on a 42-feature vector;
    model_type 1 -> small conv net on a 6x7x1 board tensor.
    Returns a fitted/loaded KerasRegressor, or None for an unknown model_type.
    Reads training data from 'game.txt' (42 input columns, rest targets).
    """
    if train:
        dataset = loadtxt('game.txt', delimiter=',')
        print(f"len: {len(dataset[0])}")
        X = dataset[:,0:42] #42
        Y = dataset[:,42:]
        print(f"Top Y: {Y[0]}")

    def flat_model():
        # Dense 42 -> 8 regressor over the flattened board.
        model = Sequential()
        model.add(Dense(42, input_dim=42, kernel_initializer='normal', activation='relu'))
        model.add(Dense(8, kernel_initializer='normal'))
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    def baseline_model():
        # Small conv net over the 6x7 board with one channel.
        model = models.Sequential()
        model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(6,7,1)))
        model.add(layers.MaxPooling2D((2, 2)))
        #model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        #model.add(layers.MaxPooling2D((2, 2)))
        #model.add(layers.Conv2D(32, (3, 3), activation='relu'))
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(8))
        model.compile(
            'adam',
            loss='mean_squared_error',
        )
        model.summary()
        return model

    if train and model_type == 1:
        # Reshape each flat 42-feature row into a 6x7x1 board tensor.
        boards = []
        # BUGFIX: loop variable was named "data", shadowing the module-level
        # "data" list; renamed to "sample" (local-only change).
        for sample in X:
            #print("=========Loading Board========")
            board = []
            prev = 0
            row_sum = 0
            for i in range(6):
                row = sample[prev:(i+1)*7]
                board.append(row)
                prev = (i+1)*7
                #print(row)
                row_sum += len(row)
            boards.append(board)
        boards = np.asarray(boards)
        boards = boards.reshape(len(boards), 6, 7, 1)
        #print(boards)

    if model_type == 0:
        estimator = KerasRegressor(build_fn=flat_model, epochs=5000, batch_size=100, verbose=2)
        if train:
            estimator.fit(X, Y)
            estimator.model.save("Con4_Flat_Recent.h5")
        else:
            estimator.model = load_model('Con4_Flat_Recent.h5')#load_model('Con4_Flat_Best.h5')
        return estimator
    elif model_type == 1:
        estimator = KerasRegressor(build_fn=baseline_model, epochs=300, batch_size=100, verbose=2)
        if train:
            estimator.fit(boards, Y)
            estimator.model.save("Con4_Conv_Recent.h5")
        else:
            estimator.model = load_model('Con4_Conv_Recent.h5')
        return estimator
    return None


def init_model_villainous():
    """Train the Villainous value model from 'game.txt' (460 inputs, 1 target)
    and save it as "TheModel". Sets module-level `estimator` via `global`.
    """
    global model, estimator
    dataset = loadtxt('game.txt', delimiter=',')
    X = dataset[:,0:460]
    Y = dataset[:,460]

    def baseline_model():
        # create model
        model = Sequential()
        model.add(Dense(460, input_dim=460, kernel_initializer='normal', activation='relu'))
        model.add(Dense(1, kernel_initializer='normal'))
        # Compile model
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    # evaluate model
    estimator = KerasRegressor(build_fn=baseline_model, epochs=400, batch_size=50, verbose=2)
    print(f"Type of x: {type(X)}, Y: {type(Y)}")
    estimator.fit(X, Y)
    estimator.model.save("TheModel")
    #estimator.fit(X[0:1,:], Y[0:1], epochs=1, batch_size=1)
    return
    # NOTE(review): everything below is unreachable (after the unconditional
    # return above); kept as-is in case cross-validation is re-enabled later.
    kfold = KFold(n_splits=10)
    results = cross_val_score(estimator, X, Y, cv=kfold)
    print("Baseline: %.2f (%.2f) MSE" % (results.mean(), results.std()))
    estimator.fit(X, Y)
    return


# Global training-data accumulators filled by record().
records = []
data = []
data_x = []
data_y = []


def record(state, result, player):
    """Append one (state, result) training example to the global buffers.

    `state.to_inputs(player)` produces the feature vector; `data` holds
    feature+result rows, `data_x`/`data_y` hold them separately.
    """
    global records, data, estimator
    records.append((state, result))
    entry = state.to_inputs(player)
    data_x.append(entry)
    data_y.append(result)
    #estimator.fit(np.array([entry]), np.array([result]), epochs=1, batch_size=1)
    # Deep-copy before appending the label so data_x keeps the unlabeled vector.
    entry = deepcopy(entry)
    entry.append(result)
    data.append(entry)


def ISMCTS(rootstate, itermax, verbose = False, rollout_agent=None):
    """ Conduct an ISMCTS search for itermax iterations starting from rootstate.
        Return the best move from the rootstate.
    """
= set() self.ns_prefix_ = node.prefix self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None): if nodeName_ == 'PIN': obj_ = PIN.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.PIN = obj_ obj_.original_tagname_ = 'PIN' elif nodeName_ == 'DocumentTypes': obj_ = DocumentTypes.factory(parent_object_=self) obj_.build(child_, gds_collector_=gds_collector_) self.DocumentTypes = obj_ obj_.original_tagname_ = 'DocumentTypes' # end class DocumentCriteria class PIN(GeneratedsSuper): """PIN""" __hash__ = GeneratedsSuper.__hash__ subclass = None superclass = None def __init__(self, Value=None, gds_collector_=None, **kwargs_): self.gds_collector_ = gds_collector_ self.gds_elementtree_node_ = None self.original_tagname_ = None self.parent_object_ = kwargs_.get('parent_object_') self.ns_prefix_ = None self.Value = Value self.Value_nsprefix_ = None def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, PIN) if subclass is not None: return subclass(*args_, **kwargs_) if PIN.subclass: return PIN.subclass(*args_, **kwargs_) else: return PIN(*args_, **kwargs_) factory = staticmethod(factory) def get_ns_prefix_(self): return self.ns_prefix_ def set_ns_prefix_(self, ns_prefix): self.ns_prefix_ = ns_prefix def get_Value(self): return self.Value def set_Value(self, Value): self.Value = Value def hasContent_(self): if ( self.Value is not None ): return True else: return False def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PIN', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('PIN') if imported_ns_def_ is not None: 
namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'PIN': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='PIN') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='PIN', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='PIN'): pass def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='PIN', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Value is not None: namespaceprefix_ = self.Value_nsprefix_ + ':' if (UseCapturedNS_ and self.Value_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sValue>%s</%sValue>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Value), input_name='Value')), namespaceprefix_ , eol_)) def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, 
gds_collector_=None): if nodeName_ == 'Value': value_ = child_.text value_ = self.gds_parse_string(value_, node, 'Value') value_ = self.gds_validate_string(value_, node, 'Value') self.Value = value_ self.Value_nsprefix_ = child_.prefix # end class PIN class DocumentTypes(GeneratedsSuper): __hash__ = GeneratedsSuper.__hash__ subclass = None superclass = None def __init__(self, DocumentType=None, gds_collector_=None, **kwargs_): self.gds_collector_ = gds_collector_ self.gds_elementtree_node_ = None self.original_tagname_ = None self.parent_object_ = kwargs_.get('parent_object_') self.ns_prefix_ = None if DocumentType is None: self.DocumentType = [] else: self.DocumentType = DocumentType self.DocumentType_nsprefix_ = None def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, DocumentTypes) if subclass is not None: return subclass(*args_, **kwargs_) if DocumentTypes.subclass: return DocumentTypes.subclass(*args_, **kwargs_) else: return DocumentTypes(*args_, **kwargs_) factory = staticmethod(factory) def get_ns_prefix_(self): return self.ns_prefix_ def set_ns_prefix_(self, ns_prefix): self.ns_prefix_ = ns_prefix def get_DocumentType(self): return self.DocumentType def set_DocumentType(self, DocumentType): self.DocumentType = DocumentType def add_DocumentType(self, value): self.DocumentType.append(value) def insert_DocumentType_at(self, index, value): self.DocumentType.insert(index, value) def replace_DocumentType_at(self, index, value): self.DocumentType[index] = value def hasContent_(self): if ( self.DocumentType ): return True else: return False def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentTypes', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('DocumentTypes') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 
'DocumentTypes': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='DocumentTypes') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='DocumentTypes', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='DocumentTypes'): pass def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DocumentTypes', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' for DocumentType_ in self.DocumentType: namespaceprefix_ = self.DocumentType_nsprefix_ + ':' if (UseCapturedNS_ and self.DocumentType_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sDocumentType>%s</%sDocumentType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(DocumentType_), input_name='DocumentType')), namespaceprefix_ , eol_)) def build(self, node, gds_collector_=None): self.gds_collector_ = gds_collector_ if SaveElementTreeNode: self.gds_elementtree_node_ = node already_processed = set() self.ns_prefix_ = node.prefix self.buildAttributes(node, node.attrib, already_processed) for child in node: nodeName_ = Tag_pattern_.match(child.tag).groups()[-1] self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_) return self def buildAttributes(self, node, attrs, already_processed): pass def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None): if nodeName_ == 
'DocumentType': value_ = child_.text value_ = self.gds_parse_string(value_, node, 'DocumentType') value_ = self.gds_validate_string(value_, node, 'DocumentType') self.DocumentType.append(value_) self.DocumentType_nsprefix_ = child_.prefix # end class DocumentTypes class RequestContext(GeneratedsSuper): """RequestContext""" __hash__ = GeneratedsSuper.__hash__ subclass = None superclass = None def __init__(self, Version=None, Language=None, GroupID=None, RequestReference=None, UserToken=None, gds_collector_=None, **kwargs_): self.gds_collector_ = gds_collector_ self.gds_elementtree_node_ = None self.original_tagname_ = None self.parent_object_ = kwargs_.get('parent_object_') self.ns_prefix_ = None self.Version = Version self.Version_nsprefix_ = None self.Language = Language self.validate_Language(self.Language) self.Language_nsprefix_ = None self.GroupID = GroupID self.GroupID_nsprefix_ = None self.RequestReference = RequestReference self.RequestReference_nsprefix_ = None self.UserToken = UserToken self.UserToken_nsprefix_ = None def factory(*args_, **kwargs_): if CurrentSubclassModule_ is not None: subclass = getSubclassFromModule_( CurrentSubclassModule_, RequestContext) if subclass is not None: return subclass(*args_, **kwargs_) if RequestContext.subclass: return RequestContext.subclass(*args_, **kwargs_) else: return RequestContext(*args_, **kwargs_) factory = staticmethod(factory) def get_ns_prefix_(self): return self.ns_prefix_ def set_ns_prefix_(self, ns_prefix): self.ns_prefix_ = ns_prefix def get_Version(self): return self.Version def set_Version(self, Version): self.Version = Version def get_Language(self): return self.Language def set_Language(self, Language): self.Language = Language def get_GroupID(self): return self.GroupID def set_GroupID(self, GroupID): self.GroupID = GroupID def get_RequestReference(self): return self.RequestReference def set_RequestReference(self, RequestReference): self.RequestReference = RequestReference def get_UserToken(self): 
return self.UserToken def set_UserToken(self, UserToken): self.UserToken = UserToken def validate_Language(self, value): result = True # Validate type Language, a restriction on xsd:string. if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None: if not isinstance(value, str): lineno = self.gds_get_node_lineno_() self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, }) return False value = value enumerations = ['en', 'fr'] if value not in enumerations: lineno = self.gds_get_node_lineno_() self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on Language' % {"value" : encode_str_2_3(value), "lineno": lineno} ) result = False return result def hasContent_(self): if ( self.Version is not None or self.Language is not None or self.GroupID is not None or self.RequestReference is not None or self.UserToken is not None ): return True else: return False def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='RequestContext', pretty_print=True): imported_ns_def_ = GenerateDSNamespaceDefs_.get('RequestContext') if imported_ns_def_ is not None: namespacedef_ = imported_ns_def_ if pretty_print: eol_ = '\n' else: eol_ = '' if self.original_tagname_ is not None and name_ == 'RequestContext': name_ = self.original_tagname_ if UseCapturedNS_ and self.ns_prefix_: namespaceprefix_ = self.ns_prefix_ + ':' showIndent(outfile, level, pretty_print) outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', )) already_processed = set() self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='RequestContext') if self.hasContent_(): outfile.write('>%s' % (eol_, )) self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='RequestContext', pretty_print=pretty_print) showIndent(outfile, level, pretty_print) 
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_)) else: outfile.write('/>%s' % (eol_, )) def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='RequestContext'): pass def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='RequestContext', fromsubclass_=False, pretty_print=True): if pretty_print: eol_ = '\n' else: eol_ = '' if self.Version is not None: namespaceprefix_ = self.Version_nsprefix_ + ':' if (UseCapturedNS_ and self.Version_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sVersion>%s</%sVersion>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Version), input_name='Version')), namespaceprefix_ , eol_)) if self.Language is not None: namespaceprefix_ = self.Language_nsprefix_ + ':' if (UseCapturedNS_ and self.Language_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sLanguage>%s</%sLanguage>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Language), input_name='Language')), namespaceprefix_ , eol_)) if self.GroupID is not None: namespaceprefix_ = self.GroupID_nsprefix_ + ':' if (UseCapturedNS_ and self.GroupID_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sGroupID>%s</%sGroupID>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.GroupID), input_name='GroupID')), namespaceprefix_ , eol_)) if self.RequestReference is not None: namespaceprefix_ = self.RequestReference_nsprefix_ + ':' if (UseCapturedNS_ and self.RequestReference_nsprefix_) else '' showIndent(outfile, level, pretty_print) outfile.write('<%sRequestReference>%s</%sRequestReference>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.RequestReference), input_name='RequestReference')), namespaceprefix_ , eol_)) if self.UserToken is not None: namespaceprefix_ = self.UserToken_nsprefix_ + ':' if (UseCapturedNS_ and
* x) L1 += 0.00000000213 * mu.cost(0.21127914063 + 1505.28780909299 * x) L1 += 0.00000000267 * mu.cost(5.16501015011 + 3205.54734666440 * x) L1 += 0.00000000212 * mu.cost(4.26202838353 + 6546.15977336420 * x) L1 += 0.00000000211 * mu.cost(6.21401684263 + 3253.30422216000 * x) L1 += 0.00000000233 * mu.cost(3.72007597749 + 3346.13535100720 * x) L1 += 0.00000000274 * mu.cost(2.91986569135 + 10713.99488132620 * x) L1 += 0.00000000201 * mu.cost(3.36695295492 + 6.68366387410 * x) L1 += 0.00000000223 * mu.cost(3.08788599159 + 401.67212175720 * x) L1 += 0.00000000234 * mu.cost(2.24268269202 + 110.20632121940 * x) L1 += 0.00000000264 * mu.cost(2.08178742740 + 6475.03930496240 * x) L1 += 0.00000000231 * mu.cost(4.53806384480 + 9602.35263622420 * x) L1 += 0.00000000213 * mu.cost(2.85452302656 + 5415.65737477320 * x) L1 += 0.00000000195 * mu.cost(0.99589439506 + 5642.19824260920 * x) L1 += 0.00000000259 * mu.cost(0.00464351114 + 9380.95967271720 * x) L1 += 0.00000000197 * mu.cost(3.32573550633 + 3657.00429635640 * x) L1 += 0.00000000228 * mu.cost(5.33299975472 + 3561.02506913860 * x) L1 += 0.00000000193 * mu.cost(1.25502846507 + 6606.44325483230 * x) L1 += 0.00000000199 * mu.cost(1.13665869139 + 685.04405422600 * x) L1 += 0.00000000227 * mu.cost(4.49610509002 + 589.06482700820 * x) L1 += 0.00000000217 * mu.cost(5.48740879816 + 10596.18207843420 * x) L1 += 0.00000000192 * mu.cost(4.26501800444 + 3333.56619000180 * x) L1 += 0.00000000188 * mu.cost(1.44301618203 + 4885.96640967860 * x) L1 += 0.00000000178 * mu.cost(4.82506490541 + 9070.11887384880 * x) L1 += 0.00000000184 * mu.cost(5.69637552141 + 3351.24909204960 * x) L1 += 0.00000000187 * mu.cost(0.76021337348 + 16699.53901514999 * x) L1 += 0.00000000226 * mu.cost(0.82767654373 + 3265.83082813250 * x) L1 += 0.00000000204 * mu.cost(6.20933387021 + 394.62588505920 * x) L1 += 0.00000000176 * mu.cost(3.89567349231 + 10028.95082710020 * x) L1 += 0.00000000174 * mu.cost(3.68843293982 + 735.87651353180 * x) L1 += 0.00000000173 * 
mu.cost(2.44269377255 + 3603.69635007260 * x) L1 += 0.00000000177 * mu.cost(1.24154853329 + 12722.55242048520 * x) L1 += 0.00000000184 * mu.cost(4.77203925989 + 286.96236112060 * x) L1 += 0.00000000171 * mu.cost(4.67140116008 + 20199.09495963300 * x) L1 += 0.00000000170 * mu.cost(5.13753345526 + 1332.05488754080 * x) L1 += 0.00000000201 * mu.cost(2.37863157745 + 16276.46394262300 * x) L1 += 0.00000000209 * mu.cost(0.57156268506 + 11250.79939342160 * x) L1 += 0.00000000164 * mu.cost(1.98441291396 + 10014.72373309860 * x) L1 += 0.00000000191 * mu.cost(0.60250751218 + 56.80326216980 * x) L1 += 0.00000000171 * mu.cost(6.22556266993 + 17277.40693183380 * x) L1 += 0.00000000166 * mu.cost(1.05948008727 + 19513.98359510420 * x) L1 += 0.00000000163 * mu.cost(1.59661610701 + 1437.17561419860 * x) L1 += 0.00000000165 * mu.cost(3.36308723589 + 6665.97238221460 * x) L1 += 0.00000000184 * mu.cost(3.20554894393 + 263.08392337280 * x) L1 += 0.00000000212 * mu.cost(3.10485836003 + 4039.88357492740 * x) L1 += 0.00000000176 * mu.cost(3.41768939214 + 9468.26787725700 * x) L1 += 0.00000000163 * mu.cost(1.39275730949 + 8982.81066930900 * x) L1 += 0.00000000213 * mu.cost(3.39734274482 + 931.36308685180 * x) L1 += 0.00000000189 * mu.cost(4.54004144896 + 8542.97070603500 * x) L1 += 0.00000000191 * mu.cost(1.15555618959 + 3169.93955608060 * x) L1 += 0.00000000155 * mu.cost(1.41249963094 + 22.76849660940 * x) L1 += 0.00000000153 * mu.cost(5.14168081601 + 156.40072050240 * x) L1 += 0.00000000159 * mu.cost(3.64996617906 + 8013.27974094040 * x) L1 += 0.00000000151 * mu.cost(1.93804487507 + 3384.33133900480 * x) L1 += 0.00000000157 * mu.cost(0.58554505759 + 158.94351778320 * x) L1 += 0.00000000173 * mu.cost(2.72517427493 + 2807.39834325620 * x) L1 += 0.00000000159 * mu.cost(0.67192454133 + 13892.14067189380 * x) L1 += 0.00000000150 * mu.cost(2.66045714174 + 19004.64794940840 * x) L1 += 0.00000000192 * mu.cost(5.73782632783 + 206.70073729660 * x) L1 += 0.00000000143 * mu.cost(3.19213280913 + 
6843.69148953180 * x) L1 += 0.00000000194 * mu.cost(1.32358882667 + 19402.79695281660 * x) L1 += 0.00000000143 * mu.cost(2.36478163720 + 13207.02930736500 * x) L1 += 0.00000000140 * mu.cost(1.88800568840 + 11766.26326451460 * x) L1 += 0.00000000144 * mu.cost(0.69018080218 + 17085.95866572220 * x) L1 += 0.00000000183 * mu.cost(5.98085295555 + 13362.51701710200 * x) L1 += 0.00000000161 * mu.cost(2.92764155222 + 5.85720229960 * x) L1 += 0.00000000162 * mu.cost(6.07051064413 + 6701.58017279840 * x) L1 += 0.00000000192 * mu.cost(0.86266150575 + 2814.44457995420 * x) L1 += 0.00000000182 * mu.cost(5.26446797092 + 3873.82651014340 * x) L1 += 0.00000000137 * mu.cost(0.41563614709 + 5820.91492464680 * x) L1 += 0.00000000144 * mu.cost(3.02314051168 + 708.98980227659 * x) L1 += 0.00000000184 * mu.cost(4.61314496499 + 3329.97576135000 * x) L1 += 0.00000000131 * mu.cost(3.48156082643 + 367.22432896240 * x) L1 += 0.00000000173 * mu.cost(3.09922849765 + 12295.95422960920 * x) L1 += 0.00000000135 * mu.cost(2.23311632892 + 15664.03552270859 * x) L1 += 0.00000000147 * mu.cost(1.95810911154 + 5732.04924442980 * x) L1 += 0.00000000158 * mu.cost(1.48909254724 + 29.49181830340 * x) L1 += 0.00000000127 * mu.cost(5.55534080040 + 3368.01398279660 * x) L1 += 0.00000000129 * mu.cost(1.78002583252 + 22743.40937951640 * x) L1 += 0.00000000132 * mu.cost(2.81496895377 + 21795.21409161479 * x) L1 += 0.00000000127 * mu.cost(5.73090203501 + 3340.19235060619 * x) L1 += 0.00000000164 * mu.cost(1.87613918877 + 6709.67404086740 * x) L1 += 0.00000000123 * mu.cost(3.61238958991 + 22324.90505670940 * x) L1 += 0.00000000129 * mu.cost(4.92064308735 + 2540.79130153440 * x) L1 += 0.00000000121 * mu.cost(6.16922638434 + 20206.14119633100 * x) L1 += 0.00000000122 * mu.cost(5.79901866314 + 1854.63230563460 * x) L1 += 0.00000000133 * mu.cost(0.50941998058 + 3274.12501778540 * x) L1 += 0.00000000151 * mu.cost(1.61342807879 + 1107.13880568480 * x) L1 += 0.00000000165 * mu.cost(2.02795177586 + 290.48547946960 * x) L1 
+= 0.00000000125 * mu.cost(0.52719797619 + 2604.73591316800 * x) L1 += 0.00000000144 * mu.cost(5.68526782434 + 8827.39026987480 * x) L1 += 0.00000000126 * mu.cost(3.80246508251 + 765.79306444640 * x) L1 += 0.00000000116 * mu.cost(1.79450246249 + 647.01083331480 * x) L1 += 0.00000000126 * mu.cost(2.00195272473 + 699.27114822760 * x) L1 += 0.00000000147 * mu.cost(6.22619740782 + 6040.34724601740 * x) L1 += 0.00000000119 * mu.cost(2.05840518265 + 15121.10278521600 * x) L1 += 0.00000000114 * mu.cost(2.74877091470 + 6460.81221096080 * x) L1 += 0.00000000155 * mu.cost(1.78154091696 + 21265.52312652020 * x) L1 += 0.00000000146 * mu.cost(3.37351237411 + 1861.74585263540 * x) L1 += 0.00000000118 * mu.cost(4.07281676691 + 418.50432280700 * x) L1 += 0.00000000116 * mu.cost(0.10434606071 + 13362.38239649640 * x) L1 += 0.00000000129 * mu.cost(0.78419803719 + 3427.92063123960 * x) L1 += 0.00000000152 * mu.cost(0.32620694442 + 3443.70520091840 * x) L1 += 0.00000000110 * mu.cost(0.56398082486 + 661.23292678100 * x) L1 += 0.00000000111 * mu.cost(4.05380946072 + 568.82187402740 * x) L1 += 0.00000000108 * mu.cost(3.17700641574 + 3448.27595063840 * x) L1 += 0.00000000138 * mu.cost(4.47698517191 + 3326.38533269820 * x) L1 += 0.00000000108 * mu.cost(4.89922372003 + 9588.12554222260 * x) L1 += 0.00000000114 * mu.cost(4.80828825403 + 6657.34641565180 * x) L1 += 0.00000000108 * mu.cost(4.10637483972 + 13553.89797291080 * x) L1 += 0.00000000125 * mu.cost(0.33573243959 + 18849.22754997420 * x) L1 += 0.00000000115 * mu.cost(3.18885465852 + 2409.24933984800 * x) L1 += 0.00000000104 * mu.cost(3.23074163851 + 3472.15438838620 * x) L1 += 0.00000000104 * mu.cost(0.09799515047 + 30065.51184029820 * x) L1 += 0.00000000112 * mu.cost(1.64487733528 + 10001.06188460700 * x) L1 += 0.00000000143 * mu.cost(3.53781769283 + 6518.75821726740 * x) L1 += 0.00000000113 * mu.cost(5.20979306912 + 2125.87740737920 * x) L1 += 0.00000000104 * mu.cost(2.77582098882 + 38.13303563780 * x) L1 += 0.00000000133 * 
mu.cost(5.88513337452 + 5835.14201864840 * x) L1 += 0.00000000105 * mu.cost(4.11662579413 + 6675.70192909220 * x) L1 += 0.00000000102 * mu.cost(0.60100887043 + 10264.56588407340 * x) L1 += 0.00000000101 * mu.cost(3.78636130664 + 10042.61267559180 * x) L1 += 0.00000000139 * mu.cost(1.80936944447 + 12323.42309600880 * x) L1 += 0.00000000101 * mu.cost(2.47217208753 + 7380.49600162720 * x) L1 += 0.00000000130 * mu.cost(2.53454569863 + 11769.85369316640 * x) L1 += 0.00000000100 * mu.cost(5.72291104291 + 14.22709400160 * x) L1 += 0.00000000135 * mu.cost(4.20237564510 + 4672.66731424060 * x) L1 += 0.00000000133 * mu.cost(0.34413768012 + 16489.76303806100 * x) L1 += 0.00000000098 * mu.cost(1.44874403589 + 3370.04193523580 * x) L1 += 0.00000000131 * mu.cost(1.31336606248 + 3313.21087060300 * x) L1 += 0.00000000111 * mu.cost(3.12463539337 + 309.27832265580 * x) L1 += 0.00000000101 * mu.cost(3.15369992044 + 24150.08005134500 * x) L1 += 0.00000000102 * mu.cost(6.13479937096 + 2277.70737816160 * x) L1 += 0.00000000099 * mu.cost(0.10085261274 + 12839.87228870540 * x) L1 += 0.00000000134 * mu.cost(2.91637947295 + 57.87869600380 * x) L1 += 0.00000000104 * mu.cost(3.30283052330 + 3399.98628861340 * x) L1 += 0.00000000108 * mu.cost(4.92699760221 + 802.36392244620 * x) L1 += 0.00000000106 * mu.cost(2.89298330043 + 7799.98064550240 * x) L1 += 0.00000000112 * mu.cost(3.12761163915 + 5989.06725217280 * x) L1 += 0.00000000094 * mu.cost(3.42562596561 + 3510.19260983280 * x) L1 += 0.00000000102 * mu.cost(0.94285421551 + 3209.07046501340 * x) L1 += 0.00000000096 * mu.cost(0.79636181668 + 3024.22055704320 * x) L1 += 0.00000000093 * mu.cost(1.08979608844 + 14577.18472611980 * x)
text='Transylvania', command=self.Menu_Transylvania, width=14, height=3) Button_Transylvania.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Tyrrell = tk.Button(master=back, text='Tyrrell', command=self.Menu_Tyrrell, width=14, height=3) Button_Tyrrell.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Union = tk.Button(master=back, text='Union', command=self.Menu_Union, width=14, height=3) Button_Union.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Vance = tk.Button(master=back, text='Vance', command=self.Menu_Vance, width=14, height=3) Button_Vance.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Wake = tk.Button(master=back, text='Wake', command=self.Menu_Wake, width=14, height=3) Button_Wake.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Warren = tk.Button(master=back, text='Warren', command=self.Menu_Warren, width=14, height=3) Button_Warren.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Washington = tk.Button(master=back, text='Washington', command=self.Menu_Washington, width=14, height=3) Button_Washington.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Watauga = tk.Button(master=back, text='Watauga', command=self.Menu_Watauga, width=14, height=3) Button_Watauga.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Wayne = tk.Button(master=back, text='Wayne', command=self.Menu_Wayne, width=14, height=3) Button_Wayne.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Wilkes = tk.Button(master=back, text='Wilkes', command=self.Menu_Wilkes, width=14, height=3) Button_Wilkes.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Wilson = tk.Button(master=back, text='Wilson', command=self.Menu_Wilson, width=14, height=3) Button_Wilson.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Yadkin = 
tk.Button(master=back, text='Yadkin', command=self.Menu_Yadkin, width=14, height=3) Button_Yadkin.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Yancey = tk.Button(master=back, text='Yancey', command=self.Menu_Yancey, width=14, height=3) Button_Yancey.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) def Menu_Alamance(self): self.mw.destroy() self.mw = tk.Tk() #Specify the attributes for all widgets simply like this. self.mw.option_add("*Button.Background", "Teal") self.mw.option_add("*Button.Foreground", "White") self.mw.title('OP25 Repeater Selector GUI') #You can set the geometry attribute to change the root windows size self.mw.geometry("800x420") #You want the size of the app to be 750 X 562.5 Pixels (Slightky Smaller than the RPI 7" Touch Screens) self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction back = tk.Frame(master=self.mw,bg='Grey') back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window #Buttons Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3) Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3) GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3) GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Altamahaw = tk.Button(master=back, text='Altamahaw', command=CMD_Altamahaw, width=14, height=3) Button_Altamahaw.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Cane_Mtn = tk.Button(master=back, text='Cane_Mtn', command=CMD_Cane_Mtn, width=14, height=3) Button_Cane_Mtn.grid(row=1, 
column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Mebane = tk.Button(master=back, text='Mebane', command=CMD_Mebane, width=14, height=3) Button_Mebane.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, 
pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) def Menu_Alexander(self): self.mw.destroy() self.mw = tk.Tk() #Specify the attributes for all widgets simply like this. self.mw.option_add("*Button.Background", "Teal") self.mw.option_add("*Button.Foreground", "White") self.mw.title('OP25 Repeater Selector GUI') #You can set the geometry attribute to change the root windows size self.mw.geometry("800x420") #You want the size of the app to be 750 X 562.5 Pixels (Slightky Smaller than the RPI 7" Touch Screens) self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction back = tk.Frame(master=self.mw,bg='Grey') back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window #Buttons Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3) Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3) GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3) 
GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Barretts_Mountain = tk.Button(master=back, text='Barretts_Mountain', command=CMD_Barretts_Mountain, width=14, height=3) Button_Barretts_Mountain.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, 
height=3) Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) def Menu_Alleghany(self): self.mw.destroy() self.mw = tk.Tk() #Specify the attributes for all widgets simply like this. 
self.mw.option_add("*Button.Background", "Teal") self.mw.option_add("*Button.Foreground", "White") self.mw.title('OP25 Repeater Selector GUI') #You can set the geometry attribute to change the root windows size self.mw.geometry("800x420") #You want the size of the app to be 750 X 562.5 Pixels (Slightky Smaller than the RPI 7" Touch Screens) self.mw.resizable(0, 0) #Don't allow resizing in the x or y direction back = tk.Frame(master=self.mw,bg='Grey') back.pack_propagate(0) #Don't allow the widgets inside to determine the frame's width / height back.pack(fill=tk.BOTH, expand=1) #Expand the frame to fill the root window #Buttons Stop_OP25 = tk.Button(master=back, text='Stop OP25 Instances', command=stopall, width=14, height=3) Stop_OP25.grid(row=0, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToFavorites_OP25 = tk.Button(master=back, text='Go To Favorites', command=self.Favorites, width=14, height=3) GoToFavorites_OP25.grid(row=0, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) GoToNCCounties_OP25 = tk.Button(master=back, text='Go To NC Counties', command=self.NC_Counties_Home, width=14, height=3) GoToNCCounties_OP25.grid(row=0, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Doughton_Mtn = tk.Button(master=back, text='Doughton_Mtn', command=CMD_Doughton_Mtn, width=14, height=3) Button_Doughton_Mtn.grid(row=1, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_Green_Mtn = tk.Button(master=back, text='Green_Mtn', command=CMD_Green_Mtn, width=14, height=3) Button_Green_Mtn.grid(row=1, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=1, column=5, 
sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=2, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=3, column=5, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=1, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=2, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=3, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ = 
tk.Button(master=back, text='', command='', width=14, height=3) Button_.grid(row=4, column=4, sticky=tk.W+tk.E+tk.N+tk.S, padx=9, pady=5) Button_ =
simulation ends either explicitly by provided a trailing empty campaign or by deriving the end date from the crop calendar and timed events in the last campaign. See also the section below on `end_date` property. Each campaign is characterized by zero or one crop calendar, zero or more timed events and zero or more state events. The structure of the data needed as input for AgroManager is most easily understood with the example (in YAML) below. The definition consists of three campaigns, the first starting on 1999-08-01, the second starting on 2000-09-01 and the last campaign starting on 2001-03-01. The first campaign consists of a crop calendar for winter-wheat starting with sowing at the given crop_start_date. During the campaign there are timed events for irrigation at 2000-05-25 and 2000-06-30. Moreover, there are state events for fertilizer application (event_signal: apply_npk) given by development stage (DVS) at DVS 0.3, 0.6 and 1.12. The second campaign has no crop calendar, timed events or state events. This means that this is a period of bare soil with only the water balance running. The third campaign is for fodder maize sown at 2001-04-15 with two series of timed events (one for irrigation and one for N/P/K application) and no state events. The end date of the simulation in this case will be 2001-11-01 (2001-04-15 + 200 days). 
An example of an agromanagement definition file:: AgroManagement: - 1999-08-01: CropCalendar: crop_name: wheat variety_name: winter-wheat crop_start_date: 1999-09-15 crop_start_type: sowing crop_end_date: crop_end_type: maturity max_duration: 300 TimedEvents: - event_signal: irrigate name: Timed irrigation events comment: All irrigation amounts in cm events_table: - 2000-05-25: {irrigation_amount: 3.0} - 2000-06-30: {irrigation_amount: 2.5} StateEvents: - event_signal: apply_npk event_state: DVS zero_condition: rising name: DVS-based N/P/K application table comment: all fertilizer amounts in kg/ha events_table: - 0.3: {N_amount : 1, P_amount: 3, K_amount: 4} - 0.6: {N_amount: 11, P_amount: 13, K_amount: 14} - 1.12: {N_amount: 21, P_amount: 23, K_amount: 24} - 2000-09-01: CropCalendar: TimedEvents: StateEvents - 2001-03-01: CropCalendar: crop_name: maize variety_name: fodder-maize crop_start_date: 2001-04-15 crop_start_type: sowing crop_end_date: crop_end_type: maturity max_duration: 200 TimedEvents: - event_signal: irrigate name: Timed irrigation events comment: All irrigation amounts in cm events_table: - 2001-06-01: {irrigation_amount: 2.0} - 2001-07-21: {irrigation_amount: 5.0} - 2001-08-18: {irrigation_amount: 3.0} - 2001-09-19: {irrigation_amount: 2.5} - event_signal: apply_npk name: Timed N/P/K application table comment: All fertilizer amounts in kg/ha events_table: - 2001-05-25: {N_amount : 50, P_amount: 25, K_amount: 22} - 2001-07-05: {N_amount : 70, P_amount: 35, K_amount: 32} StateEvents: """ # campaign start dates campaign_start_dates = List() # Overall engine start date and end date _start_date = Instance(date) _end_date = Instance(date) # campaign definitions crop_calendars = List() timed_event_dispatchers = List() state_event_dispatchers = List() _tmp_date = None # Helper variable _icampaign = 0 # count the campaigns def initialize(self, kiosk, agromanagement): """Initialize the AgroManager. 
:param kiosk: A PCSE variable Kiosk :param agromanagement: the agromanagement definition, see the example above in YAML. """ self.kiosk = kiosk self.crop_calendars = [] self.timed_event_dispatchers = [] self.state_event_dispatchers = [] self.campaign_start_dates = [] # Connect CROP_FINISH signal with handler self._connect_signal(self._on_CROP_FINISH, signals.crop_finish) # First get and validate the dates of the different campaigns for campaign in agromanagement: # Check if campaign start dates is in chronological order campaign_start_date = campaign.keys()[0] self._check_campaign_date(campaign_start_date) self.campaign_start_dates.append(campaign_start_date) # Add None to the list of campaign dates to signal the end of the # number of campaigns. self.campaign_start_dates.append(None) # Walk through the different campaigns and build crop calendars and # timed/state event dispatchers for campaign, campaign_start, next_campaign in \ zip(agromanagement, self.campaign_start_dates[:-1], self.campaign_start_dates[1:]): # Get the campaign definition for the start date campaign_def = campaign[campaign_start] if self._is_empty_campaign(campaign_def): # no campaign definition for this campaign, e.g. 
fallow self.crop_calendars.append(None) self.timed_event_dispatchers.append(None) self.state_event_dispatchers.append(None) continue # get crop calendar definition for this campaign cc_def = campaign_def['CropCalendar'] if cc_def is not None: cc = CropCalendar(kiosk, **cc_def) cc.validate(campaign_start, next_campaign) self.crop_calendars.append(cc) else: self.crop_calendars.append(None) # Get definition of timed events and build TimedEventsDispatchers te_def = campaign_def['TimedEvents'] if te_def is not None: te_dsp = self._build_TimedEventDispatchers(kiosk, te_def) for te in te_dsp: te.validate(campaign_start, next_campaign) self.timed_event_dispatchers.append(te_dsp) else: self.timed_event_dispatchers.append(None) # Get definition of state events and build StateEventsDispatchers se_def = campaign_def['StateEvents'] if se_def is not None: se_dsp = self._build_StateEventDispatchers(kiosk, se_def) self.state_event_dispatchers.append(se_dsp) else: self.state_event_dispatchers.append(None) def _is_empty_campaign(self, campaign_def): """"Check if the campaign definition is empty""" if campaign_def is None: return True attrs = ["CropCalendar", "TimedEvents", "StateEvents"] r = [] for attr in attrs: if attr in campaign_def: if campaign_def[attr] is None: r.append(True) else: r.append(False) if r == [True]*3: return True return False @property def start_date(self): """Retrieves the start date of the agromanagement sequence, e.g. the first simulation date :return: a date object """ if self._start_date is None: self._start_date = self.campaign_start_dates[0] return self._start_date @property def end_date(self): """Retrieves the end date of the agromanagement sequence, e.g. the last simulation date. :return: a date object Getting the last simulation date is more complicated because there are two options. **1. 
Adding an explicit trailing empty campaign** The first option is to explicitly define the end date of the simulation by adding a 'trailing empty campaign' to the agromanagement definition. An example of an agromanagement definition with a 'trailing empty campaigns' (YAML format) is given below. This example will run the simulation until 2001-01-01:: Version: 1.0 AgroManagement: - 1999-08-01: CropCalendar: crop_name: winter-wheat variety_name: winter-wheat crop_start_date: 1999-09-15 crop_start_type: sowing crop_end_date: crop_end_type: maturity max_duration: 300 TimedEvents: StateEvents: - 2001-01-01: Note that in configurations where the last campaign contains a definition for state events, a trailing empty campaign *must* be provided because the end date cannot be determined. The following campaign definition will therefore lead to an error:: Version: 1.0 AgroManagement: - 2001-01-01: CropCalendar: crop_name: maize variety_name: fodder-maize crop_start_date: 2001-04-15 crop_start_type: sowing crop_end_date: crop_end_type: maturity max_duration: 200 TimedEvents: StateEvents: - event_signal: apply_npk event_state: DVS zero_condition: rising name: DVS-based N/P/K application table comment: all fertilizer amounts in kg/ha events_table: - 0.3: {N_amount : 1, P_amount: 3, K_amount: 4} - 0.6: {N_amount: 11, P_amount: 13, K_amount: 14} - 1.12: {N_amount: 21, P_amount: 23, K_amount: 24} **2. Without an explicit trailing campaign** The second option is that there is no trailing empty campaign and in that case the end date of the simulation is retrieved from the crop calendar and/or the timed events that are scheduled. 
In the example below, the end date will be 2000-08-05 as this is the harvest date and there are no timed events scheduled after this date:: Version: 1.0 AgroManagement: - 1999-09-01: CropCalendar: crop_name: wheat variety_name: winter-wheat crop_start_date: 1999-10-01 crop_start_type: sowing crop_end_date: 2000-08-05 crop_end_type: harvest max_duration: 330 TimedEvents: - event_signal: irrigate name: Timed irrigation events comment: All irrigation amounts in cm events_table: - 2000-05-01: {irrigation_amount: 2, efficiency: 0.7} - 2000-06-21: {irrigation_amount: 5, efficiency: 0.7} - 2000-07-18: {irrigation_amount: 3, efficiency: 0.7} StateEvents: In the case that there is no harvest date provided and the crop runs till maturity, the end date from the crop calendar will be estimated as the crop_start_date plus the max_duration. """ if self._end_date is None: # First check if the last campaign definition is an empty trailing campaign and use that date. if self.crop_calendars[-1] is None and \ self.timed_event_dispatchers[-1] is None and \ self.state_event_dispatchers[-1] is None: self._end_date = self.campaign_start_dates[-2] # use -2 here because None is # appended to campaign_start_dates return self._end_date # Check if there are state events defined in the last campaign without specifying the end date # explicitly with an trailing empty campaign if self.state_event_dispatchers[-1] is not None: msg = "In the AgroManagement definition, the last campaign with start date '%s' contains StateEvents. " \ "When specifying StateEvents, the end date of the campaign must be explicitly" \ "given by a trailing empty campaign." raise exc.PCSEError(msg) # Walk over the crop calendars and timed events to get the last date. cc_dates = [] te_dates = [] for cc, teds in zip(self.crop_calendars, self.timed_event_dispatchers): if cc is not None: cc_dates.append(cc.get_end_date()) if teds is not None: te_dates.extend([t.get_end_date() for t in teds]) # If
<reponame>chandu088/p # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import math import os import time from tensorflow.contrib.learn.python.learn import export_strategy from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.python.framework import ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import basic_session_run_hooks, saver, server_lib from tensorflow.python.util import compat from polyaxon import Modes from polyaxon.estimators.estimator import Estimator from polyaxon.libs import getters from polyaxon.libs.utils import new_attr_context from polyaxon.processing.input_data import create_input_data_fn class Experiment(object): """Experiment is a class containing all information needed to train a model. After an experiment is created (by passing an Estimator and inputs for training and evaluation), an Experiment instance knows how to invoke training and eval loops in a sensible fashion for distributed training. None of the functions passed to this constructor are executed at construction time. They are stored and used when a method is executed which requires it. Args: estimator: Object implementing Estimator interface. train_input_fn: function, returns features and labels for training. eval_input_fn: function, returns features and labels for evaluation. If `eval_steps` is `None`, this should be configured only to produce for a finite number of batches (generally, 1 epoch over the evaluation data). train_steps: Perform this many steps of training. default: None, means train forever. eval_steps: `evaluate` runs until input is exhausted (or another exception is raised), or for `eval_steps` steps, if specified. train_hooks: A list of monitors to pass to the `Estimator`'s `fit` function. eval_hooks: A list of `SessionRunHook` hooks to pass to the `Estimator`'s `evaluate` function. 
eval_delay_secs: Start evaluating after waiting for this many seconds. continuous_eval_throttle_secs: Do not re-evaluate unless the last evaluation was started at least this many seconds ago for continuous_eval(). eval_every_n_steps: (applies only to train_and_evaluate). the minimum number of steps between evaluations. Of course, evaluation does not occur if no new snapshot is available, hence, this is the minimum. delay_workers_by_global_step: if `True` delays training workers based on global step instead of time. export_strategies: A list of `ExportStrategy`s, or a single one, or None. train_steps_per_iteration: (applies only to continuous_train_and_evaluate). Perform this many (integer) number of train steps for each training-evaluation iteration. With a small value, the model will be evaluated more frequently with more checkpoints saved. If `None`, will use a default value (which is smaller than `train_steps` if provided). Raises: ValueError: if `estimator` does not implement Estimator interface, or if export_strategies has the wrong type. """ def __init__(self, estimator, train_input_fn, eval_input_fn, train_steps=None, eval_steps=10, train_hooks=None, eval_hooks=None, eval_delay_secs=0, continuous_eval_throttle_secs=60, eval_every_n_steps=1, delay_workers_by_global_step=False, export_strategies=None, train_steps_per_iteration=100): if not isinstance(estimator, Estimator): raise ValueError("`estimator` must implement `Estimator`.") super(Experiment, self).__init__() # Immutable fields. 
self._estimator = estimator self._train_steps = train_steps self._eval_steps = eval_steps self._set_export_strategies(export_strategies) self._train_hooks = train_hooks[:] if train_hooks else [] self._eval_hooks = eval_hooks[:] if eval_hooks else [] self._train_input_fn = train_input_fn self._eval_input_fn = eval_input_fn self._eval_delay_secs = eval_delay_secs self._continuous_eval_throttle_secs = continuous_eval_throttle_secs self._eval_every_n_steps = eval_every_n_steps self._delay_workers_by_global_step = delay_workers_by_global_step if train_steps_per_iteration is not None and not isinstance(train_steps_per_iteration, int): raise ValueError("`train_steps_per_iteration` must be an integer.") self._train_steps_per_iteration = train_steps_per_iteration @property def estimator(self): return self._estimator @property def train_steps(self): return self._train_steps @property def eval_steps(self): return self._eval_steps def _set_export_strategies(self, values): # pylint: disable=missing-docstring export_strategies = [] if not values: self._export_strategies = () return if isinstance(values, export_strategy.ExportStrategy): export_strategies.append(values) else: for value in values: if not isinstance(value, export_strategy.ExportStrategy): raise ValueError( "`export_strategies` must be an ExportStrategy, an iterable of " "ExportStrategy, or `None`, found {}.".format(value)) export_strategies.append(value) self._export_strategies = tuple(export_strategies) def reset_export_strategies(self, new_export_strategies=None): """Resets the export strategies with the `new_export_strategies`. Args: new_export_strategies: A new list of `ExportStrategy`s, or a single one, or None. Returns: The old export strategies. 
""" old_export_strategies = self._export_strategies self._set_export_strategies(new_export_strategies) return old_export_strategies def extend_train_hooks(self, additional_hooks): """Extends the hooks for training.""" self._train_hooks.extend(additional_hooks) def extend_eval_hooks(self, additional_hooks): """Extends the hooks for training.""" self._eval_hooks.extend(additional_hooks) def _start_server(self): """Creates, starts, and returns a server_lib.Server.""" config = self._estimator.config if (not config.cluster_spec or not config.task_type or not config.master or config.task_id is None): raise ValueError("Could not start server; be sure to specify " "cluster_spec, task_type, master, and task in " "RunConfig or set the TF_CONFIG environment variable.") server = server_lib.Server(config.cluster_spec, job_name=config.task_type, task_index=config.task_id, config=config.tf_config, start=False) server.start() return server def _call_train(self, input_fn=None, steps=None, hooks=None, max_steps=None): return self._estimator.train( input_fn=input_fn, steps=steps, max_steps=max_steps, hooks=hooks) def _call_evaluate(self, input_fn=None, steps=None, name=None, checkpoint_path=None, hooks=None): return self._estimator.evaluate( input_fn=input_fn, steps=steps, name=name, checkpoint_path=checkpoint_path, hooks=hooks) def _has_training_stopped(self, eval_result): """Determines whether the training has stopped.""" if not eval_result: return False global_step = eval_result.get(ops.GraphKeys.GLOBAL_STEP) return global_step and self._train_steps and global_step >= self._train_steps def _continuous_eval(self, input_fn, name, delay_secs, throttle_delay_secs, evaluate_checkpoint_only_once=True, continuous_eval_predicate_fn=None): """Run continuous eval. Runs infinite eval on the evaluation data set. This function starts evaluating after `delay_secs` seconds and then runs no more than one evaluation (with `self._eval_steps` steps each time) per `throttle_delay_secs`. 
If `train_steps` is not None, will return after global_step reaches `train_steps`. Args: input_fn: The input to use for this eval. name: A string appended to the folder name of evaluation results. delay_secs: Start evaluating after this many seconds. If None, defaults to self._eval_delay_secs. throttle_delay_secs: Do not re-evaluate unless the last evaluation was started at least this many seconds ago. If None, defaults to self._continuous_eval_throttle_secs. evaluate_checkpoint_only_once: Whether to skip evaluation of checkpoints that have already been evaluated. Default is `True`. continuous_eval_predicate_fn: A predicate function determining whether to continue eval after each iteration. `predicate_fn` takes the evaluation results as arguments. At the beginning of evaluation, the passed eval results will be None so it's expected that the predicate function handles that gracefully. When `predicate_fn` is not specified, continuous eval will run in an infinite loop (if `train_steps` is None) or exit once global step reaches `train_steps`. Raises: ValueError: if `continuous_eval_predicate_fn` is neither None nor callable. """ if continuous_eval_predicate_fn is not None and not callable(continuous_eval_predicate_fn): raise ValueError("`continuous_eval_predicate_fn` must be a callable, or None.") if delay_secs is None: delay_secs = self._eval_delay_secs if throttle_delay_secs is None: throttle_delay_secs = self._continuous_eval_throttle_secs if delay_secs: logging.info("Waiting {} secs before starting eval.".format(delay_secs)) time.sleep(delay_secs) previous_path = None eval_result = None last_warning_time = 0 while not continuous_eval_predicate_fn or continuous_eval_predicate_fn(eval_result): # Exit if we have already reached number of steps to train. 
if self._has_training_stopped(eval_result): logging.info("Exiting continuous eval, global_step={} >= train_step={}".format( eval_result[ops.GraphKeys.GLOBAL_STEP], self._train_steps)) return start = time.time() error_msg = None latest_path = saver.latest_checkpoint(self._estimator.model_dir) if not latest_path: error_msg = "Estimator is not fitted yet. " \ "Will start an evaluation when a checkpoint is ready." elif evaluate_checkpoint_only_once and latest_path == previous_path: error_msg = "No new checkpoint ready for evaluation." if error_msg: # Print warning message every 10 mins. eval_result = {} if time.time() - last_warning_time > 600: logging.warning(error_msg) last_warning_time = time.time() else: eval_result = self._call_evaluate(input_fn=input_fn, steps=self._eval_steps, name=name, checkpoint_path=latest_path, hooks=self._eval_hooks) # Ensure eval result is not None for next round of evaluation. if not eval_result: eval_result = {} self._maybe_export(eval_result, checkpoint_path=latest_path) # Clear warning timer and update last evaluated checkpoint last_warning_time = 0 previous_path = latest_path duration = time.time() - start if duration < throttle_delay_secs: difference = throttle_delay_secs - duration logging.info("Waiting {} secs before starting next eval run.".format(difference)) time.sleep(difference) def _prepare_train(self, delay_secs): start = time.time() # Start the server, if needed. It's important to start the server before # we (optionally) sleep for the case where no device_filters are set. # Otherwise, the servers will wait to connect to each other before starting # to train. We might as well start as soon as we can. 
config = self._estimator.config if (config.environment != run_config.Environment.LOCAL and config.environment != run_config.Environment.GOOGLE and config.cluster_spec and config.master): self._start_server() extra_hooks = [] if delay_secs is None: task_id = self._estimator.config.task_id or 0 if self._delay_workers_by_global_step: # Wait 5500 global steps for the second worker. Each worker waits more # then previous one but with a diminishing number of steps. waiting_time = int(8000.0 * math.log(task_id + 1)) extra_hooks.append(basic_session_run_hooks.GlobalStepWaiterHook(waiting_time)) delay_secs = 0 else: # Wait 5 secs more for each new worker up to 60 secs. delay_secs = min(60, task_id * 5) if delay_secs > 0: elapsed_secs = time.time() - start remaining = delay_secs - elapsed_secs logging.info("Waiting {} secs before starting training.".format(remaining)) time.sleep(delay_secs) return delay_secs, extra_hooks def train(self, delay_secs=None): """Fit the estimator using the training data. Train the estimator for `self._train_steps` steps, after waiting for `delay_secs` seconds. If `self._train_steps` is `None`, train forever. Args: delay_secs: Start
checked FIRST, then the number of stars #is compared along with ZWARNING and the number of other QSOs. #Failing these checks a QFLAG classification is assigned (meaning VI later). if tQS_check >= 2: out_Qflag = 'STAR' elif ((tQS_check < 2) & (zw_in == 0)): out_Qflag = 'QSO' else: out_Qflag = 'QFLAG' #Send the classification flag back to obj_class() loop that called it. return out_Qflag ################################################################################ # # # FUNCTION: OBJECT CLASSIFICATION MAIN # # PURPOSE: This is the function that actually iterates through all of the # # objects in the cutdown catalog file input by the user. It will call# # gal3_confirm() and qso_confirm() in different for loops. # # # # ACCEPTS: ifile - The name of the input cutdown catalog file, from cat_cut() # # RETURNS: None # # INPUTS: None # # OUTPUTS: --A new FITS catalog. The spAll file is recreated with a new # # column up front called 'AUTOCLASS_DR14Q' which holds the # # classification flags from mask_arr. # # --User-friendly text output about current progress. # # OUTPUTS TO: FITS file: Folder where the script was called from. # # TEXT : Screen output. # # CALLED BY: None # # # ################################################################################ def obj_class(ifile): #Open the cutdown catalog file and find the total number of objects. data = fits.open(ifile)[1].data dnum = len(data) #Counts the number of objects by type at the beginning for feedback and #benchmark purposes. Outputs this to the screen. 
nStar = len(np.where(data['CLASS'] == 'STAR')[0]) nGal = len(np.where(data['CLASS'] == 'GALAXY')[0]) nQSO = len(np.where(data['CLASS'] == 'QSO')[0]) print('\n') print('----------------------------------------') print('Number of Starting Objects : '+str(dnum)) print('Number of Stars before : '+str(nStar)) print('Number of Galaxies before : '+str(nGal)) print('Number of QSOs before : '+str(nQSO)) print('----------------------------------------') print('\n') #This is a placeholder array to hold local flags based on the algorithm. It #will become a new column in the output FITS file called 'AUTOCLASS_DR14Q'. mask_arr = np.chararray(dnum,itemsize=6,unicode=True) #First Classification | STEP 1 in pseudocode. wStar = np.where(data['CLASS'] == 'STAR')[0] mask_arr[wStar] = 'STAR' #Second Classification | STEP 2 in pseudocode. wGal = np.where((data['CLASS'] == 'GALAXY')&(data['Z'] < 1))[0] mask_arr[wGal] = 'GALAXY' #spZall Classifications #Classifications 3 and 4 require the spZall files to be checked. To minimize #the number of times a file is loaded into memory, a list of necessary files #for the GALAXY classification (3) is made and downloaded in turn. While #these files are present, the QSO objects are also checked so that the files #won't need to be loaded again later. #GALAXY-CLASS objects wG2 = np.where((data['CLASS'] == 'GALAXY')&(data['Z'] >= 1))[0] count = len(wG2) #QSO-CLASS objects wQSO = np.where(data['CLASS'] == 'QSO')[0] countQ = len(wQSO) #Objects are identified by a unique combination of plate number, mjd, and #fiberid. This generates a list of single values for each object, so unique #combinations can be found. This allows the use of np.where() calls instead #of nested for loops later. pm = (data['PLATE']**2) + (data['MJD'] - 1) #This finds the unique combinations of plate and mjd which define the spZall #files. In each file each fiberid will have a number of pipeline fits. #These are the unique plate/mjd combos for GALAXIES. 
uniq,uniqindices = np.unique(pm[wG2], return_index=True) ugplates = data['PLATE'][wG2[uniqindices]] umjd = data['MJD'][wG2[uniqindices]] #This does the same as above, but for QSOs. Here this is used not to find #the files for loading, but to identify which objects will appear in the #files that are loaded for the GALAXY-3 classification. uQ,uQind = np.unique(pm[wQSO],return_index=True) uqplates = data['PLATE'][wQSO[uQind]] uqmjd = data['MJD'][wQSO[uQind]] #Makes a list of file names. This is passed to the for loop later. file_list = np.chararray(len(ugplates),itemsize=23,unicode=True) for j in range(len(ugplates)): file_list[j] = 'spZall-{}-{}.fits'.format(ugplates[j],umjd[j]) #A counter used by the for loop. file_count = len(file_list) #Initializing the plate, mjd, fiber, run2d variables. plate = 0 mjd = 0 fiber = 0 run2d = 0 #Where are the spZall files stored uloc0 = os.environ.get('BOSS_SPECTRO_REDUX') #This loop will go through the files identified in file_list (from GAL-3 #plate/mjd combos). It will iterate through all of the spZall files. While #doing so it outputs a single line about its progress that is overwritten. for i in range(file_count): #Define the PLATE and MJD we are working with on this iteration. plate = ugplates[i] mjd = umjd[i] run2d = data['RUN2D'][wG2[uniqindices[i]]] #Get the file name for the current spZall file. z_file = '{}/{}/{}/{}/{}'.format(uloc0,run2d,plate,run2d,file_list[i]) #dall is the spZall file currently being checked. dall = fits.open(z_file)[1].data #Each object, identified by plate/mjd/fiber has 134 separate fits in the #spZall file. These will find only the fibers appearing in the spZall #file we are currently using. objs = np.where((data['PLATE'][wG2] == plate)&(data['MJD'][wG2] == mjd))[0] fibers,fiberind = np.unique(data['FIBERID'][wG2[objs]],return_index=True) #Each spZall file holds 1000 fibers. 
This iterates through the FIBERIDs #identified in the previous step, then passes the information to #the gal3_confirm() function above. for h in range(len(fibers)): fiber_t = fibers[h] mask_arr[wG2[objs[fiberind[h]]]] = gal3_confirm(dall,plate,mjd,fiber_t) #This does the same as above but for any QSOs that might be in the same #files. objsQ = np.where((data['PLATE'][wQSO] == plate)&(data['MJD'][wQSO] == mjd))[0] fibQ,fibQind = np.unique(data['FIBERID'][wQSO[objsQ]],return_index=True) #Again, this iterates through the FIBERIDs. ZWARNING is found first, #then everything is passed to the qso_confirm() function above. for k in range(len(fibQ)): fiber_q = fibQ[k] z_warn = data['ZWARNING'][wQSO[objsQ[fibQind[k]]]] mask_arr[wQSO[objsQ[fibQind[k]]]] = qso_confirm(dall,plate,mjd,fiber_q,z_warn) #This outputs the single line that is overwritten as files are completed st = '\rFile: {} | spZall Complete: {}/{}'.format(file_list[i],i+1,file_count) sys.stdout.write(st) sys.stdout.flush() #Leftover QSOs #This is for any classification 4 QSOs that weren't covered by the GAL-3 #list of spZall files. It is the same as above, but only for QSOs. wlq = np.where((data['CLASS'] == 'QSO')&(mask_arr == ''))[0] ulq,ulqindex = np.unique(pm[wlq],return_index=True) ulplates = data['PLATE'][wlq[ulqindex]] ulmjd = data['MJD'][wlq[ulqindex]] #Creates a new list of spZall files, for the leftover QSOs. file_lq = np.chararray(len(ulplates),itemsize=23,unicode=True) for j in range(len(ulplates)): file_lq[j] = 'spZall-{}-{}.fits'.format(ulplates[j],ulmjd[j]) file_lq_count = len(file_lq) #Reinitialize all of the object address information. Don't want old data #messing us up. 
plate = 0 mjd = 0 fiber = 0 run2d = 0 print('\n') print('----------------------------------------') print('LEFTOVER QSOs') print('----------------------------------------') for i in range(file_lq_count): plate = ulplates[i] mjd = ulmjd[i] run2d = data['RUN2D'][wlq[ulqindex[i]]] z_file = '{}/{}/{}/{}/{}'.format(uloc0,run2d,plate,run2d,file_lq[i]) dall = fits.open(z_file)[1].data objsQ = np.where((data['PLATE'][wlq] == plate)&(data['MJD'][wlq] == mjd))[0] fibQ,fibQind = np.unique(data['FIBERID'][wlq[objsQ]],return_index=True) for k in range(len(fibQ)): fiber_q = fibQ[k] z_warn = data['ZWARNING'][wlq[objsQ[fibQind[k]]]] mask_arr[wlq[objsQ[fibQind[k]]]] = qso_confirm(dall,plate,mjd,fiber_q,z_warn) st1 = '\rFile: {} | spZall Complete: {}/{}'.format(file_lq[i],i+1,file_lq_count) sys.stdout.write(st1) sys.stdout.flush() #TESTING FEEDBACK #These lines are only for feedback before and after for quick reference on how #many objects need VI. We're hoping for no more than 7% neededing followup. wflagged = np.where((mask_arr == 'QFLAG') | (mask_arr == ''))[0] nflagged = len(wflagged) wt2 = np.where(mask_arr == 'STAR')[0] wt3 = np.where(mask_arr == 'GALAXY')[0] wt4 = np.where(mask_arr == 'QSO')[0] prct_vi = (float(nflagged) / float(dnum)) * 100 print('\n') print('----------------------------------------') print('Number of Stars after : '+str(len(wt2))) print('Number of Galaxies after : '+str(len(wt3))) print('Number of QSOs after : '+str(len(wt4))) print('----------------------------------------') print('Number to be Visually inspected : '+str(nflagged)) print('Percent to be Visually inspected: {0:.2f}%'.format(prct_vi)) print('\n') #After classification flags are completed, find any objects still left Unclassified #or that have a QFLAG (which is an unclassified QSO). Mark all objects for #visual inspection followup. mask_arr[wflagged] = 'VI' #We also want to visually inspect QSOs with pipeline Z > 3.5. 
wzu = np.where((mask_arr=='QSO')&(data['Z']>3.5))[0] mask_arr[wzu] = 'VI' min_mjd = np.amin(data['MJD']) max_mjd = np.amax(data['MJD']) mjd_range = '{0:05d}-{1:05d}'.format(min_mjd,max_mjd) spvname = 'spAll-v5_13_0' #Write out a fits file with this new classification column. Also modifies #the primary HDU header (extension 0) to keep information on what the Input #spAll file was, what DR this is for, and what program ran the damn thing. prim_hdrc = fits.Header() prim_hdrc['ALG_FILE']=('spAll_class_full.py','Algorithm file used') prim_hdrc['DATA_REL']=(spvname,'spAll file used') prim_hdrc['MJD_RNGE']=(mjd_range,'MJD Range of objects') prim_hduc = fits.PrimaryHDU(header=prim_hdrc) #Tack this new classification column onto the columns from the cutdown catalog. mask_col = fits.ColDefs([fits.Column(name='AUTOCLASS_DR14Q',format='6A',array=mask_arr)]) data_cols = data.columns data_hduc = fits.BinTableHDU.from_columns(mask_col + data_cols) ofile_dtag = time.strftime('%Y%m%d') data_ofc = fits.HDUList([prim_hduc,data_hduc]) out_file_namec = '../data/DR16Q_autoclass_{}'.format(ofile_dtag) classified_name = ct.fet(data_ofc,out_file_namec) return classified_name #############################CATALOG COMBINE#################################### def cat_combine(inrec_name): #These are just the names of the columns that appear in the spAll files. spcolkeep = np.array(['RA','DEC','AUTOCLASS_DR14Q','THING_ID','PLATE','MJD','FIBERID', 'Z','ZWARNING','BOSS_TARGET1','EBOSS_TARGET0','EBOSS_TARGET1', 'EBOSS_TARGET2','ANCILLARY_TARGET1','ANCILLARY_TARGET2',
from mongoengine import * from mongoengine.connection import _get_db from datetime import datetime from bson import ObjectId import csv import collections import helpers import unittest default_currency = "CAD" # Connect the DB, just need to install mongoDB, might need to create the DB? connect('ledger-simple-test2') # Exceptions are transaction that comes with same description but could go to different account. # they hare going to an account that we are reconciling also. EXCEPTIONS = [{'account_number': 213305, 'account_type': 'PCA', 'description': 'Transfer - AccesD - Internet', 'others_accounts': [709733, 716528] }] ACCOUNT_TYPE = {'BC': 'Bank and Cash', 'FA': 'Fixed Assets', 'NCL': 'Non-Current Liability', 'E': 'Expense', 'CA': 'Current Assets', 'R': 'Receivable', 'CL': 'Current Liability', 'P': 'Payable', 'I': 'Income', 'OI': 'Other Income', 'DC': 'Direct Cost', 'CYE': 'Current Year Earning', 'SUM': 'Sum', } # ACTION_VALUES = {'Previous balance': self.update_previous_balance, # 'Purchases/debits': self.update_purchase, # 'Payments/credits': self.update_payments, # 'New current balance ($):': self.update_new_balance, # 'Credit charges ($)': self.update_frais_credits, # #'Statement date:': self.update_name, # } def init_counters(): # Init the DB for counters, only required initially. 
counter1 = Counters(id2='UserId', sequence_value=0) counter1.save() counter3 = Counters(id2='JournalEntryId', sequence_value=0) counter3.save() counter4 = Counters(id2='StatementLineId', sequence_value=0) counter4.save() counter5 = Counters(id2='StatementId', sequence_value=0) counter5.save() counter6 = Counters(id2='TransactionId', sequence_value=0) counter6.save() counter7 = Counters(id2='MonthlyBillId', sequence_value=0) counter7.save() class Counters(Document): id2 = StringField(max_length=20) sequence_value = IntField() def getNextSequenceValue(sequenceName): # sequenceDocument = Database.find_one_update('counters', {"id": sequenceName}, {"$inc":{"sequence_value":1}}) counter = Counters.objects.get(id2=sequenceName) # print("len(counter) = ", len(counter)) Counters.objects(id2=sequenceName).update_one(inc__sequence_value=1) counter.reload() sequenceDocument = counter.sequence_value # print("sequence number for %s is now : %s" % (sequenceName, sequenceDocument)) return sequenceDocument class User(Document): id_ = IntField(unique=True, required=True) name = StringField(unique=True) def init_2_users(): user1_name = input("What is the name of the user 1 >> ") user2_name = input("What is the name of the user 2 >> ") user1 = User.add_user(user1_name) user2 = User.add_user(user2_name) def add_user(name): new_user = User(id_=getNextSequenceValue('UserId'), name=name) new_user.save() return new_user def get_user(name): user1 = User.objects.get(name=name) return user1 class Account(Document): """This is the Chart of Account accounts items""" number = IntField(min_value=100000, max_value=999999, unique=True) parent_account = ReferenceField("self", default=None) # Not in use child_account = ReferenceField("self", default=None) # Not in use description = StringField(max_length=200, required=True) type_ = StringField(max_length=3, choices=ACCOUNT_TYPE) user_ratio = DictField(default=None) # Needs testing, should be a dict account_number = IntField(default=None) account_type 
= StringField(max_length=3) reconciled = BooleanField(default=False) def header(): return "%6s %40s %5s %12s %14s %12s" %('number', 'description', 'type_', 'user_ratio', 'account_number', 'account_type') def __str__(self): return "%6s %40s %5s %12s %14s %12s" %(self.number, self.description, self.type_, self.user_ratio, self.account_number, self.account_type) def add_account(number, parent_account, child_account, description, type_, user_ratio, account_number=None, account_type=None, reconciled=False): new_account = Account(number=number, parent_account=parent_account, child_account=child_account, description=description, type_=type_, user_ratio=user_ratio, account_number=account_number, account_type=account_type, reconciled=reconciled ) new_account.save() print(new_account.id) return new_account def get_account(account_number, account_type): account1 = Account.objects.get(account_number=account_number, account_type=account_type) return account1 def get_account_by_number(number): account1 = Account.objects.get(number=number) return account1 def import_accounts_from_file(filename): with open(filename, "r") as the_file: csv_reader = csv.reader(the_file, delimiter=',') line_count = 0 last_sum = None for row in csv_reader: if line_count == 0: print('skipping first line') line_count += 1 continue else: if row[4] == 'Sum': parent_account = None else: parent_account = last_sum # Get ratios if row[5] != '': user_ratio = {str(User.objects[0].id_): float(row[5])/100, str(User.objects[1].id_): float(row[6])/100} else: user_ratio = None # Account type and number for bank account if row[2] != '': account_number = int(row[2]) account_type = row[3] else: account_type = None account_number = None #facultative reconciled parameter if row[7] in ['TRUE', 'True', 'y', 'Y']: reconciled_found = True else: reconciled_found = False dict_values = list(ACCOUNT_TYPE.values()).index(row[4]) print("dict_values = ", dict_values) dict_keys = list(ACCOUNT_TYPE.keys()) new_account = 
Account.add_account(number=row[0], parent_account=parent_account, child_account=None, description=row[1], type_=dict_keys[dict_values], user_ratio=user_ratio, account_number=account_number, account_type=account_type, reconciled=reconciled_found ) new_account.save() if row[4] == 'Sum': last_sum = new_account line_count += 1 class Statement(Document): id_ = IntField(required=True) date = DateTimeField(default=datetime.now()) stop_date = DateTimeField(default=None) filename = StringField(required=True) start_balance = FloatField() end_balance = FloatField() closed = BooleanField(default=False) #lines = ListField(ReferenceField(StatementLine)) def init_statement(filename,): # check if already exist. new_statement = Statement(id_=getNextSequenceValue('StatementId'), date=None, filename=filename, start_balance=None, end_balance=None ) new_statement.name = None new_statement.transactions = [] new_statement.start_date = None new_statement.stop_date = None new_statement.previous_balance_report = 0 # self.previous_balance = 0 new_statement.purchase_report = 0 new_statement.purchase = 0 new_statement.payments_report = 0 new_statement.payments = 0 new_statement.frais_credits_report = 0 new_statement.frais_credits = 0 new_statement.new_balance_report = 0 new_statement.new_balance = 0 new_statement.save() print("new statement saved: ", new_statement.id) return new_statement def print(self): # TODO: to be arrange to work print("MonthlyBill for : %s to %s" %(self.start_date, self.stop_date)) print("%10s, %30s, %10s, %3s" %('date', 'desctiption', 'amount', 'typed')) for transaction in self.transactions: print("%10s, %30s, %8.2f, %3s" %(transaction.date, transaction.description, transaction.amount, transaction.typed)) def recap(self): # TODO: to be arrange to work self.purchase = 0 self.payments = 0 self.frais_credits = 0 self.new_balance = 0 grouped_expense = {} # expense grouped by category distributed_expense = {} # expense summed by paying entity for key in CATEGORY.keys(): if 
CATEGORY[key]['paying'] == True: distributed_expense[key] = {"expense": 0, "payment": 0} for transaction in self.transactions: if transaction.typed not in grouped_expense.keys(): grouped_expense[transaction.typed] = 0 if transaction.amount > 0: grouped_expense[transaction.typed] += transaction.amount if transaction.typed != "fc": self.purchase += transaction.amount else: self.frais_credits += transaction.amount if transaction.typed in RATIO.keys(): for key in RATIO[transaction.typed].keys(): distributed_expense[key]["expense"] += RATIO[transaction.typed][key]*transaction.amount else: distributed_expense[transaction.typed]["expense"] += transaction.amount elif transaction.amount < 0: self.payments += transaction.amount if transaction.typed in RATIO.keys(): for key in RATIO[transaction.typed].keys(): distributed_expense[key]["payment"] += RATIO[transaction.typed][key]*transaction.amount else: distributed_expense[transaction.typed]["payment"] += transaction.amount else: print("zero value transaction skipping.") print(" ") print("Printing recap for transaction between %s and %s." 
%(str(self.start_date)[:-9], str(self.stop_date)[:-9])) for element in grouped_expense: print(element, grouped_expense.get(element)) for element in distributed_expense: print(element, distributed_expense.get(element)) self.new_balance = self.previous_balance_report + self.purchase + self.payments + self.frais_credits print(" ") print("%20s %10s %10s" %('Total', 'Calculated', 'Actual')) print("%20s %10s %10.2f" %('Previous balance', "-", self.previous_balance_report)) print("%20s %10.2f %10.2f" %('Purchase/debits', self.purchase, self.purchase_report)) print("%20s %10.2f %10.2f" %('Payments/credits', self.payments, self.payments_report)) print("%20s %10.2f %10.2f" %('Frais de credits', self.frais_credits, self.frais_credits_report)) print("%20s %10.2f %10.2f" %('New current balance', self.new_balance, self.new_balance_report)) def update_purchase(self, value): value = value.replace(',', '') self.purchase_report = float(value) print("Updated purchase = ", float(value)) def update_payments(self, value): value = value.replace(',', '') self.payments_report = float(value) print("update_payments = ", float(value)) def update_new_balance(self, value): value = value.replace(',', '') self.new_balance_report = float(value) print("update_new_balance = ", float(value)) def update_previous_balance(self, value): value = value.replace(',', '') if self.previous_balance_report == 0: self.previous_balance_report = float(value) print("previous_balance = ", self.previous_balance_report) def update_frais_credits(self, value): value = value.replace(',', '') self.frais_credits_report = float(value) print("update frais de credits = ", value) # def update_name(self, value): # self.name = value # print("Updated name = ", self.name) def update_values(self, list_): ACTION_VALUES = {'Previous balance': self.update_previous_balance, 'Purchases/debits': self.update_purchase, 'Payments/credits': self.update_payments, 'New current balance ($):': self.update_new_balance, 'Credit charges ($)': 
self.update_frais_credits, #'Statement date:': self.update_name, } action = ACTION_VALUES.get(list_[0]) if action: action(list_[1]) def import_statement_from_file(filename, delimiter, header=False): """import a file into a statement and statement lines, requires the filename and delimiter. header = True mean skip the first line. """ created_statement = False current_statement = None if Statement.objects(): # check if a statement objects already exists. if len(list(Statement.objects(filename=filename))) > 1: # if a statement already exist confirm it has a different filename print("Filename already imported.") choice = input("Do you want to continue with file import? [yes] / no >> ") if choice == 'no' or choice == 'n': return None elif choice == 'yes' or choice == 'y' or choice == "": current_statement = Statement.objects.get(filename=filename) if current_statement == None: #create the new statement instance. current_statement = Statement.init_statement(filename) created_statement = True current_statement_list = list() current_statement_list.append(current_statement) with open(filename) as the_file: csv_reader = csv.reader(the_file, delimiter=delimiter) line_counter = 0 first_line = True header_passed = False for line in csv_reader: if header == True and header_passed == False: # skip first line if header = true header_passed = True continue if len(line) > 1: # find the first line that is now empty. if created_statement == True: if len(line) == 15: if line[14] not in [None, 0, '']: destination_account = int(line[14]) else: destination_account = None # check for description with amount, remove amount as it would wrongly not find accounts afterwards. 
if '$' in line[5]: description_list = line[5].split(':') split_description = description_list[0] else: split_description = line[5] new_line = StatementLine.create_line(date=line[3], account_number=line[1], account_type=line[2], line_number=line[4], description=split_description, credit=StatementLine.to_float_or_zero(line[7]), debit=StatementLine.to_float_or_zero(line[8]), interest=StatementLine.to_float_or_zero(line[9]), advance=StatementLine.to_float_or_zero(line[11]), reimbursement=StatementLine.to_float_or_zero(line[12]), balance=StatementLine.to_float_or_zero(line[13]), statement=current_statement, destination_account=destination_account, ) # add newly imported line to statement. #current_statement.lines.append(new_line) elif created_statement == False: line_number_list = [] line_number_list.append(line[4]) new_line = list(StatementLine.objects(statement__in=current_statement_list).filter(line_number__in=line_number_list, account_number__in=[line[1]], account_type__in=[line[2]]))[0] new_line.date = line[3] new_line.account_number = line[1] new_line.account_type = line[2] new_line.line_number = line[4] new_line.description
import boto3 import json import re import math import os.path from datetime import * import common import compare_output from . import ecs_compares def main(text): regionList = ['us-east-1', 'us-west-1', 'us-west-2', 'eu-west-1', 'ap-northeast-1', 'ap-southeast-2'] region = regionList[0] cluster = "" ret = "" text.pop(0) # remove command name if len(text) == 0: return "You did not supply a query to run" if text[0] == 'help': return information() awsKeyId = None awsSecretKey = None awsSessionToken = None the_account = None tokens = [] if 'in' in text: while text[-1] != 'in': tokens.append(text.pop()) extractedRegion = re.search(r'[a-z]{2}-[a-z]+-[1-9]{1}', " ".join(tokens)) if extractedRegion: region = extractedRegion.group() tokens.remove(region) text.remove('in') # load default account from config config = None if os.path.isfile("./aws.config"): with open("aws.config") as f: config = json.load(f) if config.get('ecs'): for account in config['ecs']['Accounts']: if account["RoleArn"] == "" and account['AccountName'] == "": loadedApplications = account['Clusters'] if len(tokens) > 0 and config != None: for account in config['ecs']['Accounts']: if account['AccountName'] in tokens: the_account = account['AccountName'] tokens.remove(account['AccountName']) if account['RoleArn']: sts_client = boto3.client('sts') assumedRole = sts_client.assume_role(RoleArn=account['RoleArn'], RoleSessionName="AssumedRole") awsKeyId = assumedRole['Credentials']['AccessKeyId'] awsSecretKey = assumedRole['Credentials']['SecretAccessKey'] awsSessionToken = assumedRole['Credentials']['SessionToken'] break if len(tokens) > 0: return "Could not resolve " + " ".join(tokens) elif len(tokens) > 0: return "Could not locate aws.config file" session = boto3.session.Session(aws_access_key_id=awsKeyId, aws_secret_access_key=awsSecretKey, aws_session_token=awsSessionToken) if 'regions' in text: if 'clusters' in text: for region in regionList[:]: ecs = session.client("ecs", region_name=region) ret = 
ecs.list_clusters() if len(ecs.list_clusters()['clusterArns']) == 0: regionList.remove(region) return " ".join(regionList) ecs = session.client("ecs", region_name=region) if 'list' in text: text.remove("list") ret = "" if 'clusters' in text: clusters = ecs.list_clusters()['clusterArns'] if len(clusters) == 0: return "There are no clusters in this region: " + region for cluster in clusters: if the_account and account['RoleArn'] == "": if account['cluster_keyword'] in cluster: ret = ret + cluster.split('/')[-1] + '\n' else: ret = ret + cluster.split('/')[-1] + '\n' return ret # see if tasks is in user command elif tasks_check_text(text): tasks_lookup_term = tasks_get_lookup_term(text) fields = [] attachments = [] if not text: return ( "I need a cluster name to complete the requested operation. To view the cluster names, use 'jarvis ecs list clusters <region>'") if 'running' in text: text.remove("running") try: resulting_array = get_task_list(cluster=text[0], ecs=ecs) query_result = ecs.describe_tasks(cluster=text[0], tasks=resulting_array) instance_task_families = parse_tasks(query_result['tasks'], tasks_lookup_term, ecs) if not instance_task_families: return "No tasks where found matching the lookup term for tasks. To look up a particular task, use 'jarvis ecs list tasks---<optional term> running <cluster> [in <region/account>]' " for tasks in instance_task_families: fields.append({ 'title': tasks, 'value': 'Version: ' + str(instance_task_families[tasks]['version']) + '\nCount: ' + str(instance_task_families[tasks]['count']), 'short': True }) attachments.append({ 'fallback': 'List of Running Tasks', 'title': 'List of Running Tasks', 'fields': fields }) return attachments except Exception as e: print(("exception in tasks option is ", e)) return "Cluster " + text[0] + " was not found in region " + region else: return "No valid option for command jarvis ecs list tasks found. Please review /jarvis --help and try again." 
elif 'services' in text: text.remove("services") if len(text) == 0: return "I need a cluster name to complete the requested operation. To view the cluster names, use 'jarvis ecs list clusters <region>'" attachments = [] fields = [] try: sPaginator = ecs.get_paginator('list_services') sIterator = sPaginator.paginate(cluster=text[0]) for cluster in sIterator: services = [] for service in cluster['serviceArns']: services.append(service) if len(services) == 0: return "There doesn't seem to be any services in the cluster " + text[0] services_desc = ecs.describe_services(cluster=text[0], services=services) for service in services_desc['services']: image = ecs.describe_task_definition(taskDefinition=service['taskDefinition']) imagename = image['taskDefinition']['containerDefinitions'][0]['image'].split(':')[-1] servicename = service['serviceName'].split('/')[-1] ret = ret + servicename + "\t\t" + imagename + "\n" fields.append({ 'title': servicename, 'value': 'Version: ' + imagename, 'short': True }) attachments.append({ 'fallback': 'Service List', 'title': 'List of Services', 'fields': fields }) return attachments except Exception as e: print(e) return "Cluster " + text[0] + " was not found in region " + region elif 'compare' in text: text.remove("compare") if "with" in text and len([_f for _f in text if _f]) > 6 and len([_f for _f in text if _f]) < 10: # extract arguments from text for master and team ecs data master_args = [_f for _f in text[:text.index("with")] if _f] team_args = [_f for _f in text[text.index("with") + 1:] if _f] master_args_eval = eval_args(master_args, regionList) team_args_eval = eval_args(team_args, regionList) if master_args_eval and team_args_eval: config = None # load config file if os.path.isfile("./aws.config"): with open("aws.config") as f: config = json.load(f) if config: master_data = get_in_ecs_compare_data(config, master_args, master_args_eval) team_data = get_in_ecs_compare_data(config, team_args, team_args_eval) else: return "Config 
file was not loaded" if master_data and team_data: # retrieves the json from super jenkins with all build link data superjenkins_data = common.get_superjenkins_data(config["General"]["script_tags"]["beginning_tag"], config["General"]["script_tags"]["ending_tag"], config["General"]["build_link"], config["General"]["my_build_key"]) compared_data = ecs_compares.main_ecs_check_versions(master_data, team_data, config["General"]["jenkins"][ "branch_equivalent_tags"], superjenkins_data, team_data['service_exclude_list']) attachments = compare_output.slack_payload(compared_data, team_data['team_name']) return attachments else: return "Values were not retrieved" else: return "Invalid region or account information entered" else: return "Missing information to complete comparison" elif 'describe' in text or 'desc' in text: cw = session.client('cloudwatch', region_name=region) text.pop(0) createGraph = False if "graph" in text: text.remove("graph") createGraph = True if len(text) == 1: clustername = text[0] clusters = ecs.describe_clusters(clusters=[clustername]) if clusters['failures']: return "I could not find the cluster specified: " + clustername attachments = [] clustercpu = cw.get_metric_statistics(Namespace="AWS/ECS", MetricName="CPUUtilization", Dimensions=[{'Name': 'ClusterName', 'Value': clustername}], StartTime=datetime.today() - timedelta(days=1), EndTime=datetime.today(), Period=1800, Statistics=['Average'], Unit='Percent') clustermem = cw.get_metric_statistics(Namespace="AWS/ECS", MetricName="MemoryUtilization", Dimensions=[{'Name': 'ClusterName', 'Value': clustername}], StartTime=datetime.utcnow() - timedelta(days=1), EndTime=datetime.utcnow(), Period=1800, Statistics=['Average'], Unit='Percent') cpudata = [] memdata = [] for datapoint in clustercpu['Datapoints']: cpudata.append([datapoint['Timestamp'], datapoint['Average']]) for datapoint in clustermem['Datapoints']: memdata.append([datapoint['Timestamp'], datapoint['Average']]) cpudata = sorted(cpudata, 
key=lambda x: x[0]) memdata = sorted(memdata, key=lambda x: x[0]) clustercpu = math.ceil(cpudata[0][1]) clustercpu = int(clustercpu) clustermem = math.ceil(memdata[0][1]) clustermem = int(clustermem) clusters = clusters['clusters'][0] fields = [{ 'title': 'Registered Instances', 'value': clusters['registeredContainerInstancesCount'], 'short': True }, { 'title': 'Active Services', 'value': clusters['activeServicesCount'], 'short': True }, { 'title': 'Running Tasks', 'value': clusters['runningTasksCount'], 'short': True }, { 'title': 'Pending Tasks', 'value': clusters['pendingTasksCount'], 'short': True }] if not createGraph: fields.append({ 'title': 'Memory Usage', 'value': str(clustermem) + "%", 'short': True }) fields.append({ 'title': 'CPU Usage', 'value': str(clustercpu) + "%", 'short': True }) attachments.append({ 'fallback': 'Cluster: ' + clusters['clusterName'], 'title': 'Cluster ' + clusters['clusterName'], 'fields': fields, 'color': 'good' }) if createGraph: attachments.append(common.create_graph('Graphing Cluster CPU and Memory Usage over 1 day', 'Cluster CPU', [i[1] for i in cpudata], 'Cluster Memory', [i[1] for i in memdata], [i[0].strftime("%I%M") for i in cpudata])) return attachments elif len(text) == 2: attachments = [] if len(text) < 2: return """I need a cluster name and a service name to complete the requested operation. 
To view the cluster names, use 'jarvis ecs list clusters <region>' To view the services, use 'jarvis ecs list services <cluster> <region>'""" matched = False matchedCount = 0 servicename = text[0] clustername = text[1] try: services = ecs.list_services(cluster=text[1])['serviceArns'] except Exception as e: print(e) return "Cluster " + text[1] + " was not found in region " + region for service in services: if text[0] in service and not matched: matched = True matchedCount += 1 try: services_desc = ecs.describe_services(cluster=text[1], services=[service]) except Exception as e: print(e) return "Cluster " + text[0] + " was not found in region " + region for service in services_desc['services']: image = ecs.describe_task_definition(taskDefinition=service['taskDefinition']) imagename = image['taskDefinition']['containerDefinitions'][0]['image'].split(':')[-1] servicename = service['serviceName'].split('/')[-1] attachments.append( { 'fallback': 'Service ' + servicename, 'title': servicename, 'fields': [{ 'title': 'Deployment', 'value': imagename, 'short': True }, { 'title': 'Updated At', 'value': service['deployments'][0]['updatedAt'].strftime("%Y-%m-%d %H:%M %z") , 'short': True }, { 'title': 'CPU Reservation', 'value': str(image['taskDefinition']['containerDefinitions'][0]['cpu']) + " Units", 'short': True }, { 'title': 'Memory Reservation', 'value': str( image['taskDefinition']['containerDefinitions'][0]['memory']) + " Megabytes", 'short': True }, { 'title': 'Running Tasks', 'value': service['runningCount'], 'short': True }], 'color': 'good' } ) elif text[0] in service and matched: matchedCount += 1 if matchedCount > 1: attachments.append({ 'fallback': 'Service ' + servicename, 'title': str(matchedCount) + ' Services Matched', 'text': 'If this is not the service you asked for, you can list the services using jarvis ecs list services', 'color': 'warning' }) if matched: if createGraph: servicecpu = cw.get_metric_statistics(Namespace="AWS/ECS", 
MetricName="CPUUtilization", Dimensions=[{'Name': 'ClusterName', 'Value': clustername}, {'Name': 'ServiceName', 'Value': servicename}], StartTime=datetime.today() -
"""Finds the values in the screen of input multimeter photo""" import argparse from datetime import datetime import cv2 import numpy as np from imutils import contours, is_cv2 from imutils.perspective import four_point_transform PARSER = argparse.ArgumentParser() PARSER.add_argument( "-i", "--image", help="the input image file.", required=True ) PARSER.add_argument( "-lrc", "--lowRangeColor", help="the low range values for hue, sat and val.", nargs="+", default=[21, 14, 125], type=int, ) PARSER.add_argument( "-hrc", "--highRangeColor", help="the high range values for hue, sat and val.", nargs="+", default=[79, 63, 185], type=int, ) PARSER.add_argument( "-yTB", "--yTopBot", help="Top to bottom porcentage to crop from the image.", default=0.1, type=int, ) PARSER.add_argument( "-yBT", "--yBotTop", help="Bottom to top porcentage to crop from the image.", default=0.8, type=int, ) PARSER.add_argument( "-xLR", "--xLeftRight", help="Left to right porcentage to crop from the image.", default=0.035, type=int, ) PARSER.add_argument( "-xRL", "--xRightLeft", help="Right to left porcentage to crop from the image.", default=0.87, type=int, ) PARSER.add_argument( "-rf", "--resizeFactor", help="Resize Factor to use on image.", default=1, type=int, ) PARSER.add_argument( "-d", "--debug", help="Debug mode? True or False", default="False", type=str, ) PARSER.add_argument( "-sp", "--screenPoints", help="Want to give screen points position? 
True or False\ [top_left, top_right, bottom_right, bottom_left]", default="False", type=str, ) ARGS = vars(PARSER.parse_args()) HSVL = np.array(ARGS["lowRangeColor"], np.uint8) HSVH = np.array(ARGS["highRangeColor"], np.uint8) YTB = ARGS["yTopBot"] YBT = ARGS["yBotTop"] XLR = ARGS["xLeftRight"] XRL = ARGS["xRightLeft"] RFACTOR = ARGS["resizeFactor"] DIGITS_LOOKUP = { (1, 1, 1, 0, 1, 1, 1): 0, (0, 0, 1, 0, 0, 1, 0): 1, (1, 0, 1, 1, 1, 0, 1): 2, (1, 0, 1, 1, 0, 1, 1): 3, (0, 1, 1, 1, 0, 1, 0): 4, (1, 1, 0, 1, 0, 1, 1): 5, (1, 1, 0, 1, 1, 1, 1): 6, (1, 0, 1, 0, 0, 1, 0): 7, (1, 1, 1, 1, 1, 1, 1): 8, (1, 1, 1, 1, 0, 1, 1): 9, (0, 0, 0, 0, 0, 0, 0): "", } SCREEN_AREA_THRESHOLD = 100000 DIST_THRESHOLD = 5.7 SEGMENT_AREA_THRESHOLD = 764 HSVL_LIST = [ np.array(ARGS["lowRangeColor"], np.uint8), [53, 76, 112], [52, 25, 120], [86, 80, 60], ] HSVH_LIST = [ np.array(ARGS["highRangeColor"], np.uint8), [103, 118, 235], [102, 80, 235], [98, 189, 129], ] def str2bool(argument_string): """Transform string to boolean value""" if argument_string.lower() in ("yes", "true", "t", "y", "1"): return True if argument_string.lower() in ("no", "false", "f", "n", "0"): return False raise argparse.ArgumentTypeError("Boolean value expected.") def color_screen_threshold(img, hsvl, hsvh): """Create a mask with the HSV plane""" hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV) screen_mask = cv2.inRange( hsv_img, np.array(hsvl, np.uint8), np.array(hsvh, np.uint8) ) kernel = np.ones((2, 2), np.uint8) screen_filter = cv2.dilate(screen_mask, kernel) if DEBUG: cv2.imshow("hsv", hsv_img) cv2.imshow("mask", screen_mask) cv2.imshow("filtered", screen_filter) cv2.waitKey(0) cv2.destroyAllWindows() return screen_filter def get_screen_area(screen_filtered): """Get the screen area, should be the biggest contour Area""" screen_cnts = cv2.findContours( screen_filtered, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE ) screen_cnts = screen_cnts[0] if is_cv2(or_better=True) else screen_cnts[1] try: biggest_cnt = 
sorted(screen_cnts, key=cv2.contourArea, reverse=True)[ 0 ] screen_area = cv2.contourArea(biggest_cnt) return (biggest_cnt, screen_area) except IndexError: return (False, 0) def find_screen(img_res): """Tries to find the multimeter screen based on its color, if not possible use default position""" screen_area = 0 for HSVL, HSVJ in zip(HSVL_LIST, HSVH_LIST): screen_filtered = color_screen_threshold(img_res, HSVL, HSVH) (biggest_cnt, screen_area) = get_screen_area(screen_filtered) if screen_area >= SCREEN_AREA_THRESHOLD: screen_box = np.int0(cv2.boxPoints(cv2.minAreaRect(biggest_cnt))) break else: x_screen = img_res.shape[1] y_screen = img_res.shape[0] top_left = (round(x_screen / 4), round(y_screen / 4)) top_right = (round(3 * x_screen / 4), round(y_screen / 4)) bottom_right = (round(3 * x_screen / 4), round(3 * y_screen / 4)) bottom_left = (round(x_screen / 4), round(3 * y_screen / 4)) screen_box = np.array( [top_left, top_right, bottom_right, bottom_left], dtype="float32", ) return four_point_transform(img_res, np.reshape(screen_box, (4, 2))) def open_resize_image(resive_factor): """Opens image and resive it to given factor""" return cv2.resize( cv2.imread(ARGS["image"], 1), (0, 0), fx=resive_factor, fy=resive_factor, ) def filter_screen( screen, kernel_size, erosion_iteration, dilation_iteration, operation="opening", ): """Filter screen with a opening or closing morphological operation""" kernel = np.ones((kernel_size, kernel_size), np.uint8) if operation == "opening": erosion = cv2.erode( screen.copy(), kernel, iterations=erosion_iteration ) return cv2.dilate(erosion, kernel, iterations=dilation_iteration) dilation = cv2.dilate( screen.copy(), kernel, iterations=dilation_iteration ) return cv2.erode(dilation, kernel, iterations=erosion_iteration) def cropp_to_digits(image): """Finds the first pixel at the top and bottom and crops the image""" stop = False height, width = image.shape for y_pixel in range(0, height): for x_pixel in range(0, width): if 
image[y_pixel, x_pixel] == 0:
                # First dark pixel scanning top-down marks the top edge.
                first_pixel_up = y_pixel
                stop = True
                break
        if stop is True:
            break
    stop = False
    # Same scan from the bottom row upward for the lower edge.
    for y_pixel in range(height - 1, -1, -1):
        for x_pixel in range(0, width):
            if image[y_pixel, x_pixel] == 0:
                first_pixel_down = y_pixel
                stop = True
                break
        if stop is True:
            break
    # NOTE(review): raises NameError if the image has no dark (0) pixel
    # at all — first_pixel_up/down would be unbound.  Confirm callers
    # only pass thresholded images containing digits.
    return image[first_pixel_up : first_pixel_down + 1]


def find_if_close(cnt1, cnt2, dist_th):
    """Check if contours should be joined, segments sometimes separate"""
    row1, row2 = cnt1.shape[0], cnt2.shape[0]
    for i in range(row1):
        for j in range(row2):
            (x_bottom_1, _, _, _) = cv2.boundingRect(cnt1[i])
            (x_bottom_2, _, _, _) = cv2.boundingRect(cnt2[j])
            # Same x coordinate => vertically stacked segments of one digit.
            if abs(x_bottom_1 - x_bottom_2) == 0:
                return True
            # Euclidean distance between the two contour points.
            dist = np.linalg.norm(cnt1[i] - cnt2[j])
            if dist < dist_th:
                return True
            # Last point pair checked without a match: contours are far apart.
            if i == row1 - 1 and j == row2 - 1:
                return False
    return False


def join_cnts(image, sorted_contours, dist_th=DIST_THRESHOLD):
    """Join contours that represent one digit"""
    # status[i] is a group label; contours closer than dist_th get the
    # same label and are later merged into one convex hull.
    status = np.zeros((len(sorted_contours), 1))
    for i, cnt1 in enumerate(sorted_contours):
        x_counter = i
        if i != len(sorted_contours) - 1:
            for _, cnt2 in enumerate(sorted_contours[i + 1 :]):
                x_counter = x_counter + 1
                dist = find_if_close(cnt1, cnt2, dist_th)
                if dist:
                    # Merge the two groups under the smaller label.
                    val = min(status[i], status[x_counter])
                    status[x_counter] = status[i] = val
                else:
                    # Distinct digit: open a new group label.
                    if status[x_counter] == status[i]:
                        status[x_counter] = i + 1
    unified = []
    maximum = int(status.max()) + 1
    for i in range(maximum):
        pos = np.where(status == i)[0]
        if pos.size != 0:
            # One convex hull per group of merged contours.
            cont = np.vstack([sorted_contours[i] for i in pos])
            hull = cv2.convexHull(cont)
            unified.append(hull)
    image_color2 = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
    if DEBUG:
        cv2.drawContours(image_color2, unified, -1, (0, 255, 0), 2)
        cv2.imshow("image", image_color2)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    # Return the merged digit contours sorted left-to-right.
    return contours.sort_contours(unified)[0]


def find_digits_cnts(image, dist_th=DIST_THRESHOLD):
    """Returns all digits contours in image, sorted from left to right"""
    screen_digit_cnts = cv2.findContours(
        image, cv2.RETR_EXTERNAL, 
cv2.CHAIN_APPROX_SIMPLE
    )
    # Pick the right findContours return slot for the OpenCV version.
    screen_digit_cnts = (
        screen_digit_cnts[0]
        if is_cv2(or_better=True)
        else screen_digit_cnts[1]
    )
    # NOTE(review): y_image is never used in the visible code.
    y_image = image.shape[0]
    digits_cnts = []
    for cnt in screen_digit_cnts:
        (_, y_bottom, width, height) = cv2.boundingRect(cnt)
        # Keep contours big enough to be a digit, or half-size but wide
        # (e.g. a lone horizontal segment such as the '-' sign).
        if (width * height > SEGMENT_AREA_THRESHOLD) or (
            width * height > int(SEGMENT_AREA_THRESHOLD / 2) and width > 50
        ):
            digits_cnts.append(cnt)
    image_color2 = cv2.cvtColor(image.copy(), cv2.COLOR_GRAY2BGR)
    if DEBUG:
        cv2.drawContours(image_color2, digits_cnts, -1, (0, 0, 255), 1)
        cv2.imshow("image2", image_color2)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    sorted_contours = contours.sort_contours(digits_cnts)[0]
    return join_cnts(image, sorted_contours, dist_th)


def check_segments(roi, point_side="None"):
    """Check if each segment is on or off.

    roi is a single-digit binary image; point_side shifts the segment
    boxes when a decimal point is attached to the digit's right or left.
    Returns a 7-element on/off list matching DIGITS_LOOKUP key order.
    """
    (roi_height, roi_width) = roi.shape
    # Segment thickness as a fraction of the digit box (empirical).
    (d_width, d_height) = (int(roi_width * 0.24), int(roi_height * 0.11))
    d_height_center = int(d_height * 0.5)
    d_width_center = int(d_width * 0.5)
    # top, top-left, top-right, center, bottom_left, bottom_right, bottom
    if point_side == "None":
        segments = [
            ((d_width, 0), (roi_width - d_width, d_height)),
            ((0, 0), (d_width, roi_height // 2)),
            (
                (roi_width - int(0.9 * d_width), 0),
                (roi_width, roi_height // 2),
            ),
            (
                (d_width_center, roi_height // 2 - d_height_center),
                (
                    roi_width - d_width_center,
                    roi_height // 2 + d_height_center,
                ),
            ),
            ((0, roi_height // 2), (d_width, roi_height)),
            (
                (roi_width - int(0.9 * d_width), roi_height // 2),
                (roi_width, roi_height),
            ),
            ((0, roi_height - d_height), (roi_width - 10, roi_height)),
        ]
    elif point_side == "Right":
        # Pixel offsets here compensate for the decimal point joined on
        # the right side of the digit's bounding box (empirical values).
        segments = [
            ((0, 0), (roi_width - 30, d_height - 7)),
            ((0, 0), (d_width, roi_height // 2)),
            ((roi_width - d_width - 5, 0), (roi_width - 13, roi_height // 2)),
            (
                (0, roi_height // 2 - d_height_center - 7),
                (roi_width - 20, roi_height // 2 + d_height_center),
            ),
            ((0, roi_height // 2), (d_width, roi_height)),
            (
                (roi_width - d_width - 13, roi_height // 2),
                (roi_width - 13, roi_height),
            ),
            ((0, roi_height - d_height), (roi_width - 13, roi_height)),
        ]
    elif point_side == "Left":
        # Mirror case: decimal point joined on the left of the digit.
        segments = [
            ((20, 0), (roi_width, d_height - 7)),
            ((20, 0), (d_width + 15, roi_height // 2)),
            ((roi_width - d_width + 5, 0), (roi_width, roi_height // 2)),
            (
                (20, roi_height // 2 - d_height_center - 7),
                (roi_width, roi_height // 2 + d_height_center),
            ),
            ((20, roi_height // 2), (d_width + 15, roi_height)),
            (
                (roi_width - d_width + 5, roi_height // 2),
                (roi_width, roi_height),
            ),
            ((20, roi_height - d_height), (roi_width, roi_height)),
        ]
    on_segments = [0] * len(segments)
    for (i, ((x_s_beginning, y_s_bottom), (x_s_final, y_s_top))) in enumerate(
        segments
    ):
        segment_roi = roi[y_s_bottom:y_s_top, x_s_beginning:x_s_final]
        pixels_total = cv2.countNonZero(segment_roi)
        segment_area = (x_s_final - x_s_beginning) * (y_s_top - y_s_bottom)
        # A segment counts as lit when >45% of its box is foreground.
        if pixels_total / float(segment_area) > 0.45:
            on_segments[i] = 1
    return on_segments


def check_right_left_dot(roi):
    """To treat exception check left and right side for dot when contours join"""
    kernel = np.ones((2, 2), np.uint8)
    roi = 
of a transaction.<br/>Default value: YES<br/>Possible values = YES, NO. """ try : return self._sendclosenotify except Exception as e: raise e @sendclosenotify.setter def sendclosenotify(self, sendclosenotify) : ur"""Enable sending SSL Close-Notify at the end of a transaction.<br/>Default value: YES<br/>Possible values = YES, NO """ try : self._sendclosenotify = sendclosenotify except Exception as e: raise e @property def cleartextport(self) : ur"""The clearTextPort settings.<br/>Range 1 - 65535. """ try : return self._cleartextport except Exception as e: raise e @cleartextport.setter def cleartextport(self, cleartextport) : ur"""The clearTextPort settings.<br/>Range 1 - 65535 """ try : self._cleartextport = cleartextport except Exception as e: raise e @property def insertionencoding(self) : ur"""Encoding method used to insert the subject or issuer's name in HTTP requests to servers.<br/>Default value: Unicode<br/>Possible values = Unicode, UTF-8. """ try : return self._insertionencoding except Exception as e: raise e @insertionencoding.setter def insertionencoding(self, insertionencoding) : ur"""Encoding method used to insert the subject or issuer's name in HTTP requests to servers.<br/>Default value: Unicode<br/>Possible values = Unicode, UTF-8 """ try : self._insertionencoding = insertionencoding except Exception as e: raise e @property def denysslreneg(self) : ur"""Deny renegotiation in specified circumstances. Available settings function as follows: * NO - Allow SSL renegotiation. * FRONTEND_CLIENT - Deny secure and nonsecure SSL renegotiation initiated by the client. * FRONTEND_CLIENTSERVER - Deny secure and nonsecure SSL renegotiation initiated by the client or the NetScaler during policy-based client authentication. * ALL - Deny all secure and nonsecure SSL renegotiation. * NONSECURE - Deny nonsecure SSL renegotiation. 
Allows only clients that support RFC 5746.<br/>Default value: ALL<br/>Possible values = NO, FRONTEND_CLIENT, FRONTEND_CLIENTSERVER, ALL, NONSECURE. """ try : return self._denysslreneg except Exception as e: raise e @denysslreneg.setter def denysslreneg(self, denysslreneg) : ur"""Deny renegotiation in specified circumstances. Available settings function as follows: * NO - Allow SSL renegotiation. * FRONTEND_CLIENT - Deny secure and nonsecure SSL renegotiation initiated by the client. * FRONTEND_CLIENTSERVER - Deny secure and nonsecure SSL renegotiation initiated by the client or the NetScaler during policy-based client authentication. * ALL - Deny all secure and nonsecure SSL renegotiation. * NONSECURE - Deny nonsecure SSL renegotiation. Allows only clients that support RFC 5746.<br/>Default value: ALL<br/>Possible values = NO, FRONTEND_CLIENT, FRONTEND_CLIENTSERVER, ALL, NONSECURE """ try : self._denysslreneg = denysslreneg except Exception as e: raise e @property def quantumsize(self) : ur"""Amount of data to collect before the data is pushed to the crypto hardware for encryption. For large downloads, a larger quantum size better utilizes the crypto resources.<br/>Default value: 8192<br/>Possible values = 4096, 8192, 16384. """ try : return self._quantumsize except Exception as e: raise e @quantumsize.setter def quantumsize(self, quantumsize) : ur"""Amount of data to collect before the data is pushed to the crypto hardware for encryption. For large downloads, a larger quantum size better utilizes the crypto resources.<br/>Default value: 8192<br/>Possible values = 4096, 8192, 16384 """ try : self._quantumsize = quantumsize except Exception as e: raise e @property def strictcachecks(self) : ur"""Enable strict CA certificate checks on the appliance.<br/>Default value: NO<br/>Possible values = YES, NO. 
""" try : return self._strictcachecks except Exception as e: raise e @strictcachecks.setter def strictcachecks(self, strictcachecks) : ur"""Enable strict CA certificate checks on the appliance.<br/>Default value: NO<br/>Possible values = YES, NO """ try : self._strictcachecks = strictcachecks except Exception as e: raise e @property def encrypttriggerpktcount(self) : ur"""Maximum number of queued packets after which encryption is triggered. Use this setting for SSL transactions that send small packets from server to NetScaler.<br/>Default value: 45<br/>Minimum length = 10<br/>Maximum length = 50. """ try : return self._encrypttriggerpktcount except Exception as e: raise e @encrypttriggerpktcount.setter def encrypttriggerpktcount(self, encrypttriggerpktcount) : ur"""Maximum number of queued packets after which encryption is triggered. Use this setting for SSL transactions that send small packets from server to NetScaler.<br/>Default value: 45<br/>Minimum length = 10<br/>Maximum length = 50 """ try : self._encrypttriggerpktcount = encrypttriggerpktcount except Exception as e: raise e @property def pushflag(self) : ur"""Insert PUSH flag into decrypted, encrypted, or all records. If the PUSH flag is set to a value other than 0, the buffered records are forwarded on the basis of the value of the PUSH flag. Available settings function as follows: 0 - Auto (PUSH flag is not set.) 1 - Insert PUSH flag into every decrypted record. 2 -Insert PUSH flag into every encrypted record. 3 - Insert PUSH flag into every decrypted and encrypted record.<br/>Maximum length = 3. """ try : return self._pushflag except Exception as e: raise e @pushflag.setter def pushflag(self, pushflag) : ur"""Insert PUSH flag into decrypted, encrypted, or all records. If the PUSH flag is set to a value other than 0, the buffered records are forwarded on the basis of the value of the PUSH flag. Available settings function as follows: 0 - Auto (PUSH flag is not set.) 
1 - Insert PUSH flag into every decrypted record. 2 -Insert PUSH flag into every encrypted record. 3 - Insert PUSH flag into every decrypted and encrypted record.<br/>Maximum length = 3 """ try : self._pushflag = pushflag except Exception as e: raise e @property def dropreqwithnohostheader(self) : ur"""Host header check for SNI enabled sessions. If this check is enabled and the HTTP request does not contain the host header for SNI enabled sessions, the request is dropped.<br/>Default value: NO<br/>Possible values = YES, NO. """ try : return self._dropreqwithnohostheader except Exception as e: raise e @dropreqwithnohostheader.setter def dropreqwithnohostheader(self, dropreqwithnohostheader) : ur"""Host header check for SNI enabled sessions. If this check is enabled and the HTTP request does not contain the host header for SNI enabled sessions, the request is dropped.<br/>Default value: NO<br/>Possible values = YES, NO """ try : self._dropreqwithnohostheader = dropreqwithnohostheader except Exception as e: raise e @property def pushenctriggertimeout(self) : ur"""PUSH encryption trigger timeout value. The timeout value is applied only if you set the Push Encryption Trigger parameter to Timer in the SSL virtual server settings.<br/>Default value: 1<br/>Minimum length = 1<br/>Maximum length = 200. """ try : return self._pushenctriggertimeout except Exception as e: raise e @pushenctriggertimeout.setter def pushenctriggertimeout(self, pushenctriggertimeout) : ur"""PUSH encryption trigger timeout value. 
The timeout value is applied only if you set the Push Encryption Trigger parameter to Timer in the SSL virtual server settings.<br/>Default value: 1<br/>Minimum length = 1<br/>Maximum length = 200 """ try : self._pushenctriggertimeout = pushenctriggertimeout except Exception as e: raise e @property def ssltriggertimeout(self) : ur"""Time, in milliseconds, after which encryption is triggered for transactions that are not tracked on the NetScaler appliance because their length is not known. There can be a delay of up to 10ms from the specified timeout value before the packet is pushed into the queue.<br/>Default value: 100<br/>Minimum length = 1<br/>Maximum length = 200. """ try : return self._ssltriggertimeout except Exception as e: raise e @ssltriggertimeout.setter def ssltriggertimeout(self, ssltriggertimeout) : ur"""Time, in milliseconds, after which encryption is triggered for transactions that are not tracked on the NetScaler appliance because their length is not known. There can be a delay of up to 10ms from the specified timeout value before the packet is pushed into the queue.<br/>Default value: 100<br/>Minimum length = 1<br/>Maximum length = 200 """ try : self._ssltriggertimeout = ssltriggertimeout except Exception as e: raise e def _get_nitro_response(self, service, response) : ur""" converts nitro response into object and returns the object array in case of get request. 
""" try : result = service.payload_formatter.string_to_resource(sslprofile_response, response, self.__class__.__name__) if(result.errorcode != 0) : if (result.errorcode == 444) : service.clear_session(self) if result.severity : if (result.severity == "ERROR") : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) else : raise nitro_exception(result.errorcode, str(result.message), str(result.severity)) return result.sslprofile except Exception as e : raise e def _get_object_name(self) : ur""" Returns the value of object identifier argument """ try : if self.name is not None : return str(self.name) return None except Exception as e : raise e @classmethod def add(cls, client, resource) : ur""" Use this API to add sslprofile. """ try : if type(resource) is not list : addresource = sslprofile() addresource.name = resource.name addresource.sslprofiletype = resource.sslprofiletype addresource.dhcount = resource.dhcount addresource.dh = resource.dh addresource.dhfile = resource.dhfile addresource.ersa = resource.ersa addresource.ersacount = resource.ersacount addresource.sessreuse = resource.sessreuse addresource.sesstimeout = resource.sesstimeout addresource.cipherredirect = resource.cipherredirect addresource.cipherurl = resource.cipherurl addresource.clientauth = resource.clientauth addresource.clientcert = resource.clientcert addresource.sslredirect = resource.sslredirect addresource.redirectportrewrite = resource.redirectportrewrite addresource.nonfipsciphers = resource.nonfipsciphers addresource.ssl3 = resource.ssl3 addresource.tls1 = resource.tls1 addresource.tls11 = resource.tls11 addresource.tls12 = resource.tls12 addresource.snienable = resource.snienable addresource.serverauth = resource.serverauth addresource.commonname = resource.commonname addresource.pushenctrigger = resource.pushenctrigger addresource.sendclosenotify = resource.sendclosenotify addresource.cleartextport = resource.cleartextport addresource.insertionencoding = 
resource.insertionencoding addresource.denysslreneg = resource.denysslreneg addresource.quantumsize = resource.quantumsize addresource.strictcachecks = resource.strictcachecks addresource.encrypttriggerpktcount = resource.encrypttriggerpktcount addresource.pushflag = resource.pushflag addresource.dropreqwithnohostheader = resource.dropreqwithnohostheader addresource.pushenctriggertimeout = resource.pushenctriggertimeout addresource.ssltriggertimeout = resource.ssltriggertimeout return addresource.add_resource(client) else : if (resource and len(resource) > 0) : addresources = [ sslprofile() for _ in range(len(resource))] for i in range(len(resource)) : addresources[i].name = resource[i].name addresources[i].sslprofiletype = resource[i].sslprofiletype addresources[i].dhcount = resource[i].dhcount addresources[i].dh = resource[i].dh addresources[i].dhfile = resource[i].dhfile addresources[i].ersa = resource[i].ersa addresources[i].ersacount = resource[i].ersacount addresources[i].sessreuse = resource[i].sessreuse addresources[i].sesstimeout = resource[i].sesstimeout addresources[i].cipherredirect = resource[i].cipherredirect addresources[i].cipherurl = resource[i].cipherurl addresources[i].clientauth = resource[i].clientauth addresources[i].clientcert = resource[i].clientcert addresources[i].sslredirect = resource[i].sslredirect addresources[i].redirectportrewrite = resource[i].redirectportrewrite addresources[i].nonfipsciphers = resource[i].nonfipsciphers addresources[i].ssl3 = resource[i].ssl3 addresources[i].tls1 = resource[i].tls1 addresources[i].tls11 = resource[i].tls11 addresources[i].tls12 = resource[i].tls12 addresources[i].snienable = resource[i].snienable addresources[i].serverauth = resource[i].serverauth addresources[i].commonname = resource[i].commonname addresources[i].pushenctrigger = resource[i].pushenctrigger addresources[i].sendclosenotify = resource[i].sendclosenotify addresources[i].cleartextport = resource[i].cleartextport 
addresources[i].insertionencoding = resource[i].insertionencoding addresources[i].denysslreneg = resource[i].denysslreneg addresources[i].quantumsize = resource[i].quantumsize addresources[i].strictcachecks = resource[i].strictcachecks addresources[i].encrypttriggerpktcount = resource[i].encrypttriggerpktcount addresources[i].pushflag = resource[i].pushflag addresources[i].dropreqwithnohostheader = resource[i].dropreqwithnohostheader addresources[i].pushenctriggertimeout = resource[i].pushenctriggertimeout addresources[i].ssltriggertimeout = resource[i].ssltriggertimeout result = cls.add_bulk_request(client, addresources) return result except Exception as e : raise e @classmethod def delete(cls, client, resource) : ur""" Use this API to delete sslprofile. """ try : if type(resource) is not list : deleteresource = sslprofile() if type(resource) != type(deleteresource): deleteresource.name = resource else : deleteresource.name = resource.name return deleteresource.delete_resource(client) else : if type(resource[0]) != cls : if (resource and len(resource) > 0) : deleteresources = [ sslprofile() for _ in range(len(resource))] for i in range(len(resource))
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University

This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.

This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.

You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""

import re

from nn_dataflow.core import InputLayer, ConvLayer, FCLayer, PoolingLayer
from nn_dataflow.core import InterLayerPipeline
from nn_dataflow.core import Network
from nn_dataflow.core import Option
from nn_dataflow.core import PhyDim2
from nn_dataflow.core import PipelineSegment

from . import TestPipelineFixture


class TestInterLayerPipeline(TestPipelineFixture):
    ''' Tests for InterLayerPipeline.

    Fixture networks (self.net), batch size, and resource come from
    TestPipelineFixture; tests exercise the layer DAG construction
    (dag_vertex_list / dag_prev_dict / dag_next_dict), vertex-segment
    generation (_gen_vseg), and pipeline segment generation (gen_segment).
    '''

    def test_valid_args(self):
        ''' Valid arguments are stored unchanged on the instance. '''
        ilp = InterLayerPipeline(self.net['net1'], self.batch_size,
                                 self.resource, max_util_drop=0.1)
        self.assertIs(ilp.network, self.net['net1'])
        self.assertEqual(ilp.batch_size, self.batch_size)
        self.assertIs(ilp.resource, self.resource)
        self.assertEqual(ilp.max_util_drop, 0.1)

    def test_invalid_network(self):
        ''' Invalid network type raises TypeError. '''
        with self.assertRaisesRegex(TypeError,
                                    'InterLayerPipeline: .*network.*'):
            # An input layer is not a Network instance.
            _ = InterLayerPipeline(self.net['net1'].input_layer(),
                                   self.batch_size, self.resource)

    def test_invalid_resource(self):
        ''' Invalid resource type raises TypeError. '''
        with self.assertRaisesRegex(TypeError,
                                    'InterLayerPipeline: .*resource.*'):
            _ = InterLayerPipeline(self.net['net1'], self.batch_size,
                                   PhyDim2(1, 1))

    def test_invalid_max_util_drop(self):
        ''' max_util_drop outside [0, 1] raises ValueError. '''
        with self.assertRaisesRegex(ValueError,
                                    'InterLayerPipeline: .*max_util_drop.*'):
            _ = InterLayerPipeline(self.net['net1'], self.batch_size,
                                   self.resource, max_util_drop=1.1)

        with self.assertRaisesRegex(ValueError,
                                    'InterLayerPipeline: .*max_util_drop.*'):
            _ = InterLayerPipeline(self.net['net1'], self.batch_size,
                                   self.resource, max_util_drop=-0.1)

    def test_topological_order(self):
        ''' DAG vertices are in topological order and partition the net. '''
        for net in self.net.values():

            if not net.net_name.startswith('net'):
                continue

            ilp = self._make_ilp(net)

            for layer in net:
                vidx = ilp.dag_vertex_dict[layer]
                self.assertIn(layer, ilp.dag_vertex_list[vidx])

                # Layer is named by topological order.
                self.assertTrue(layer.startswith(str(vidx)))

            # Disjoint union: every layer in exactly one vertex.
            vs_list = [set(v) for v in ilp.dag_vertex_list]

            for idx, vs in enumerate(vs_list):
                for vs2 in vs_list[:idx]:
                    self.assertTrue(vs.isdisjoint(vs2))
            self.assertSetEqual(set.union(*vs_list), set(net))

    def test_vertex_no_merge_lr(self):
        ''' LocalRegionLayer has no previous layer to merge with. '''
        net = Network('tmp_net')
        net.set_input_layer(InputLayer(30, 1))
        # The first layer is a local-region (pooling) layer with no conv
        # before it, so it must become its own vertex.
        net.add('0', PoolingLayer(30, 1, 1))
        net.add('1', FCLayer(30, 40))
        net.add('1p', PoolingLayer(40, 1, 1))

        ilp = self._make_ilp(net)

        for layer in net:
            vidx = ilp.dag_vertex_dict[layer]
            self.assertIn(layer, ilp.dag_vertex_list[vidx])

            # Layer is named by topological order.
            self.assertTrue(layer.startswith(str(vidx)))

    def test_prev(self):
        ''' Previous relationship recorded in dag_prev_dict is consistent. '''
        for net in self.net.values():

            ilp = self._make_ilp(net)

            for vidx, prevs in ilp.dag_prev_dict.items():

                # Previous layers of the current vertex.
                prev_layers = set()
                v = ilp.dag_vertex_list[vidx]
                for l in v:
                    prev_layers.update(net.prevs(l))
                prev_layers.difference_update(v)

                for pvidx in prevs:
                    # Previous vertices should be ordered before this vertex.
                    self.assertLess(pvidx, vidx)
                    # Previous vertex should have at least one previous layer.
                    if pvidx < 0:
                        # Negative index means the (external) input source.
                        self.assertTrue(
                            None in prev_layers
                            or not prev_layers.isdisjoint(net.ext_layers()))
                    else:
                        pv = ilp.dag_vertex_list[pvidx]
                        self.assertFalse(prev_layers.isdisjoint(pv))

    def test_next(self):
        ''' Next relationship recorded in dag_next_dict is consistent. '''
        for net in self.net.values():

            ilp = self._make_ilp(net)

            for vidx, nexts in ilp.dag_next_dict.items():

                # Next layers of the current vertex.
                next_layers = set()
                if vidx < 0:
                    # Go through all layers and add those with input layer as
                    # previous.
                    for l in net:
                        prevs = set(net.prevs(l))
                        if None in prevs \
                                or not prevs.isdisjoint(net.ext_layers()):
                            next_layers.add(l)
                else:
                    v = ilp.dag_vertex_list[vidx]
                    for l in v:
                        next_layers.update(net.nexts(l))
                    next_layers.difference_update(v)

                for nvidx in nexts:
                    # Next vertices should be ordered after this vertex.
                    self.assertGreater(nvidx, vidx)
                    # Next vertex should have at least one next layer.
                    nv = ilp.dag_vertex_list[nvidx]
                    self.assertFalse(next_layers.isdisjoint(nv))

    def test_match_prev_next(self):
        ''' dag_prev_dict and dag_next_dict are mutual inverses. '''
        for net in self.net.values():

            ilp = self._make_ilp(net)

            for vidx, prevs in ilp.dag_prev_dict.items():
                for pvidx in prevs:
                    self.assertIn(vidx, ilp.dag_next_dict[pvidx])

            for vidx, nexts in ilp.dag_next_dict.items():
                for nvidx in nexts:
                    self.assertIn(vidx, ilp.dag_prev_dict[nvidx])

    def test_gen_vseg(self):
        ''' _gen_vseg: counts and membership across fixture networks. '''
        # pylint: disable=protected-access

        # Simple case.
        ilp = self._make_ilp(self.net['net1'])
        num = len(ilp.dag_vertex_list)
        self.assertEqual(len(list(ilp._gen_vseg())), (num + 1) * num // 2)

        # Linear case.
        # Number of different vsegs of n = 1 + ... + n
        ilp = self._make_ilp(self.net['net2'])
        num = len(ilp.dag_vertex_list)
        self.assertEqual(len(list(ilp._gen_vseg())), (num + 1) * num // 2)

        # Fork case.
        ilp = self._make_ilp(self.net['net4'])
        vseg_list = list(ilp._gen_vseg())
        self.assertEqual(len(vseg_list), 39)

        # Case with one of multiple previous vertices on-chip.
        self.assertIn((9, 10), vseg_list)
        self.assertIn((13, 14), vseg_list)
        # Case with only one next vertex off-chip.
        self.assertIn((7, 8), vseg_list)
        self.assertNotIn((4, 5, 6), vseg_list)

        # Multiple first layers.
        self.assertGreater(len(self.net['net3'].firsts()), 1)
        ilp = self._make_ilp(self.net['net3'])
        vseg_list = list(ilp._gen_vseg())
        self.assertIn((0,), vseg_list)
        self.assertIn((1,), vseg_list)

        # Verify rules.
        ilp = self._make_ilp(self.net['net5'])
        vseg_list = list(ilp._gen_vseg())
        # Layers with no shared dependencies.
        self.assertNotIn((2, 3, 4), vseg_list)
        self.assertNotIn((8, 9), vseg_list)
        # Multiple previous layers.
        self.assertNotIn((5, 6, 7), vseg_list)
        self.assertNotIn((8, 9, 10), vseg_list)
        self.assertNotIn((10, 11, 12), vseg_list)
        # Multiple next layers.
        self.assertNotIn((0, 1, 2, 3), vseg_list)
        self.assertIn((3, 4), vseg_list)
        self.assertIn((3, 4, 5), vseg_list)
        self.assertIn((10, 11), vseg_list)

        # No duplicate.
        for net in self.net.values():
            ilp = self._make_ilp(net)
            vseg_list = list(ilp._gen_vseg())
            self.assertEqual(len(vseg_list), len(set(vseg_list)))

        # Real networks.
        ilp = self._make_ilp(self.net['zfnet'])
        self.assertEqual(len(ilp.dag_vertex_list), 8)
        vseg_list = list(ilp._gen_vseg())
        self.assertEqual(len(vseg_list), 36)

        ilp = self._make_ilp(self.net['vgg_net'])
        self.assertEqual(len(ilp.dag_vertex_list), 16)
        vseg_list = list(ilp._gen_vseg())
        self.assertEqual(len(vseg_list), 136)

        # Large networks with forks.
        for net_name in ['googlenet', 'resnet152']:
            net = self.net[net_name]

            ilp = self._make_ilp(net)
            vseg_list = list(ilp._gen_vseg())
            self.assertEqual(len(vseg_list), len(set(vseg_list)))

            # The number of different vsegs is between one and eight times of
            # the number of layers.
            self.assertGreater(len(vseg_list), len(net))
            self.assertLessEqual(len(vseg_list), len(net) * 8)

    def test_gen_vseg_twice(self):
        ''' _gen_vseg is deterministic: calling twice gives identical lists. '''
        # pylint: disable=protected-access
        for net_name in self.net:
            if not net_name.startswith('net'):
                continue
            net = self.net[net_name]
            ilp = self._make_ilp(net)

            vseg_list_1 = list(ilp._gen_vseg())
            vseg_list_2 = list(ilp._gen_vseg())

            self.assertListEqual(vseg_list_1, vseg_list_2)

    def test_ordered_layer_list(self):
        ''' ordered_layer_list() returns layer names in natural order. '''
        # Natural sort key, see
        # https://stackoverflow.com/a/4836734/5277823
        nat_key = lambda key: tuple(int(c) if c.isdigit() else c.lower()
                                    for c in re.split('([0-9]+)', key))

        for net_name in ['net1', 'net2', 'net3', 'net4', 'net5']:
            net = self.net[net_name]
            ilp = self._make_ilp(net)
            ord_list = ilp.ordered_layer_list()

            # In natural order.
            self.assertTrue(all(nat_key(l1) < nat_key(l2) for l1, l2
                                in zip(ord_list, ord_list[1:])))

    def test_gen_segment(self):
        ''' gen_segment() under the four pipelining option combinations. '''
        for net_name in self.net:
            net = self.net[net_name]
            ilp = self._make_ilp(net)

            # No pipelining: each segment is a single layer.
            options = Option()
            segs_n_lst = list(ilp.gen_segment(options))
            segs_n = set(segs_n_lst)
            self.assertEqual(len(segs_n_lst), len(segs_n))
            for seg in segs_n:
                self.assertEqual(len(seg), 1)
                self.assertEqual(len(seg[0]), 1)
                self.assertIn(seg[0][0], net)

            # Spatial pipelining: at most one conv layer per temporal tuple.
            options = Option(partition_interlayer=True)
            segs_sp_lst = list(ilp.gen_segment(options))
            segs_sp = set(segs_sp_lst)
            self.assertEqual(len(segs_sp_lst), len(segs_sp))
            for seg in segs_sp:
                for ltpl in seg:
                    self.assertLessEqual(
                        sum(1 for l in ltpl if isinstance(l, ConvLayer)), 1)
            self.assertTrue(segs_sp.issuperset(segs_n))

            # Temporal pipelining: a single spatial tuple per segment.
            options = Option(hw_gbuf_save_writeback=True)
            segs_tp_lst = list(ilp.gen_segment(options))
            segs_tp = set(segs_tp_lst)
            self.assertEqual(len(segs_tp_lst), len(segs_tp))
            for seg in segs_tp:
                self.assertEqual(len(seg), 1)
            self.assertTrue(segs_tp.issuperset(segs_n))

            # Spatial and temporal pipelining: union of the two above.
            options = Option(partition_interlayer=True,
                             hw_gbuf_save_writeback=True)
            segs_stp_lst = list(ilp.gen_segment(options))
            segs_stp = set(segs_stp_lst)
            self.assertEqual(len(segs_stp_lst), len(segs_stp))
            self.assertSetEqual(segs_stp, segs_tp | segs_sp)

            # Only single-layer and single-vertex segments have the same
            # spatial and temporal pipelining.
            segs_intersect = segs_tp & segs_sp
            segs_single = segs_n
            segs_single |= set(PipelineSegment((v,), ilp.network,
                                               ilp.batch_size, ilp.resource)
                               for v in ilp.dag_vertex_list)
            self.assertTrue(segs_intersect.issubset(segs_single))

    def test_gen_segment_max_degree(self):
        ''' gen_segment() respects the maximum pipelining degree option. '''
        net = self.net['vgg_net']
        ilp = self._make_ilp(net)

        options = Option(partition_interlayer=True,
                         hw_gbuf_save_writeback=True,
                         layer_pipeline_max_degree=4)
        for segment in ilp.gen_segment(options):
            # Count conv layers across all tuples of the segment.
            self.assertLessEqual(sum(1 if isinstance(net[l], ConvLayer) else 0
                                     for ltpl in segment for l in ltpl),
                                 4)

    def test_gen_segment_vseg(self):
        ''' Segments from gen_segment() are a subset of all vertex segments. '''
        for net_name in self.net:
            if not net_name.startswith('net'):
                continue
            net = self.net[net_name]
            ilp = self._make_ilp(net)

            options = Option(partition_interlayer=True)
            seg_set = set(ilp.gen_segment(options))
            self.assertTrue(seg_set)

            seg_v_set = set(self._gen_all_segment(net))

            self.assertTrue(seg_set.issubset(seg_v_set))

    def test_gen_segment_multi_prevs(self):
        ''' gen_segment() with multiple previous vertices. '''
        # pylint: disable=protected-access
        net = self.net['net4']
        ilp = self._make_ilp(net)

        vseg_set = set(ilp._gen_vseg())
        self.assertIn((9, 10), vseg_set)
        self.assertIn((13, 14), vseg_set)

        options = Option(partition_interlayer=True)
        seg_set = set(ilp.gen_segment(options))

        # 10 only has neighbor source 9; 10p only has local source 10 and
        # memory source 8. Valid.
        self.assertIn(self._make_segment((9, 10), ilp.network), seg_set)

        # 14 has both neighbor source 13, and memory source 12, etc.. Invalid.
        self.assertNotIn(self._make_segment((13, 14), ilp.network), seg_set)

    def test_gen_segment_one_nexts(self):
        ''' gen_segment() with missing one next vertex. '''
        # pylint: disable=protected-access
        net = self.net['net4']
        ilp = self._make_ilp(net)

        vseg_set = set(ilp._gen_vseg())
        self.assertIn((7, 8), vseg_set)
        self.assertNotIn((4, 5, 6), vseg_set)

        options = Option(partition_interlayer=True)
        seg_set = set(ilp.gen_segment(options))

        self.assertIn(self._make_segment((7, 8), ilp.network), seg_set)
        self.assertNotIn(self._make_segment((4, 5, 6), ilp.network), seg_set)

    def test_gen_segment_not_opt(self):
        ''' gen_segment() without layer_pipeline_opt yields no extra segs. '''
        options_with_opt = Option(partition_interlayer=True,
                                  hw_gbuf_save_writeback=True,
                                  layer_pipeline_opt=True)
        options_not_opt = Option(partition_interlayer=True,
                                 hw_gbuf_save_writeback=True,
                                 layer_pipeline_opt=False)

        # Linear ones: identical segment sets with or without optimization.
        for net_name in ['net1', 'net2', 'zfnet']:
            net = self.net[net_name]
            ilp = self._make_ilp(net)
            segs_with_opt = set(seg.seg
                                for seg in ilp.gen_segment(options_with_opt))
            segs_not_opt = set(seg.seg
                               for seg in ilp.gen_segment(options_not_opt))
            self.assertSetEqual(segs_with_opt, segs_not_opt)

        # Non-linear ones: optimization may only add segments.
        for net_name in ['net3', 'net4', 'net5', 'net6', 'net7', 'googlenet']:
            net = self.net[net_name]
            ilp = self._make_ilp(net)
            segs_with_opt = set(seg.seg
                                for seg in ilp.gen_segment(options_with_opt))
            segs_not_opt = set(seg.seg
                               for seg in ilp.gen_segment(options_not_opt))
            self.assertTrue(segs_with_opt.issuperset(segs_not_opt))

    def test_gen_segment_resnet(self):
        ''' gen_segment() with ResNet. '''
        net = self.net['resnet152']
        ilp = self._make_ilp(net)
        options =
field must indicate the path of the resource. For example, ``projects/example-project/locations/us-central1/registries/my-registry``. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.iot_v1.types.DeviceRegistry` update_mask (Union[dict, ~google.cloud.iot_v1.types.FieldMask]): Only updates the ``device_registry`` fields indicated by this mask. The field mask must not be empty, and it must not contain fields that are immutable or only set by the server. Mutable top-level fields: ``event_notification_config``, ``http_config``, ``mqtt_config``, and ``state_notification_config``. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.iot_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.iot_v1.types.DeviceRegistry` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "update_device_registry" not in self._inner_api_calls: self._inner_api_calls[ "update_device_registry" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.update_device_registry, default_retry=self._method_configs["UpdateDeviceRegistry"].retry, default_timeout=self._method_configs["UpdateDeviceRegistry"].timeout, client_info=self._client_info, ) request = device_manager_pb2.UpdateDeviceRegistryRequest( device_registry=device_registry, update_mask=update_mask ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("device_registry.name", device_registry.name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["update_device_registry"]( request, retry=retry, timeout=timeout, metadata=metadata ) def delete_device_registry( self, name, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Deletes a device registry configuration. Example: >>> from google.cloud import iot_v1 >>> >>> client = iot_v1.DeviceManagerClient() >>> >>> name = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]') >>> >>> client.delete_device_registry(name) Args: name (str): The name of the device registry. For example, ``projects/example-project/locations/us-central1/registries/my-registry``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. 
google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "delete_device_registry" not in self._inner_api_calls: self._inner_api_calls[ "delete_device_registry" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.delete_device_registry, default_retry=self._method_configs["DeleteDeviceRegistry"].retry, default_timeout=self._method_configs["DeleteDeviceRegistry"].timeout, client_info=self._client_info, ) request = device_manager_pb2.DeleteDeviceRegistryRequest(name=name) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) self._inner_api_calls["delete_device_registry"]( request, retry=retry, timeout=timeout, metadata=metadata ) def list_device_registries( self, parent, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Lists device registries. Example: >>> from google.cloud import iot_v1 >>> >>> client = iot_v1.DeviceManagerClient() >>> >>> parent = client.location_path('[PROJECT]', '[LOCATION]') >>> >>> # Iterate over all results >>> for element in client.list_device_registries(parent): ... # process element ... pass >>> >>> >>> # Alternatively: >>> >>> # Iterate over results one page at a time >>> for page in client.list_device_registries(parent).pages: ... for element in page: ... # process element ... pass Args: parent (str): The project and cloud region path. For example, ``projects/example-project/locations/us-central1``. page_size (int): The maximum number of resources contained in the underlying API response. 
If page streaming is performed per- resource, this parameter does not affect the return value. If page streaming is performed per-page, this determines the maximum number of resources in a page. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.api_core.page_iterator.PageIterator` instance. An iterable of :class:`~google.cloud.iot_v1.types.DeviceRegistry` instances. You can also iterate over the pages of the response using its `pages` property. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "list_device_registries" not in self._inner_api_calls: self._inner_api_calls[ "list_device_registries" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.list_device_registries, default_retry=self._method_configs["ListDeviceRegistries"].retry, default_timeout=self._method_configs["ListDeviceRegistries"].timeout, client_info=self._client_info, ) request = device_manager_pb2.ListDeviceRegistriesRequest( parent=parent, page_size=page_size ) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) iterator = google.api_core.page_iterator.GRPCIterator( client=None, method=functools.partial( self._inner_api_calls["list_device_registries"], retry=retry, timeout=timeout, metadata=metadata, ), request=request, items_field="device_registries", request_token_field="page_token", response_token_field="next_page_token", ) return iterator def create_device( self, parent, device, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Creates a device in a device registry. Example: >>> from google.cloud import iot_v1 >>> >>> client = iot_v1.DeviceManagerClient() >>> >>> parent = client.registry_path('[PROJECT]', '[LOCATION]', '[REGISTRY]') >>> >>> # TODO: Initialize `device`: >>> device = {} >>> >>> response = client.create_device(parent, device) Args: parent (str): The name of the device registry where this device should be created. For example, ``projects/example-project/locations/us-central1/registries/my-registry``. device (Union[dict, ~google.cloud.iot_v1.types.Device]): The device registration details. The field ``name`` must be empty. The server generates ``name`` from the device registry ``id`` and the ``parent`` field. 
If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.iot_v1.types.Device` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.iot_v1.types.Device` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. if "create_device" not in self._inner_api_calls: self._inner_api_calls[ "create_device" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.create_device, default_retry=self._method_configs["CreateDevice"].retry, default_timeout=self._method_configs["CreateDevice"].timeout, client_info=self._client_info, ) request = device_manager_pb2.CreateDeviceRequest(parent=parent, device=device) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("parent", parent)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["create_device"]( request, retry=retry, timeout=timeout, metadata=metadata ) def get_device( self, name, field_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """ Gets details about a device. 
Example: >>> from google.cloud import iot_v1 >>> >>> client = iot_v1.DeviceManagerClient() >>> >>> name = client.device_path('[PROJECT]', '[LOCATION]', '[REGISTRY]', '[DEVICE]') >>> >>> response = client.get_device(name) Args: name (str): The name of the device. For example, ``projects/p0/locations/us-central1/registries/registry0/devices/device0`` or ``projects/p0/locations/us-central1/registries/registry0/devices/{num_id}``. field_mask (Union[dict, ~google.cloud.iot_v1.types.FieldMask]): The fields of the ``Device`` resource to be returned in the response. If the field mask is unset or empty, all fields are returned. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.iot_v1.types.FieldMask` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.iot_v1.types.Device` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid. """ # Wrap the transport method to add retry and timeout logic. 
if "get_device" not in self._inner_api_calls: self._inner_api_calls[ "get_device" ] = google.api_core.gapic_v1.method.wrap_method( self.transport.get_device, default_retry=self._method_configs["GetDevice"].retry, default_timeout=self._method_configs["GetDevice"].timeout, client_info=self._client_info, ) request = device_manager_pb2.GetDeviceRequest(name=name, field_mask=field_mask) if metadata is None: metadata = [] metadata = list(metadata) try: routing_header = [("name", name)] except AttributeError: pass else: routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( routing_header ) metadata.append(routing_metadata) return self._inner_api_calls["get_device"]( request, retry=retry, timeout=timeout, metadata=metadata ) def update_device( self, device, update_mask, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, ): """
# # Copyright The NOMAD Authors. # # This file is part of NOMAD. # See https://nomad-lab.eu for further info. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import numpy as np # pylint: disable=unused-import import typing # pylint: disable=unused-import from nomad.metainfo import ( # pylint: disable=unused-import MSection, MCategory, Category, Package, Quantity, Section, SubSection, SectionProxy, Reference ) from nomad.datamodel.metainfo import simulation from nomad.datamodel.metainfo import workflow m_package = Package() class x_charmm_mdin_input_output_files(MCategory): ''' Parameters of mdin belonging to x_charmm_section_control_parameters. ''' m_def = Category() class x_charmm_mdin_control_parameters(MCategory): ''' Parameters of mdin belonging to x_charmm_section_control_parameters. ''' m_def = Category() class x_charmm_mdin_method(MCategory): ''' Parameters of mdin belonging to section method. ''' m_def = Category() class x_charmm_mdout_single_configuration_calculation(MCategory): ''' Parameters of mdout belonging to section_single_configuration_calculation. ''' m_def = Category() class x_charmm_mdout_method(MCategory): ''' Parameters of mdin belonging to section method. ''' m_def = Category() class x_charmm_mdout_run(MCategory): ''' Parameters of mdin belonging to settings run. ''' m_def = Category() class x_charmm_mdin_run(MCategory): ''' Parameters of mdin belonging to settings run. 
''' m_def = Category() class x_charmm_section_input_output_files(MSection): ''' Section to store input and output file names ''' m_def = Section(validate=False) class x_charmm_section_control_parameters(MSection): ''' Section to store the input and output control parameters ''' m_def = Section(validate=False) x_charmm_inout_file_structure = Quantity( type=str, shape=[], description=''' charmm input topology file. ''') x_charmm_inout_file_trajectory = Quantity( type=str, shape=[], description=''' charmm output trajectory file. ''') x_charmm_inout_file_traj_coord = Quantity( type=str, shape=[], description=''' charmm output trajectory file. ''') x_charmm_inout_file_traj_vel = Quantity( type=str, shape=[], description=''' charmm output file for velocities in the trajectory. ''') x_charmm_inout_file_traj_force = Quantity( type=str, shape=[], description=''' charmm output file for forces in the trajectory. ''') x_charmm_inout_file_output_coord = Quantity( type=str, shape=[], description=''' charmm output coordinates file. ''') x_charmm_inout_file_out_coor_str = Quantity( type=str, shape=[], description=''' charmm output coordinates on log. ''') x_charmm_inout_file_output_vel = Quantity( type=str, shape=[], description=''' charmm output velocities file. ''') x_charmm_inout_file_output_force = Quantity( type=str, shape=[], description=''' charmm output forces file. ''') x_charmm_inout_file_input_coord = Quantity( type=str, shape=[], description=''' charmm input coordinates file. ''') x_charmm_inout_file_in_coor_str = Quantity( type=str, shape=[], description=''' charmm input coordinate on log file. ''') x_charmm_inout_file_input_vel = Quantity( type=str, shape=[], description=''' charmm input velocities file. ''') x_charmm_inout_file_restart_coord = Quantity( type=str, shape=[], description=''' charmm restart coordinates file. ''') x_charmm_inout_file_restart_vel = Quantity( type=str, shape=[], description=''' charmm restart velocities file. 
''') x_charmm_inout_file_rtf_file = Quantity( type=str, shape=[], description=''' charmm RTF residue file. ''') x_charmm_inout_file_prm_file = Quantity( type=str, shape=[], description=''' charmm PRM parameter file. ''') x_charmm_inout_file_cor_file = Quantity( type=str, shape=[], description=''' charmm CRD coordinates file. ''') x_charmm_inout_file_stream = Quantity( type=str, shape=[], description=''' charmm stream input/output. ''') x_charmm_inout_file_rtf_str = Quantity( type=str, shape=[], description=''' charmm stream RTF input. ''') x_charmm_inout_file_par_str = Quantity( type=str, shape=[], description=''' charmm stream parameter input. ''') x_charmm_inout_file_output_log = Quantity( type=str, shape=[], description=''' charmm MD output log file. ''') x_charmm_inout_control_gaussian_option_is = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_crystal = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_crystal_type = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_a_length = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_b_length = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_c_length = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_alpha = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_beta = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_gamma = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. 
''') x_charmm_inout_control_mini = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_nstep = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_inbfrq = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_step = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_prtmin = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_tolfun = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_tolgrd = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_tolitr = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_tolstp = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_tfreq = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_pcut = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ihbfrq = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ncgcyc = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_nprint = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. 
''') x_charmm_inout_control_nonbond_option_flags = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_cutnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ctexnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ctonnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ctofnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_cgonnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_cgofnb = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_wmin = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_cdie = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_switch = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_vswitch = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_atoms = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_wrnmxd = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_e14fac = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. 
''') x_charmm_inout_control_eps = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_nbxmod = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_cutoff_distance = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_cutoff_angle = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_switching_on_distance = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_switching_off_distance = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_switching_on_angle = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bond_switching_off_angle = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_exclusions_due_to_distance_cutoff = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_exclusions_due_to_angle_cutoff = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_acceptor_antecedents = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_exclusions_due_to_duplications = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. 
''') x_charmm_inout_control_hbond_deletions_due_to_best_option = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_deletions_due_to_duplications = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_deletions_due_to_fixed_atoms = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hbond_deletions_due_to_exclusion = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_hydrogen_bonds_present = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_minimization_exit_status = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_dyna = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_akmastp = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_firstt = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_iseed = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_iprfrq = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ihtfrq = Quantity( type=str, shape=[], description=''' charmm running environment and control parameters. ''') x_charmm_inout_control_ieqfrq
#!/usr/bin/env python # # Copyright (C) 2007 Lemur Consulting Ltd # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. r"""fieldactions.py: Definitions and implementations of field actions. """ __docformat__ = "restructuredtext en" import _checkxapian import errors import marshall from replaylog import log import xapian import parsedate def _act_store_content(fieldname, doc, value, context): """Perform the STORE_CONTENT action. """ try: fielddata = doc.data[fieldname] except KeyError: fielddata = [] doc.data[fieldname] = fielddata fielddata.append(value) def _act_index_exact(fieldname, doc, value, context): """Perform the INDEX_EXACT action. """ doc.add_term(fieldname, value, 0) def _act_tag(fieldname, doc, value, context): """Perform the TAG action. """ doc.add_term(fieldname, value.lower(), 0) def _act_facet(fieldname, doc, value, context, type=None): """Perform the FACET action. 
""" if type is None or type == 'string': value = value.lower() doc.add_term(fieldname, value, 0) serialiser = log(xapian.StringListSerialiser, doc.get_value(fieldname, 'facet')) serialiser.append(value) doc.add_value(fieldname, serialiser.get(), 'facet') else: marshaller = SortableMarshaller() fn = marshaller.get_marshall_function(fieldname, type) doc.add_value(fieldname, fn(fieldname, value), 'facet') def _act_index_freetext(fieldname, doc, value, context, weight=1, language=None, stop=None, spell=False, nopos=False, allow_field_specific=True, search_by_default=True): """Perform the INDEX_FREETEXT action. """ termgen = log(xapian.TermGenerator) if language is not None: termgen.set_stemmer(log(xapian.Stem, language)) if stop is not None: stopper = log(xapian.SimpleStopper) for term in stop: stopper.add (term) termgen.set_stopper (stopper) if spell: termgen.set_database(context.index) termgen.set_flags(termgen.FLAG_SPELLING) termgen.set_document(doc._doc) if search_by_default: termgen.set_termpos(context.current_position) # Store a copy of the field without a prefix, for non-field-specific # searches. if nopos: termgen.index_text_without_positions(value, weight, '') else: termgen.index_text(value, weight, '') if allow_field_specific: # Store a second copy of the term with a prefix, for field-specific # searches. prefix = doc._fieldmappings.get_prefix(fieldname) if len(prefix) != 0: termgen.set_termpos(context.current_position) if nopos: termgen.index_text_without_positions(value, weight, prefix) else: termgen.index_text(value, weight, prefix) # Add a gap between each field instance, so that phrase searches don't # match across instances. termgen.increase_termpos(10) context.current_position = termgen.get_termpos() class SortableMarshaller(object): """Implementation of marshalling for sortable values. 
""" def __init__(self, indexing=True): if indexing: self._err = errors.IndexerError else: self._err = errors.SearchError def marshall_string(self, fieldname, value): """Marshall a value for sorting in lexicograpical order. This returns the input as the output, since strings already sort in lexicographical order. """ return value def marshall_float(self, fieldname, value): """Marshall a value for sorting as a floating point value. """ # convert the value to a float try: value = float(value) except ValueError: raise self._err("Value supplied to field %r must be a " "valid floating point number: was %r" % (fieldname, value)) return marshall.float_to_string(value) def marshall_date(self, fieldname, value): """Marshall a value for sorting as a date. """ try: value = parsedate.date_from_string(value) except ValueError, e: raise self._err("Value supplied to field %r must be a " "valid date: was %r: error is '%s'" % (fieldname, value, str(e))) return marshall.date_to_string(value) def get_marshall_function(self, fieldname, sorttype): """Get a function used to marshall values of a given sorttype. """ try: return { None: self.marshall_string, 'string': self.marshall_string, 'float': self.marshall_float, 'date': self.marshall_date, }[sorttype] except KeyError: raise self._err("Unknown sort type %r for field %r" % (sorttype, fieldname)) def _act_sort_and_collapse(fieldname, doc, value, context, type=None): """Perform the SORTABLE action. """ marshaller = SortableMarshaller() fn = marshaller.get_marshall_function(fieldname, type) value = fn(fieldname, value) doc.add_value(fieldname, value, 'collsort') class ActionContext(object): """The context in which an action is performed. This is just used to pass term generators, word positions, and the like around. """ def __init__(self, index): self.current_language = None self.current_position = 0 self.index = index class FieldActions(object): """An object describing the actions to be performed on a field. 
The supported actions are: - `STORE_CONTENT`: store the unprocessed content of the field in the search engine database. All fields which need to be displayed or used when displaying the search results need to be given this action. - `INDEX_EXACT`: index the exact content of the field as a single search term. Fields whose contents need to be searchable as an "exact match" need to be given this action. - `INDEX_FREETEXT`: index the content of this field as text. The content will be split into terms, allowing free text searching of the field. Four optional parameters may be supplied: - 'weight' is a multiplier to apply to the importance of the field. This must be an integer, and the default value is 1. - 'language' is the language to use when processing the field. This can be expressed as an ISO 2-letter language code. The supported languages are those supported by the xapian core in use. - 'stop' is an iterable of stopwords to filter out of the generated terms. Note that due to Xapian design, only non-positional terms are affected, so this is of limited use. - 'spell' is a boolean flag - if true, the contents of the field will be used for spelling correction. - 'nopos' is a boolean flag - if true, positional information is not stored. - 'allow_field_specific' is a boolean flag - if False, prevents terms with the field prefix being generated. This means that searches specific to this field will not work, and thus should only be used when only non-field specific searches are desired. Defaults to True. - 'search_by_default' is a boolean flag - if False, the field will not be searched by non-field specific searches. If True, or omitted, the field will be included in searches for non field-specific searches. - `SORTABLE`: index the content of the field such that it can be used to sort result sets. It also allows result sets to be restricted to those documents with a field values in a given range. 
One optional parameter may be supplied: - 'type' is a value indicating how to sort the field. It has several possible values: - 'string' - sort in lexicographic (ie, alphabetical) order. This is the default, used if no type is set. - 'float' - treat the values as (decimal representations of) floating point numbers, and sort in numerical order. The values in the field must be valid floating point numbers (according to Python's float() function). - 'date' - sort in date order. The values must be valid dates (either Python datetime.date objects, or ISO 8601 format (ie, YYYYMMDD or YYYY-MM-DD). - `COLLAPSE`: index the content of the field such that it can be used to "collapse" result sets, such that only the highest result with each value of the field will be returned. - `TAG`: the field contains tags; these are strings, which will be matched in a case insensitive way, but otherwise must be exact matches. Tag fields can be searched for by making an explict query (ie, using query_field(), but not with query_parse()). A list of the most frequent tags in a result set can also be accessed easily. - `FACET`: the field represents a classification facet; these are strings which will be matched exactly, but a list of all the facets present in the result set can also be accessed easily - in addition, a suitable subset of the facets, and a selection of the facet values, present in the result set can be calculated. One optional parameter may be supplied: - 'type' is a value indicating the type of facet contained in the field: - 'string' - the facet values are exact binary strings. - 'float' - the facet values are floating point numbers. """ # See the class docstring for the meanings of the following constants. STORE_CONTENT = 1 INDEX_EXACT = 2 INDEX_FREETEXT = 3 SORTABLE = 4 COLLAPSE = 5 TAG = 6 FACET = 7
<reponame>shaminmeerankutty/connect-python-sdk # coding: utf-8 """ Copyright 2017 Square, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pformat from six import iteritems import re class V1Item(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id=None, name=None, description=None, type=None, color=None, abbreviation=None, visibility=None, available_online=None, master_image=None, category=None, variations=None, modifier_lists=None, fees=None, taxable=None, category_id=None, available_for_pickup=None, v2_id=None): """ V1Item - a model defined in Swagger :param dict swaggerTypes: The key is attribute name and the value is attribute type. :param dict attributeMap: The key is attribute name and the value is json key in definition. 
""" self.swagger_types = { 'id': 'str', 'name': 'str', 'description': 'str', 'type': 'str', 'color': 'str', 'abbreviation': 'str', 'visibility': 'str', 'available_online': 'bool', 'master_image': 'V1ItemImage', 'category': 'V1Category', 'variations': 'list[V1Variation]', 'modifier_lists': 'list[V1Variation]', 'fees': 'list[V1Fee]', 'taxable': 'bool', 'category_id': 'str', 'available_for_pickup': 'bool', 'v2_id': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'description': 'description', 'type': 'type', 'color': 'color', 'abbreviation': 'abbreviation', 'visibility': 'visibility', 'available_online': 'available_online', 'master_image': 'master_image', 'category': 'category', 'variations': 'variations', 'modifier_lists': 'modifier_lists', 'fees': 'fees', 'taxable': 'taxable', 'category_id': 'category_id', 'available_for_pickup': 'available_for_pickup', 'v2_id': 'v2_id' } self._id = id self._name = name self._description = description self._type = type self._color = color self._abbreviation = abbreviation self._visibility = visibility self._available_online = available_online self._master_image = master_image self._category = category self._variations = variations self._modifier_lists = modifier_lists self._fees = fees self._taxable = taxable self._category_id = category_id self._available_for_pickup = available_for_pickup self._v2_id = v2_id @property def id(self): """ Gets the id of this V1Item. The item's ID. Must be unique among all entity IDs ever provided on behalf of the merchant. You can never reuse an ID. This value can include alphanumeric characters, dashes (-), and underscores (_). :return: The id of this V1Item. :rtype: str """ return self._id @id.setter def id(self, id): """ Sets the id of this V1Item. The item's ID. Must be unique among all entity IDs ever provided on behalf of the merchant. You can never reuse an ID. This value can include alphanumeric characters, dashes (-), and underscores (_). :param id: The id of this V1Item. 
:type: str """ self._id = id @property def name(self): """ Gets the name of this V1Item. The item's name. :return: The name of this V1Item. :rtype: str """ return self._name @name.setter def name(self, name): """ Sets the name of this V1Item. The item's name. :param name: The name of this V1Item. :type: str """ self._name = name @property def description(self): """ Gets the description of this V1Item. The item's description. :return: The description of this V1Item. :rtype: str """ return self._description @description.setter def description(self, description): """ Sets the description of this V1Item. The item's description. :param description: The description of this V1Item. :type: str """ self._description = description @property def type(self): """ Gets the type of this V1Item. The item's type. This value is NORMAL for almost all items. See [V1ItemType](#type-v1itemtype) for possible values :return: The type of this V1Item. :rtype: str """ return self._type @type.setter def type(self, type): """ Sets the type of this V1Item. The item's type. This value is NORMAL for almost all items. See [V1ItemType](#type-v1itemtype) for possible values :param type: The type of this V1Item. :type: str """ self._type = type @property def color(self): """ Gets the color of this V1Item. The color of the discount's display label in Square Register, if not the default color. The default color is 9da2a6. See [V1ItemColor](#type-v1itemcolor) for possible values :return: The color of this V1Item. :rtype: str """ return self._color @color.setter def color(self, color): """ Sets the color of this V1Item. The color of the discount's display label in Square Register, if not the default color. The default color is 9da2a6. See [V1ItemColor](#type-v1itemcolor) for possible values :param color: The color of this V1Item. :type: str """ self._color = color @property def abbreviation(self): """ Gets the abbreviation of this V1Item. The text of the item's display label in Square Register. 
Only up to the first five characters of the string are used. :return: The abbreviation of this V1Item. :rtype: str """ return self._abbreviation @abbreviation.setter def abbreviation(self, abbreviation): """ Sets the abbreviation of this V1Item. The text of the item's display label in Square Register. Only up to the first five characters of the string are used. :param abbreviation: The abbreviation of this V1Item. :type: str """ self._abbreviation = abbreviation @property def visibility(self): """ Gets the visibility of this V1Item. Indicates whether the item is viewable from the merchant's online store (PUBLIC) or PRIVATE. See [V1ItemVisibility](#type-v1itemvisibility) for possible values :return: The visibility of this V1Item. :rtype: str """ return self._visibility @visibility.setter def visibility(self, visibility): """ Sets the visibility of this V1Item. Indicates whether the item is viewable from the merchant's online store (PUBLIC) or PRIVATE. See [V1ItemVisibility](#type-v1itemvisibility) for possible values :param visibility: The visibility of this V1Item. :type: str """ self._visibility = visibility @property def available_online(self): """ Gets the available_online of this V1Item. If true, the item can be added to shipping orders from the merchant's online store. :return: The available_online of this V1Item. :rtype: bool """ return self._available_online @available_online.setter def available_online(self, available_online): """ Sets the available_online of this V1Item. If true, the item can be added to shipping orders from the merchant's online store. :param available_online: The available_online of this V1Item. :type: bool """ self._available_online = available_online @property def master_image(self): """ Gets the master_image of this V1Item. The item's master image, if any. :return: The master_image of this V1Item. 
:rtype: V1ItemImage """ return self._master_image @master_image.setter def master_image(self, master_image): """ Sets the master_image of this V1Item. The item's master image, if any. :param master_image: The master_image of this V1Item. :type: V1ItemImage """ self._master_image = master_image @property def category(self): """ Gets the category of this V1Item. The category the item belongs to, if any. :return: The category of this V1Item. :rtype: V1Category """ return self._category @category.setter def category(self, category): """ Sets the category of this V1Item. The category the item belongs to, if any. :param category: The category of this V1Item. :type: V1Category """ self._category = category @property def variations(self): """ Gets the variations of this V1Item. The item's variations. You must specify at least one variation. :return: The variations of this V1Item. :rtype: list[V1Variation] """ return self._variations @variations.setter def variations(self, variations): """ Sets the variations of this V1Item. The item's variations. You must specify at least one variation. :param variations: The variations of this V1Item. :type: list[V1Variation] """ self._variations = variations @property def modifier_lists(self): """ Gets the modifier_lists of this V1Item. The modifier lists that apply to the item, if any. :return: The modifier_lists of this V1Item. :rtype: list[V1Variation] """ return self._modifier_lists @modifier_lists.setter def modifier_lists(self, modifier_lists): """ Sets the modifier_lists of this V1Item. The modifier lists that apply to the item, if any. :param modifier_lists: The modifier_lists of this V1Item. :type: list[V1Variation] """ self._modifier_lists = modifier_lists @property def fees(self): """ Gets the fees of this V1Item. The fees that apply to the item, if any. :return: The fees of this V1Item. :rtype: list[V1Fee] """ return self._fees @fees.setter def fees(self, fees): """ Sets the fees of this V1Item. 
The fees that apply to the item, if any. :param fees: The fees of this V1Item. :type: list[V1Fee] """ self._fees = fees @property def taxable(self): """ Gets the taxable of this V1Item. Deprecated. This field is not used. :return: The taxable of this V1Item. :rtype: bool """ return self._taxable @taxable.setter def taxable(self, taxable): """ Sets the taxable of this V1Item.
- avg) ** 2 for xs in counts]) / var pval = spc.gammaincc(1.0 * len(blocks) / 2, chisqr / 2) return pval def occurances(string, sub): count=start=0 while True: start=string.find(sub,start)+1 if start>0: count+=1 else: return count def overlappingtemplatematchingtest(binin,mat="111111111",num=1032,numi=5): ''' The focus of this test is the number of pre-defined target substrings. The purpose of this test is to reject sequences that show deviations from the expected number of runs of ones of a given length. Note that when there is a deviation from the expected number of ones of a given length, there is also a deviation in the runs of zeroes. Runs of zeroes were not evaluated separately due to a concern about statistical independence among the tests. For this test and for the Non-overlapping Template Matching test, an m-bit window is used to search for a specific m-bit pattern. If the pattern is not found, the window slides one bit position. For this test, when the pattern is found, the window again slides one bit, and the search is resumed.''' n = len(binin) bign = int(n / num) m = len(mat) lamda = 1.0 * (num - m + 1) / 2 ** m eta = 0.5 * lamda pi = [pr(i, eta) for i in xrange(numi)] pi.append(1 - reduce(su, pi)) v = [0 for x in xrange(numi + 1)] blocks = stringpart(binin, num) blocklen = len(blocks[0]) counts = [occurances(i,mat) for i in blocks] counts2 = [(numi if xx > numi else xx) for xx in counts] for i in counts2: v[i] = v[i] + 1 chisqr = reduce(su, [(v[i]-bign*pi[i])** 2 / (bign*pi[i]) for i in xrange(numi + 1)]) pval = spc.gammaincc(0.5*numi, 0.5*chisqr) return pval def maurersuniversalstatistictest(binin,l=7,q=1280): ''' The focus of this test is the number of bits between matching patterns. The purpose of the test is to detect whether or not the sequence can be significantly compressed without loss of information. 
An overly compressible sequence is considered to be non-random.''' ru = [ [0.7326495, 0.690], [1.5374383, 1.338], [2.4016068, 1.901], [3.3112247, 2.358], [4.2534266, 2.705], [5.2177052, 2.954], [6.1962507, 3.125], [7.1836656, 3.238], [8.1764248, 3.311], [9.1723243, 3.356], [10.170032, 3.384], [11.168765, 3.401], [12.168070, 3.410], [13.167693, 3.416], [14.167488, 3.419], [15.167379, 3.421], ] blocks = [int(li, 2) + 1 for li in stringpart(binin, l)] k = len(blocks) - q states = [0 for x in xrange(2**l)] for x in xrange(q): states[blocks[x]-1]=x+1 sumi=0.0 for x in xrange(q,len(blocks)): sumi+=np.log2((x+1)-states[blocks[x]-1]) states[blocks[x]-1] = x+1 fn = sumi / k c=0.7-(0.8/l)+(4+(32.0/l))*((k**(-3.0/l))/15) sigma=c*np.sqrt((ru[l-1][1])/k) pval = spc.erfc(abs(fn-ru[l-1][0]) / (np.sqrt(2)*sigma)) return pval def lempelzivcompressiontest1(binin): ''' The focus of this test is the number of cumulatively distinct patterns (words) in the sequence. The purpose of the test is to determine how far the tested sequence can be compressed. The sequence is considered to be non-random if it can be significantly compressed. A random sequence will have a characteristic number of distinct patterns.''' i = 1 j = 0 n = len(binin) mu = 69586.25 sigma = 70.448718 words = [] while (i+j)<=n: tmp=binin[i:i+j:] if words.count(tmp)>0: j+=1 else: words.append(tmp) i+=j+1 j=0 wobs = len(words) pval = 0.5*spc.erfc((mu-wobs)/np.sqrt(2.0*sigma)) return pval def lempelzivcompressiontest(binin): ''' The focus of this test is the number of cumulatively distinct patterns (words) in the sequence. The purpose of the test is to determine how far the tested sequence can be compressed. The sequence is considered to be non-random if it can be significantly compressed. 
A random sequence will have a characteristic number of distinct patterns.''' i = 1 j = 0 n = len(binin) mu = 69586.25 sigma = 70.448718 words = [] while (i+j)<=n: tmp=binin[i:i+j:] if words.count(tmp)>0: j+=1 else: words.append(tmp) i+=j+1 j=0 wobs = len(words) pval = 0.5*spc.erfc((mu-wobs)/np.sqrt(2.0*sigma)) return pval # test 2.11 def serialtest(binin, m=16): ''' The focus of this test is the frequency of each and every overlapping m-bit pattern across the entire sequence. The purpose of this test is to determine whether the number of occurrences of the 2m m-bit overlapping patterns is approximately the same as would be expected for a random sequence. The pattern can overlap.''' n = len(binin) hbin=binin+binin[0:m-1:] f1a = [hbin[xs:m+xs:] for xs in xrange(n)] oo=set(f1a) f1 = [f1a.count(xs)**2 for xs in oo] f1 = map(f1a.count,oo) cou=f1a.count f2a = [hbin[xs:m-1+xs:] for xs in xrange(n)] f2 = [f2a.count(xs)**2 for xs in set(f2a)] f3a = [hbin[xs:m-2+xs:] for xs in xrange(n)] f3 = [f3a.count(xs)**2 for xs in set(f3a)] psim1 = 0 psim2 = 0 psim3 = 0 if m >= 0: suss = reduce(su,f1) psim1 = 1.0 * 2 ** m * suss / n - n if m >= 1: suss = reduce(su,f2) psim2 = 1.0 * 2 ** (m - 1) * suss / n - n if m >= 2: suss = reduce(su,f3) psim3 = 1.0 * 2 ** (m - 2) * suss / n - n d1 = psim1-psim2 d2 = psim1-2 * psim2 + psim3 pval1 = spc.gammaincc(2 ** (m - 2), d1 / 2.0) pval2 = spc.gammaincc(2 ** (m - 3), d2 / 2.0) return [pval1, pval2] def cumultativesumstest(binin): ''' The focus of this test is the maximal excursion (from zero) of the random walk defined by the cumulative sum of adjusted (-1, +1) digits in the sequence. The purpose of the test is to determine whether the cumulative sum of the partial sequences occurring in the tested sequence is too large or too small relative to the expected behavior of that cumulative sum for random sequences. This cumulative sum may be considered as a random walk. For a random sequence, the random walk should be near zero. 
For non-random sequences, the excursions of this random walk away from zero will be too large.''' n = len(binin) ss = [int(el) for el in binin] sc = map(sumi, ss) cs = np.cumsum(sc) z = max(abs(cs)) ra = 0 start = int(np.floor(0.25 * np.floor(-n / z) + 1)) stop = int(np.floor(0.25 * np.floor(n / z) - 1)) pv1 = [] for k in xrange(start, stop + 1): pv1.append(sst.norm.cdf((4 * k + 1) * z / np.sqrt(n)) - sst.norm.cdf((4 * k - 1) * z / np.sqrt(n))) start = int(np.floor(0.25 * np.floor(-n / z - 3))) stop = int(np.floor(0.25 * np.floor(n / z) - 1)) pv2 = [] for k in xrange(start, stop + 1): pv2.append(sst.norm.cdf((4 * k + 3) * z / np.sqrt(n)) - sst.norm.cdf((4 * k + 1) * z / np.sqrt(n))) pval = 1 pval -= reduce(su, pv1) pval += reduce(su, pv2) return pval def cumultativesumstestreverse(binin): '''The focus of this test is the maximal excursion (from zero) of the random walk defined by the cumulative sum of adjusted (-1, +1) digits in the sequence. The purpose of the test is to determine whether the cumulative sum of the partial sequences occurring in the tested sequence is too large or too small relative to the expected behavior of that cumulative sum for random sequences. This cumulative sum may be considered as a random walk. For a random sequence, the random walk should be near zero. For non-random sequences, the excursions of this random walk away from zero will be too large. ''' pval=cumultativesumstest(binin[::-1]) return pval def pik(k,x): if k==0: out=1-1.0/(2*np.abs(x)) elif k>=5: out=(1.0/(2*np.abs(x)))*(1-1.0/(2*np.abs(x)))**4 else: out=(1.0/(4*x*x))*(1-1.0/(2*np.abs(x)))**(k-1) return out def randomexcursionstest(binin): ''' The focus of this test is the number of cycles having exactly K visits in a cumulative sum random walk. The cumulative sum random walk is found if partial sums of the (0,1) sequence are adjusted to (-1, +1). 
A random excursion of a random walk consists of a sequence of n steps of unit length taken at random that begin at and return to the origin. The purpose of this test is to determine if the number of visits to a state within a random walk exceeds what one would expect for a random sequence.''' xvals=[-4, -3, -2, -1, 1, 2, 3, 4] ss = [int(el) for el
# NOTE(review): the line below was a dataset-scraper artifact
# ("<reponame>akenmorris/ITK<filename>Wrapping/Generators/Python/Tests/extras.py")
# glued in front of the real file header; it is not valid Python and is
# preserved here as a comment so the module stays importable.
# ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/

# Smoke tests for the itk Python convenience layer (helpers, I/O, casting).
# Expects on the command line: an image file, a mesh file, a transform file,
# plus output paths — see the sys.argv reads below.

# also test the import callback feature
import itk
import sys
import os
import numpy as np


def custom_callback(name, progress):
    """Import-progress callback: announce start (progress 0) and end (1)."""
    if progress == 0:
        print(f"Loading {name}...", file=sys.stderr)
    if progress == 1:
        print("done", file=sys.stderr)


import itkConfig

itkConfig.ImportCallback = custom_callback

# test setting the number of threads
itk.set_nthreads(4)
assert itk.get_nthreads() == 4

# test the force load function
itk.force_load()

filename = sys.argv[1]
mesh_filename = sys.argv[2]
transform_filename = sys.argv[3]

PixelType = itk.UC
dim = 2
ImageType = itk.Image[PixelType, dim]
ReaderType = itk.ImageFileReader[ImageType]
reader = ReaderType.New(FileName=filename)
# test snake_case keyword arguments
reader = ReaderType.New(file_name=filename)

# test echo
itk.echo(reader)
itk.echo(reader, sys.stdout)

# test class_
assert itk.class_(reader) == ReaderType
assert itk.class_("dummy") == str

# test template
assert itk.template(ReaderType) == (itk.ImageFileReader, (ImageType,))
assert itk.template(reader) == (itk.ImageFileReader, (ImageType,))
try:
    itk.template(str)
    raise Exception("unknown class should send an exception")
except KeyError:
    pass

# test ctype: whitespace inside the type name must be tolerated
assert itk.ctype("unsigned short") == itk.US
assert itk.ctype(" unsigned \n short \t ") == itk.US
assert itk.ctype("signed short") == itk.SS
assert itk.ctype("short") == itk.SS
try:
    itk.ctype("dummy")
    raise Exception("unknown C type should send an exception")
except KeyError:
    pass

# test output
assert itk.output(reader) == reader.GetOutput()
assert itk.output(1) == 1

# test the deprecated image
assert itk.image(reader) == reader.GetOutput()
assert itk.image(1) == 1

# test size
s = itk.size(reader)
assert s[0] == s[1] == 256
s = itk.size(reader.GetOutput())
assert s[0] == s[1] == 256

# test physical size
s = itk.physical_size(reader)
assert s[0] == s[1] == 256.0
s = itk.physical_size(reader.GetOutput())
assert s[0] == s[1] == 256.0

# test spacing
s = itk.spacing(reader)
assert s[0] == s[1] == 1.0
s = itk.spacing(reader.GetOutput())
assert s[0] == s[1] == 1.0

# test origin
s = itk.origin(reader)
assert s[0] == s[1] == 0.0
s = itk.origin(reader.GetOutput())
assert s[0] == s[1] == 0.0

# test index
s = itk.index(reader)
assert s[0] == s[1] == 0
s = itk.index(reader.GetOutput())
assert s[0] == s[1] == 0

# test region
s = itk.region(reader)
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256
s = itk.region(reader.GetOutput())
assert s.GetIndex()[0] == s.GetIndex()[1] == 0
assert s.GetSize()[0] == s.GetSize()[1] == 256

# test range
assert itk.range(reader) == (0, 255)
assert itk.range(reader.GetOutput()) == (0, 255)

# test write
itk.imwrite(reader, sys.argv[4])
itk.imwrite(reader, sys.argv[4], imageio=itk.PNGImageIO.New())
itk.imwrite(reader, sys.argv[4], True)

# test read
image = itk.imread(filename)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
image = itk.imread(filename, itk.F)
assert type(image) == itk.Image[itk.F, 2]
image = itk.imread(filename, itk.F, fallback_only=True)
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]
try:
    image = itk.imread(filename, fallback_only=True)
    # Should never reach this point if test passes since an exception
    # is expected.
    raise Exception("`itk.imread()` fallback_only should have failed")
except Exception as e:
    if str(e) == "pixel_type must be set when using the fallback_only option":
        pass
    else:
        raise e
image = itk.imread(filename, imageio=itk.PNGImageIO.New())
assert type(image) == itk.Image[itk.RGBPixel[itk.UC], 2]

# Make sure we can read unsigned short, unsigned int, and cast
image = itk.imread(filename, itk.UI)
assert type(image) == itk.Image[itk.UI, 2]

image = itk.imread(filename, itk.SI)
assert type(image) == itk.Image[itk.SI, 2]

as_float = image.astype(np.float32)
assert type(as_float) == itk.Image[itk.F, 2]

# test mesh read / write
mesh = itk.meshread(mesh_filename)
assert type(mesh) == itk.Mesh[itk.F, 3]
mesh = itk.meshread(mesh_filename, itk.UC)
assert type(mesh) == itk.Mesh[itk.UC, 3]
mesh = itk.meshread(mesh_filename, itk.UC, fallback_only=True)
assert type(mesh) == itk.Mesh[itk.F, 3]
itk.meshwrite(mesh, sys.argv[5])
itk.meshwrite(mesh, sys.argv[5], compression=True)

# test search
res = itk.search("Index")
assert res[0] == "Index"
assert res[1] == "index"
assert "ContinuousIndex" in res

res = itk.search("index", True)
assert "Index" not in res

# test down_cast
obj = itk.Object.cast(reader)
# be sure that the reader is casted to itk::Object
assert obj.__class__ == itk.Object
down_casted = itk.down_cast(obj)
assert down_casted == reader
assert down_casted.__class__ == ReaderType

# test setting the IO manually
png_io = itk.PNGImageIO.New()
assert png_io.GetFileName() == ""
reader = itk.ImageFileReader.New(FileName=filename, ImageIO=png_io)
reader.Update()
assert png_io.GetFileName() == filename

# test reading image series
series_reader = itk.ImageSeriesReader.New(FileNames=[filename, filename])
series_reader.Update()
assert series_reader.GetOutput().GetImageDimension() == 3
assert series_reader.GetOutput().GetLargestPossibleRegion().GetSize()[2] == 2

# test reading image series and check that dimension is not increased if
# last dimension is 1.
# Round-trip a 3-D image whose last dimension is 1: write it to disk, then
# read the same file twice as a series and verify the reader keeps the
# result 3-D (does not grow the dimension).
image_series = itk.Image[itk.UC, 3].New()
image_series.SetRegions([10, 7, 1])
image_series.Allocate()
image_series.FillBuffer(0)
# sys.argv[6] is the output directory for temporary test files
image_series3d_filename = os.path.join(sys.argv[6], "image_series_extras_py.mha")
itk.imwrite(image_series, image_series3d_filename)
series_reader = itk.ImageSeriesReader.New(
    FileNames=[image_series3d_filename, image_series3d_filename]
)
series_reader.Update()
assert series_reader.GetOutput().GetImageDimension() == 3

# test reading image series with itk.imread(): a list of 2-D files yields
# one 3-D volume
image_series = itk.imread([filename, filename])
assert image_series.GetImageDimension() == 3

# Numeric series filename generation without any integer index. It is
# only to produce an ITK object that users could set as an input to
# `itk.ImageSeriesReader.New()` or `itk.imread()` and test that it works.
numeric_series_filename = itk.NumericSeriesFileNames.New()
numeric_series_filename.SetStartIndex(0)
numeric_series_filename.SetEndIndex(3)
numeric_series_filename.SetIncrementIndex(1)
numeric_series_filename.SetSeriesFormat(filename)
image_series = itk.imread(numeric_series_filename.GetFileNames())
number_of_files = len(numeric_series_filename.GetFileNames())
assert image_series.GetImageDimension() == 3
# the series depth must match the number of generated filenames
assert image_series.GetLargestPossibleRegion().GetSize()[2] == number_of_files

# test reading image series with `itk.imread()` and check that dimension is
# not increased if last dimension is 1.
image_series = itk.imread([image_series3d_filename, image_series3d_filename]) assert image_series.GetImageDimension() == 3 baseline_parameters = np.array([0.6563149 , 0.58065837, -0.48175367, -0.74079868, 0.37486398, -0.55739959, -0.14306664, 0.72271215, 0.67617978, -66., 69., 32.]) baseline_fixed_parameters = np.array([0., 0., 0.]) # test transform read / write transforms = itk.transformread(transform_filename) fixed_parameters = np.asarray(transforms[0].GetFixedParameters()) parameters = np.asarray(transforms[0].GetParameters()) assert np.allclose(fixed_parameters, baseline_fixed_parameters) assert np.allclose(parameters, baseline_parameters) additional_transform = itk.TranslationTransform[itk.D, 3].New() baseline_additional_transform_params = [3.0, 2.0, 8.0] parameters = additional_transform.GetParameters() parameters[0] = baseline_additional_transform_params[0] parameters[1] = baseline_additional_transform_params[1] parameters[2] = baseline_additional_transform_params[2] additional_transform.SetParameters(parameters) transforms.insert(0, additional_transform) itk.transformwrite(transforms, sys.argv[7], compression=True) transforms = itk.transformread(sys.argv[7]) fixed_parameters = np.asarray(transforms[1].GetFixedParameters()) parameters = np.asarray(transforms[1].GetParameters()) assert np.allclose(fixed_parameters, baseline_fixed_parameters) assert np.allclose(parameters, baseline_parameters) parameters = np.asarray(transforms[0].GetParameters()) assert np.allclose(parameters, np.array(baseline_additional_transform_params)) # pipeline, auto_pipeline and templated class are tested in other files # BridgeNumPy # Images image = itk.imread(filename) arr = itk.array_from_image(image) arr.fill(1) assert np.any(arr != itk.array_from_image(image)) arr = itk.array_from_image(image) arr.fill(1) assert np.any(arr != itk.array_from_image(image)) view = itk.GetArrayViewFromImage(image) view.fill(1) assert np.all(view == itk.array_from_image(image)) image = 
itk.image_from_array(arr) image.FillBuffer(2) assert np.any(arr != itk.array_from_image(image)) image = itk.GetImageViewFromArray(arr) image.FillBuffer(2) assert np.all(arr == itk.array_from_image(image)) image = itk.image_from_array(arr, is_vector=True) assert image.GetImageDimension() == 2 image = itk.GetImageViewFromArray(arr, is_vector=True) assert image.GetImageDimension() == 2 arr = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.uint8) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1, 1] == 5 image = itk.image_from_array(arr) arrKeepAxes = itk.array_from_image(image, keep_axes=True) assert arrKeepAxes.shape[0] == 3 assert arrKeepAxes.shape[1] == 2 assert arrKeepAxes[1, 1] == 4 arr = itk.array_from_image(image, keep_axes=False) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1, 1] == 5 arrKeepAxes = itk.GetArrayViewFromImage(image, keep_axes=True) assert arrKeepAxes.shape[0] == 3 assert arrKeepAxes.shape[1] == 2 assert arrKeepAxes[1, 1] == 4 arr = itk.GetArrayViewFromImage(image, keep_axes=False) assert arr.shape[0] == 2 assert arr.shape[1] == 3 assert arr[1, 1] == 5 arr = arr.copy() image = itk.image_from_array(arr) image2 = type(image).New() image2.Graft(image) del image # Delete image but pixel data should be kept in img2 image = itk.image_from_array(arr + 1) # Fill former memory if wrongly released assert np.array_equal(arr, itk.GetArrayViewFromImage(image2)) image2.SetPixel( [0] * image2.GetImageDimension(), 3 ) # For mem check in dynamic analysis # VNL Vectors v1 = itk.vnl_vector.D(2) v1.fill(1) v_np = itk.GetArrayFromVnlVector(v1) assert v1.get(0) == v_np[0] v_np[0] = 0 assert v1.get(0) != v_np[0] view = itk.GetArrayViewFromVnlVector(v1) assert v1.get(0) == view[0] view[0] = 0 assert v1.get(0) == view[0] # VNL Matrices m1 = itk.vnl_matrix.D(2, 2) m1.fill(1) m_np = itk.GetArrayFromVnlMatrix(m1) assert m1.get(0, 0) == m_np[0, 0] m_np[0, 0] = 0 assert m1.get(0, 0) != m_np[0, 0] view = itk.GetArrayViewFromVnlMatrix(m1) assert 
m1.get(0, 0) == view[0, 0] view[0, 0] = 0 assert m1.get(0, 0) == view[0, 0] arr = np.zeros([3, 3]) m_vnl = itk.GetVnlMatrixFromArray(arr) assert m_vnl(0, 0) == 0 m_vnl.put(0, 0, 3) assert m_vnl(0, 0) == 3 assert arr[0, 0] == 0 # ITK Matrix arr = np.zeros([3, 3], float) m_itk = itk.GetMatrixFromArray(arr) # Test snake case function m_itk = itk.matrix_from_array(arr) m_itk.SetIdentity() # Test that the numpy array has not changed,... assert arr[0, 0] == 0 # but that the ITK matrix has the correct value. assert m_itk(0, 0) == 1 arr2 = itk.GetArrayFromMatrix(m_itk) # Check that snake case function also works arr2 = itk.array_from_matrix(m_itk) # Check that the new array has the new value. assert arr2[0, 0] == 1 arr2[0, 0] = 2 # Change the array value,... assert arr2[0, 0] == 2 # and make sure that the matrix hasn't changed. assert m_itk(0, 0) == 1 # test .astype for itk.Image numpyImage = np.random.randint(0, 256, (8, 12, 5)).astype(np.uint8) image = itk.image_from_array(numpyImage, is_vector=False) assert type(image) == type(itk.image_from_array(numpyImage, ttype=(type(image),))) assert type(image) == type(itk.image_from_array(numpyImage, ttype=[type(image)])) assert type(image) == type(itk.image_from_array(numpyImage, ttype=type(image))) cast = image.astype(np.uint8) assert cast == image (input_image_template, (input_pixel_type, input_image_dimension)) = itk.template(image) assert hasattr(itk.CastImageFilter, "IUC3IF3") for t in [ [itk.F, np.float32, "IUC3IF3"], [itk.SS, np.int16, "IUC3ISS3"], [itk.UI, np.uint32, "IUC3IUI3"], [np.float32, np.float32, "IUC3IF3"], ]: if hasattr(itk.CastImageFilter, t[2]): cast = image.astype(t[0]) (cast_image_template, (cast_pixel_type, cast_image_dimension)) = itk.template( cast ) assert ( cast_image_template == input_image_template and cast_image_dimension == input_image_dimension and cast.dtype == t[1] ) # test .astype for itk.VectorImage numpyImage = np.random.randint(0, 256, (8, 5, 3)).astype(np.float32) image = 
itk.image_from_array(numpyImage, is_vector=True) assert type(image) == type(itk.image_from_array(numpyImage, ttype=(type(image),))) assert type(image) == type(itk.image_from_array(numpyImage, ttype=[type(image)])) assert type(image) == type(itk.image_from_array(numpyImage, ttype=type(image))) ImageVectorsType = itk.Image[itk.Vector[itk.F, 3], 2] imagevectors = itk.cast_image_filter( Input=image, ttype=(type(image), ImageVectorsType) ) assert type(imagevectors) == ImageVectorsType cast = image.astype(np.float32) assert cast == image (vector_image_template, (vector_pixel_type, vector_image_dimension)) = itk.template( image ) for t in [ [itk.D, np.float64, "VIF2VID2"], [itk.SS, np.int16, "VIF2VISS2"], [itk.UI, np.uint32, "VIF2VIUI2"], [np.float64, np.float64, "VIF2VID2"], ]: if hasattr(itk.CastImageFilter, t[2]): cast = image.astype(t[0]) (cast_image_template, (cast_pixel_type, cast_image_dimension)) = itk.template( cast ) assert ( cast_image_template == vector_image_template and cast_image_dimension == vector_image_dimension and cast.dtype == t[1] ) # Test .astype for conversion between vector-like pixel types. components = 3 numpyImage = np.random.randint(0, 256, (12, 8, components)).astype(np.uint8) input_image = itk.image_from_array(numpyImage, is_vector=True) if type(input_image) == itk.Image[itk.RGBPixel[itk.UC], 2] and hasattr( itk.CastImageFilter, "IRGBUC2IVF32" ): output_pixel_type = itk.Vector[itk.F, components] output_image = input_image.astype(output_pixel_type) assert type(output_image) == itk.Image[output_pixel_type, 2] if '(<itkCType unsigned char>, 4)' in itk.Image.GetTypesAsList(): arr
# encoding: utf-8 # # Copyright (C) 2015-2019 ycmd contributors # # This file is part of ycmd. # # ycmd is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ycmd is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ycmd. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function from __future__ import division # Not installing aliases from python-future; it's unreliable and slow. from builtins import * # noqa import json import requests import ycm_core from mock import patch from nose.tools import eq_ from hamcrest import ( all_of, assert_that, contains, contains_inanyorder, empty, has_item, has_items, has_entry, has_entries, is_not ) from ycmd import handlers from ycmd.completers.cpp.clang_completer import ( NO_COMPLETIONS_MESSAGE, NO_COMPILE_FLAGS_MESSAGE, PARSING_FILE_MESSAGE ) from ycmd.responses import UnknownExtraConf, NoExtraConfDetected from ycmd.tests.clang import ( IsolatedYcmd, MockCoreClangCompleter, PathToTestFile, SharedYcmd ) from ycmd.tests.test_utils import ( BuildRequest, ChunkMatcher, CombineRequest, CompletionEntryMatcher, ErrorMatcher, LocationMatcher, WindowsOnly ) from ycmd.utils import ReadFile NO_COMPLETIONS_ERROR = ErrorMatcher( RuntimeError, NO_COMPLETIONS_MESSAGE ) def RunTest( app, test ): """ Method to run a simple completion test and verify the result Note: by default uses the .ycm_extra_conf from general_fallback/ which: - supports cpp, c and objc - requires extra_conf_data containing 'filetype&' = the 
filetype This should be sufficient for many standard test cases. If not, specify a path (as a list of path items) in 'extra_conf' member of |test|. test is a dictionary containing: 'request': kwargs for BuildRequest 'expect': { 'response': server response code (e.g. requests.codes.ok) 'data': matcher for the server response json } 'extra_conf': [ optional list of path items to extra conf file ] """ extra_conf = ( test[ 'extra_conf' ] if 'extra_conf' in test else [ 'general_fallback', '.ycm_extra_conf.py' ] ) app.post_json( '/load_extra_conf_file', { 'filepath': PathToTestFile( *extra_conf ) } ) request = test[ 'request' ] contents = ( request[ 'contents' ] if 'contents' in request else ReadFile( request[ 'filepath' ] ) ) # Because we aren't testing this command, we *always* ignore errors. This # is mainly because we (may) want to test scenarios where the completer # throws an exception and the easiest way to do that is to throw from # within the Settings function. app.post_json( '/event_notification', CombineRequest( request, { 'event_name': 'FileReadyToParse', 'contents': contents, } ), expect_errors = True ) # We also ignore errors here, but then we check the response code ourself. # This is to allow testing of requests returning errors. 
response = app.post_json( '/completions', CombineRequest( request, { 'contents': contents } ), expect_errors = True ) eq_( response.status_code, test[ 'expect' ][ 'response' ] ) print( 'Completer response: {0}'.format( json.dumps( response.json, indent = 2 ) ) ) assert_that( response.json, test[ 'expect' ][ 'data' ] ) @SharedYcmd def GetCompletions_ForcedWithNoTrigger_test( app ): RunTest( app, { 'description': 'semantic completion with force query=DO_SO', 'request': { 'filetype' : 'cpp', 'filepath' : PathToTestFile( 'general_fallback', 'lang_cpp.cc' ), 'line_num' : 54, 'column_num': 8, 'extra_conf_data': { '&filetype': 'cpp' }, 'force_semantic': True, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains( CompletionEntryMatcher( 'DO_SOMETHING_TO', 'void' ), CompletionEntryMatcher( 'DO_SOMETHING_WITH', 'void' ), ), 'errors': empty(), } ) }, } ) # This test is isolated to make sure we trigger c hook for clangd, instead of # fetching completer from cache. 
@IsolatedYcmd() def GetCompletions_Fallback_NoSuggestions_test( app ): RunTest( app, { 'description': 'Triggered, fallback but no query so no completions', 'request': { 'filetype' : 'c', 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num' : 29, 'column_num': 21, 'extra_conf_data': { '&filetype': 'c' }, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': empty(), 'errors': has_item( NO_COMPLETIONS_ERROR ), } ) }, } ) @SharedYcmd def GetCompletions_Fallback_NoSuggestions_MinimumCharacters_test( app ): RunTest( app, { 'description': 'fallback general completion obeys min chars setting ' ' (query="a")', 'request': { 'filetype' : 'cpp', 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num' : 29, 'column_num': 22, 'extra_conf_data': { '&filetype': 'c' }, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': empty(), 'errors': has_item( NO_COMPLETIONS_ERROR ), } ) }, } ) @SharedYcmd def GetCompletions_Fallback_Suggestions_test( app ): RunTest( app, { 'description': '. after macro with some query text (.a_)', 'request': { 'filetype' : 'c', 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num' : 29, 'column_num': 23, 'extra_conf_data': { '&filetype': 'c' }, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': has_item( CompletionEntryMatcher( 'a_parameter', '[ID]' ) ), 'errors': has_item( NO_COMPLETIONS_ERROR ), } ) }, } ) @SharedYcmd def GetCompletions_Fallback_Exception_test( app ): # extra conf throws exception RunTest( app, { 'description': '. 
on struct returns identifier because of error', 'request': { 'filetype' : 'c', 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num' : 62, 'column_num': 20, 'extra_conf_data': { '&filetype': 'c', 'throw': 'testy' }, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains( CompletionEntryMatcher( 'a_parameter', '[ID]' ), CompletionEntryMatcher( 'another_parameter', '[ID]' ), ), 'errors': has_item( ErrorMatcher( ValueError, 'testy' ) ) } ) }, } ) @SharedYcmd def GetCompletions_Forced_NoFallback_test( app ): RunTest( app, { 'description': '-> after macro with forced semantic', 'request': { 'filetype' : 'c', 'filepath' : PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num' : 41, 'column_num': 30, 'extra_conf_data': { '&filetype': 'c' }, 'force_semantic': True, }, 'expect': { 'response': requests.codes.internal_server_error, 'data': NO_COMPLETIONS_ERROR, }, } ) @SharedYcmd def GetCompletions_FilteredNoResults_Fallback_test( app ): # no errors because the semantic completer returned results, but they # were filtered out by the query, so this is considered working OK # (whereas no completions from the semantic engine is considered an # error) RunTest( app, { 'description': '. 
on struct returns IDs after query=do_', 'request': { 'filetype': 'c', 'filepath': PathToTestFile( 'general_fallback', 'lang_c.c' ), 'line_num': 71, 'column_num': 18, 'extra_conf_data': { '&filetype': 'c' }, 'force_semantic': False, }, 'expect': { 'response': requests.codes.ok, 'data': has_entries( { 'completions': contains_inanyorder( # do_ is an identifier because it is already in the file when we # load it CompletionEntryMatcher( 'do_', '[ID]' ), CompletionEntryMatcher( 'do_something', '[ID]' ), CompletionEntryMatcher( 'do_another_thing', '[ID]' ), CompletionEntryMatcher( 'DO_SOMETHING_TO', '[ID]' ), CompletionEntryMatcher( 'DO_SOMETHING_VIA', '[ID]' ) ), 'errors': empty() } ) }, } ) @IsolatedYcmd() def GetCompletions_WorksWithExplicitFlags_test( app ): app.post_json( '/ignore_extra_conf_file', { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } ) contents = """ struct Foo { int x; int y; char c; }; int main() { Foo foo; foo. } """ completion_data = BuildRequest( filepath = '/foo.cpp', filetype = 'cpp', contents = contents, line_num = 11, column_num = 7, compilation_flags = [ '-x', 'c++' ] ) response_data = app.post_json( '/completions', completion_data ).json assert_that( response_data[ 'completions' ], has_items( CompletionEntryMatcher( 'c' ), CompletionEntryMatcher( 'x' ), CompletionEntryMatcher( 'y' ) ) ) eq_( 7, response_data[ 'completion_start_column' ] ) @IsolatedYcmd( { 'auto_trigger': 0 } ) def GetCompletions_NoCompletionsWhenAutoTriggerOff_test( app ): app.post_json( '/ignore_extra_conf_file', { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } ) contents = """ struct Foo { int x; int y; char c; }; int main() { Foo foo; foo. 
} """ completion_data = BuildRequest( filepath = '/foo.cpp', filetype = 'cpp', contents = contents, line_num = 11, column_num = 7, compilation_flags = [ '-x', 'c++' ] ) results = app.post_json( '/completions', completion_data ).json[ 'completions' ] assert_that( results, empty() ) @IsolatedYcmd() def GetCompletions_UnknownExtraConfException_test( app ): filepath = PathToTestFile( 'basic.cpp' ) completion_data = BuildRequest( filepath = filepath, filetype = 'cpp', contents = ReadFile( filepath ), line_num = 11, column_num = 7, force_semantic = True ) response = app.post_json( '/completions', completion_data, expect_errors = True ) eq_( response.status_code, requests.codes.internal_server_error ) assert_that( response.json, has_entry( 'exception', has_entry( 'TYPE', UnknownExtraConf.__name__ ) ) ) app.post_json( '/ignore_extra_conf_file', { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } ) response = app.post_json( '/completions', completion_data, expect_errors = True ) eq_( response.status_code, requests.codes.internal_server_error ) assert_that( response.json, has_entry( 'exception', has_entry( 'TYPE', NoExtraConfDetected.__name__ ) ) ) @IsolatedYcmd() def GetCompletions_WorksWhenExtraConfExplicitlyAllowed_test( app ): app.post_json( '/load_extra_conf_file', { 'filepath': PathToTestFile( '.ycm_extra_conf.py' ) } ) filepath = PathToTestFile( 'basic.cpp' ) completion_data = BuildRequest( filepath = filepath, filetype = 'cpp', contents = ReadFile( filepath ), line_num = 11, column_num = 7 ) results = app.post_json( '/completions', completion_data ).json[ 'completions' ] assert_that( results, has_items( CompletionEntryMatcher( 'c' ), CompletionEntryMatcher( 'x' ), CompletionEntryMatcher( 'y' ) ) ) @SharedYcmd def GetCompletions_ExceptionWhenNoFlagsFromExtraConf_test( app
makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/2/dostop' ) evtHS2Stopped = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/2/stopped' ) evtHS2Enable = makeEvent( 'http://id.webbrick.co.uk/events/config/get', 'zoneheatsource/2/enabled', {'val': 1} ) evtHS2Disable = makeEvent( 'http://id.webbrick.co.uk/events/config/get', 'zoneheatsource/2/enabled', {'val': 0} ) evtHS3RequestRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/requestrun' ) evtHS3RequestStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/requeststop' ) evtHS3CommonDoRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/dorun' ) evtHS3CommonDoStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/dostop' ) evtHS3EastDoRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/east/dorun' ) evtHS3EastDoStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/east/dostop' ) evtHS3SouthDoRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/south/dorun' ) evtHS3SouthDoStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/south/dostop' ) evtHS3WestDoRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/west/dorun' ) evtHS3WestDoStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/west/dostop' ) evtHS3Stopped = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/stopped' ) evtHS3Running = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/3/running' ) evtHS3PanelEastTemp5 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/1', {'val': 5.0} ) evtHS3PanelEastTemp25 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/1', {'val': 25.0} ) evtHS3PanelEastTemp95 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/1', {'val': 95.0} ) 
# Pre-built test events for heat sources 3 and 4, created with the
# project helper makeEvent(source_uri, event_type[, payload]).
# The numeric suffix on each name (5/25/95) is the 'val' carried in the
# payload — presumably a temperature reading; confirm units with the
# webbrick CT channel documentation.

# HS3 south panel temperature samples (webbrick 903, CT channel 2)
evtHS3PanelSouthTemp5 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/2', {'val': 5.0} )
evtHS3PanelSouthTemp25 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/2', {'val': 25.0} )
evtHS3PanelSouthTemp95 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/2', {'val': 95.0} )

# HS3 west panel temperature samples (webbrick 903, CT channel 3)
evtHS3PanelWestTemp5 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/3', {'val': 5.0} )
evtHS3PanelWestTemp25 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/3', {'val': 25.0} )
evtHS3PanelWestTemp95 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/3', {'val': 95.0} )

# HS3 heat-exchanger temperature samples (webbrick 903, CT channel 6)
evtHS3HeatExTemp5 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/6', {'val': 5.0} )
evtHS3HeatExTemp25 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/6', {'val': 25.0} )
evtHS3HeatExTemp95 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/903/CT/6', {'val': 95.0} )

# HS4 lifecycle events (request/do run and stop, plus state notifications)
evtHS4RequestRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/requestrun' )
evtHS4RequestStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/requeststop' )
evtHS4DoRun = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/dorun' )
evtHS4DoStop = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/dostop' )
evtHS4Stopped = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/stopped' )
evtHS4Running = makeEvent( 'http://id.webbrick.co.uk/zones/heatsource', 'heatsource/4/running' )

# HS4 panel temperature samples (webbrick 904, CT channel 0)
evtHS4PanelTemp5 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/0', {'val': 5.0} )
evtHS4PanelTemp25 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/0', {'val': 25.0} )
evtHS4PanelTemp95 = makeEvent( 'http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/0', {'val': 95.0} )
# --- Pre-built test events: heat source 4 heat exchanger, heat source 5
# --- (multi-boiler with sub-units 5/1 and 5/2), zone groups 1-6 and
# --- individual zones 1-10. Payload-free events are control/state events.

# Heat source 4: heat-exchanger temperature (webbrick 904, CT channel 5).
evtHS4HeatExTemp5 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/5', {'val': 5.0})
evtHS4HeatExTemp25 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/5', {'val': 25.0})
evtHS4HeatExTemp95 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/904/CT/5', {'val': 95.0})

# Heat source 5: run/stop request, command and state-change events.
evtHS5RequestRun = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/requestrun')
evtHS5DoRun = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/dorun')
evtHS5Running = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/running')
evtHS5RequestStop = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/requeststop')
evtHS5DoStop = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/dostop')
evtHS5Stopped = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/stopped')

# Heat source 5, sub-unit 1 (first boiler): command and state events.
evtHS5_1DoRun = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/1/dorun')
evtHS5_1DoStop = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/1/dostop')
evtHS5_1Running = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/1/running')
evtHS5_1Stopped = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/1/stopped')

# Heat source 5, sub-unit 2 (second boiler): command and state events.
evtHS5_2DoRun = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/2/dorun')
evtHS5_2DoStop = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/2/dostop')
evtHS5_2Running = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/2/running')
evtHS5_2Stopped = makeEvent('http://id.webbrick.co.uk/zones/heatsource', 'heatsource/5/2/stopped')

# Heat source 5: flow (CT/0) and return (CT/1) temperatures on webbrick 905.
evtHS5FlowTemp80 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/905/CT/0', {'val': 80.0})
evtHS5ReturnTemp60 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/905/CT/1', {'val': 60.0})
evtHS5ReturnTemp75 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/905/CT/1', {'val': 75.0})

# Zone group 1: state/command events plus heat-source selection event.
evtZG1Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/1/running')
evtZG1Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/1/stop')
evtZG1Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/1/run')
evtZG1Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/1/stopped')
evtZG1HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/1/heatsource')

# Zone group 2.
evtZG2Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/2/running')
evtZG2Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/2/stop')
evtZG2Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/2/run')
evtZG2Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/2/stopped')
evtZG2HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/2/heatsource')

# Zone group 3.
evtZG3Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/3/running')
evtZG3Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/3/stop')
evtZG3Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/3/run')
evtZG3Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/3/stopped')
evtZG3HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/3/heatsource')

# Zone group 4.
evtZG4Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/4/running')
evtZG4Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/4/stop')
evtZG4Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/4/run')
evtZG4Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/4/stopped')
evtZG4HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/4/heatsource')

# Zone group 5.
evtZG5Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/5/running')
evtZG5Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/5/stop')
evtZG5Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/5/run')
evtZG5Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/5/stopped')
evtZG5HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/5/heatsource')

# Zone group 6.
evtZG6Running = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/6/running')
evtZG6Stop = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/6/stop')
evtZG6Run = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/6/run')
evtZG6Stopped = makeEvent('http://id.webbrick.co.uk/zones/zonegroup', 'zonegroup/6/stopped')
evtZG6HeatSource = makeEvent('http://id.webbrick.co.uk/zones/group/heatsource', 'zonegroup/6/heatsource')

# Individual zones 1-10: running/stop/run/stopped events per zone.
evtZone1Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone1/running')
evtZone1Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone1/stop')
evtZone1Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone1/run')
evtZone1Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone1/stopped')
evtZone2Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone2/running')
evtZone2Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone2/stop')
evtZone2Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone2/run')
evtZone2Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone2/stopped')
evtZone3Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone3/running')
evtZone3Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone3/stop')
evtZone3Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone3/run')
evtZone3Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone3/stopped')
evtZone4Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone4/running')
evtZone4Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone4/stop')
evtZone4Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone4/run')
evtZone4Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone4/stopped')
evtZone5Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone5/running')
evtZone5Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone5/stop')
evtZone5Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone5/run')
evtZone5Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone5/stopped')
evtZone6Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone6/running')
evtZone6Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone6/stop')
evtZone6Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone6/run')
evtZone6Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone6/stopped')
evtZone7Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone7/running')
evtZone7Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone7/stop')
evtZone7Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone7/run')
evtZone7Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone7/stopped')
evtZone8Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone8/running')
evtZone8Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone8/stop')
evtZone8Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone8/run')
evtZone8Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone8/stopped')
evtZone9Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone9/running')
evtZone9Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone9/stop')
evtZone9Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone9/run')
evtZone9Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone9/stopped')
evtZone10Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone10/running')
# --- Pre-built test events: zones 10-17, weather hold-off, zone temperature
# --- and set-point events, occupancy/configuration events, plus the lists of
# --- event-despatch XML resource files used by the test fixtures.

# Zones 10-17: remaining state/command events.
evtZone10Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone10/stop')
evtZone10Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone10/run')
evtZone10Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone10/stopped')
evtZone11Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone11/running')
evtZone11Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone11/stop')
evtZone11Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone11/run')
evtZone11Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone11/stopped')
evtZone12Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone12/running')
evtZone12Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone12/stop')
evtZone12Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone12/run')
evtZone12Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone12/stopped')
evtZone13Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone13/running')
evtZone13Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone13/stop')
evtZone13Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone13/run')
evtZone13Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone13/stopped')
evtZone14Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone14/running')
evtZone14Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone14/stop')
evtZone14Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone14/run')
evtZone14Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone14/stopped')
evtZone15Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone15/running')
evtZone15Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone15/stop')
evtZone15Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone15/run')
evtZone15Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone15/stopped')
evtZone16Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone16/running')
evtZone16Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone16/stop')
evtZone16Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone16/run')
evtZone16Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone16/stopped')
evtZone17Running = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone17/running')
evtZone17Stop = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone17/stop')
evtZone17Run = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone17/run')
evtZone17Stopped = makeEvent('http://id.webbrick.co.uk/zones/zone', 'zone17/stopped')

# Weather channels 1-3: Run / HoldOff state events (payload carries 'state').
evtWeather1Run = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/1', {'state': 'Run'})
evtWeather1HoldOff = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/1', {'state': 'HoldOff'})
evtWeather2Run = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/2', {'state': 'Run'})
evtWeather2HoldOff = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/2', {'state': 'HoldOff'})
evtWeather3Run = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/3', {'state': 'Run'})
evtWeather3HoldOff = makeEvent('http://id.webbrick.co.uk/zones/weather', 'weather/3', {'state': 'HoldOff'})

# Zone 1: measured temperature (webbrick 1, CT/0) and scheduled set-points.
evtZone1Temp5 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/1/CT/0', {'val': 5.0})
evtZone1Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/1/CT/0', {'val': 15.0})
evtZone1Temp22 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/1/CT/0', {'val': 22.0})
evtZone1SetPoint0 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone1/set', {'val': 0.0})
evtZone1SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone1/set', {'val': 14.0})
evtZone1SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone1/set', {'val': 18.0})
evtZone1SetPoint25 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone1/set', {'val': 25.0})

# Zone 2: temperatures, scheduled set-points and manual-override set-points.
evtZone2Temp4 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 4.0})
evtZone2Temp5 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 5.0})
evtZone2Temp13 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 13.0})
evtZone2Temp14 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 14.0})
evtZone2Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 15.0})
evtZone2Temp22 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/2/CT/0', {'val': 22.0})
evtZone2SetPoint0 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone2/set', {'val': 0.0})
evtZone2SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone2/set', {'val': 14.0})
evtZone2SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone2/set', {'val': 18.0})
evtZone2SetPoint25 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone2/set', {'val': 25.0})
evtZone2ManSetPoint0 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone2/manual/set', {'val': 0.0})
evtZone2ManSetPoint8 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone2/manual/set', {'val': 8.0})
evtZone2ManSetPoint14 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone2/manual/set', {'val': 14.0})
evtZone2ManSetPoint18 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone2/manual/set', {'val': 18.0})
evtZone2ManSetPoint25 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone2/manual/set', {'val': 25.0})

# Zones 3-5: temperatures and set-points (zone 5 also has a manual override).
evtZone3Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/3/CT/0', {'val': 15.0})
evtZone3SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone3/set', {'val': 14.0})
evtZone3SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone3/set', {'val': 18.0})
evtZone4Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/4/CT/0', {'val': 15.0})
evtZone4SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone4/set', {'val': 14.0})
evtZone4SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone4/set', {'val': 18.0})
evtZone5Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/0', {'val': 15.0})
evtZone5SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone5/set', {'val': 14.0})
evtZone5SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone5/set', {'val': 18.0})
evtZone5ManualSetPoint22 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone5/manual/set', {'val': 22.0})

# Zone 17: temperatures, set-points and manual override.
evtZone17Temp5 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/17/CT/0', {'val': 5.0})
evtZone17Temp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/17/CT/0', {'val': 15.0})
evtZone17Temp22 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/17/CT/0', {'val': 22.0})
evtZone17SetPoint14 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone17/set', {'val': 14.0})
evtZone17SetPoint18 = makeEvent('http://id.webbrick.co.uk/events/schedule/control', 'zone17/set', {'val': 18.0})
evtZone17ManualSetPoint22 = makeEvent('http://id.webbrick.co.uk/events/zones/manual', 'zone17/manual/set', {'val': 22.0})

# Generic temperature events on webbrick 5, CT channel 1.
evtTemp5 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/1', {'val': 5.0})
evtTemp10 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/1', {'val': 10.0})
evtTemp15 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/1', {'val': 15.0})
evtTemp20 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/1', {'val': 20.0})
evtTemp22 = makeEvent('http://id.webbrick.co.uk/events/webbrick/CT', 'webbrick/5/CT/1', {'val': 22.0})

# Occupancy and zone-2 configuration events.
# NOTE(review): FrostStat0 uses an int payload while the others are floats —
# kept as in the original; verify the consumer treats them uniformly.
evtOccupied = makeEvent('http://id.webbrick.co.uk/events/config/get', 'occupants/home', {'val': 1})
evtUnOccupied = makeEvent('http://id.webbrick.co.uk/events/config/get', 'occupants/home', {'val': 0})
evtZone2Enable = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/enabled', {'val': 1})
evtZone2Disable = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/enabled', {'val': 0})
evtZone2FrostStat0 = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/matStat', {'val': 0})
evtZone2FrostStat5 = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/matStat', {'val': 5.0})
evtZone2FrostStat9 = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/matStat', {'val': 9.0})
evtZone2FrostStat16 = makeEvent('http://id.webbrick.co.uk/events/config/get', 'zone2/matStat', {'val': 16.0})

# Event-despatch configuration file pairs: (inputs.xml, outputs.xml).
masterSingleFileList = (
    "../../../WebBrickGateway/resources/samples1/eventdespatch/zones_inactive/zonemaster_single_inputs.xml",
    "../../../WebBrickGateway/resources/eventdespatch/System/testdummies/zonemaster_outputs.xml",
)
masterDualFileList = (
    "../../../WebBrickGateway/resources/samples1/eventdespatch/zones_inactive/zonemaster_solar_and_boiler_inputs.xml",
    "../../../WebBrickGateway/resources/eventdespatch/System/testdummies/zonemaster_outputs.xml",
)
masterMultipleFileList = (
    "./resources/hvac/zonemaster_multiple_inputs.xml",
    "./resources/hvac/zonemaster_multiple_outputs.xml",
)
heatSourceGenericFileList = (
    "./resources/hvac/zoneheatsource_generic_inputs.xml",
    "./resources/hvac/zoneheatsource_generic_outputs.xml",
)
# Oil/Gas Boiler
heatSource1FileList = (
    "./resources/hvac/zoneheatsource_boiler_1_inputs.xml",
    "./resources/hvac/zoneheatsource_boiler_1_outputs.xml",
)
# ground source heat pump
heatSource2FileList = (
    "./resources/hvac/zoneheatsource_heatpump_2_inputs.xml",
    "./resources/hvac/zoneheatsource_heatpump_2_outputs.xml",
)
# Multi Solar Panel
heatSource3FileList = (
    "./resources/hvac/zoneheatsource_multisolar_3_inputs.xml",
    "./resources/hvac/zoneheatsource_multisolar_3_outputs.xml",
)
# Single Solar
heatSource4FileList = (
    "./resources/hvac/zoneheatsource_solar_4_inputs.xml",
    "./resources/hvac/zoneheatsource_solar_4_outputs.xml",
)
# Multi Boiler
heatSource5FileList = (
    "./resources/hvac/zoneheatsource_multiboiler_5_inputs.xml",
    "./resources/hvac/zoneheatsource_multiboiler_5_outputs.xml",
)
# The items of this tuple continue beyond this chunk of the file.
groupFileList = (
import getpass from datetime import datetime import collections from PySide2 import QtWidgets, QtCore from .. import constant as c from ..core.history import Player as HistoryPlayer from ..core.pgn import MOVES2PGN, PGN2MOVES from ..core.engineer import Engine from .widgets import ( BoardWidget, OptionWidget, MoveWidget, LoadGameWidget, SaveGameDataWidget, ChoosePlayerWidget, ToolBar, SelectPromotionWidget, CustomMessageBox, MovieGenerationThread, MovieProgressBar, ) GAME_OPTIONS = collections.namedtuple( 'GAME_OPTIONS', [ 'white_promotion', 'black_promotion', 'is_standard' ] ) class MainWidget(QtWidgets.QDialog): MOVE_SIGNAL = QtCore.Signal(tuple) GAME_RESET_SIGNAL = QtCore.Signal() UPDATE_BOARD_SIGNAL = QtCore.Signal() GAME_OPTIONS_SET_SIGNAL = QtCore.Signal(tuple) GAME_OVER_SIGNAL = QtCore.Signal(bool) BULK_MOVE_SIGNAL = QtCore.Signal(tuple) def __init__(self, board, parent=None): super().__init__(parent=parent) self._board = board self._board_widget = BoardWidget(board=self._board) self._moves_widget = MoveWidget() self._tool_bar = ToolBar() self._collapsed_width = None self._history_player = None self._engine_color = None self._engine = Engine() self._board = board self._current_player = c.Color.white self._winner = None self._bonus_time = c.GAME.DEFAULT_BONUS_TIME self._timer_white = QtCore.QTimer() self._timer_white.setInterval(1000) # timeout per 1 second self._remaining_time_white = c.GAME.DEFAULT_PLAY_TIME * 60 self._timer_black = QtCore.QTimer() self._timer_black.setInterval(1000) # timeout per 1 second self._remaining_time_black = c.GAME.DEFAULT_PLAY_TIME * 60 self._auto_flip = False self._is_paused = False self._is_game_over = False self._has_game_started = False self._game_loaded = False self._game_data = None self._history_player = None self._inspecting_history = False self._move_index = None self._white_player_name = getpass.getuser().capitalize() self._black_player_name = 'Opponent' self._game_date = None self._is_flipped = False # Custom Options 
self._custom_options_set = False self._custom_bonus_time = None self._custom_play_time = None self._custom_white_promotion = None self._custom_black_promotion = None self._custom_is_standard_type = None self._custom_auto_flip = None self._setup_ui() self._connect_signals() self._reset() def _reset(self): self.GAME_RESET_SIGNAL.emit() self._board_widget.reset() self._moves_widget.reset() self._engine_color = None self._history_player = None self._movie_generator = None self._current_player = c.Color.white self._winner = None self._bonus_time = c.GAME.DEFAULT_BONUS_TIME self._timer_white.stop() self._remaining_time_white = c.GAME.DEFAULT_PLAY_TIME * 60 self._timer_black.stop() self._remaining_time_black = c.GAME.DEFAULT_PLAY_TIME * 60 self._auto_flip = False self._is_paused = False self._is_game_over = False self._has_game_started = False self._game_loaded = False self._game_data = None self._history_player = None self._inspecting_history = False self._move_index = None self._white_player_name = getpass.getuser().capitalize() self._black_player_name = 'Opponent' self._game_date = None self._is_flipped = False self._board_widget.display_time_white(self._remaining_time_white) self._board_widget.display_time_black(self._remaining_time_black) self._toggle_left_widget(visibility=False) self._tool_bar.toogle_pause_icon(is_paused=self._is_paused) self._tool_bar.setVisible(True) self._adjust_size() def _reset_custom_options(self): self._custom_options_set = False self._custom_bonus_time = None self._custom_play_time = None self._custom_white_promotion = None self._custom_black_promotion = None self._custom_is_standard_type = None self._custom_auto_flip = False def update_move(self, game_data): self._game_data = game_data self._display_pgn_moves() self._board_widget.update_move(game_data=self._game_data) def update_invalid_move(self): self._board_widget.update_invalid_move() def set_current_player(self, color): self._add_bonus_time() self._stop_current_player_time() 
self._current_player = color self._board_widget.set_current_player(color) self._start_current_player_time() self._handle_engine_move() if self._engine_color is None: self._handle_auto_flip() def _handle_auto_flip(self): if not self._auto_flip: return self._flip_timer = QtCore.QTimer() self._flip_timer.setInterval(350) self._flip_timer.setSingleShot(True) self._flip_timer.timeout.connect(self._flip_board) self._flip_timer.start() def _flip_board(self): if self._current_player == c.Color.black: if self._is_flipped: return elif self._current_player == c.Color.white: if not self._is_flipped: return self._is_flipped = self._current_player == c.Color.black self._board_widget.is_flipped = self._is_flipped def _handle_engine_move(self): if self._engine_color is None: return if self._current_player != self._engine_color: return moves = [(m.src, m.dst) for m in self._game_data.move_history] best_move = self._engine.get_best_move(moves) if best_move is None: return self._engine_move_timer = QtCore.QTimer() self._engine_move_timer.setInterval(350) self._engine_move_timer.setSingleShot(True) self._engine_move_timer.timeout.connect( lambda: self._make_engine_move(best_move) ) self._engine_move_timer.start() def _make_engine_move(self, move): self.MOVE_SIGNAL.emit((move, None)) def update_board(self): self._board_widget.update_board() def game_over(self, winner): self._set_game_over() self._board_widget.game_over(winner) def stalemate(self): self._set_game_over() self._board_widget.stalemate() self._moves_widget.display_win(winning_text='1/2-1/2') def promotion_required(self, move_spec): w = SelectPromotionWidget(color=self._current_player) w.SELECTED_PROMOTION_SIGNAL.connect( lambda piece_type: self._recieved_move_string( move_string=move_spec, promotion=piece_type, ), ) w.show() def resizeEvent(self, event): if self._is_paused: return if self._game_loaded or self._has_game_started: self._handle_left_widget() def mouseDoubleClickEvent(self, event): if self._is_paused: return if 
not self._has_game_started: return self._tool_bar.setVisible(True) self._toggle_left_widget() def keyPressEvent(self, event): self._handle_keypress(event=event) @staticmethod def _is_key_pressed(event, key, modifier=None): result = event.key() == key if modifier is not None: result = result and event.modifiers() == modifier return result def _setup_ui(self): self.setStyleSheet('border: none;') layout = QtWidgets.QHBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) self._right_widget = self._create_right_widget() self._left_widget = self._create_left_widget() layout.addWidget(self._right_widget, 1) layout.addWidget(self._left_widget, 2) def _create_right_widget(self): widget = QtWidgets.QWidget() widget.setStyleSheet('border: none;') layout = QtWidgets.QHBoxLayout(widget) layout.addWidget(self._tool_bar) layout.addWidget(self._board_widget, 1) return widget def _create_left_widget(self): # Create a left widget widget = QtWidgets.QWidget() widget.setStyleSheet('border: none;') # Create left widget toggle btn self._collapse_btn = QtWidgets.QPushButton('<') self._collapse_btn.setStyleSheet('border: 1px solid rgb(90, 90, 90);') self._collapse_btn.setFixedWidth(c.APP.COLLAPSE_BTN_WIDTH) layout = QtWidgets.QHBoxLayout(widget) layout.addWidget(self._collapse_btn) layout.addWidget(self._moves_widget, 1) return widget def _connect_signals(self): # Board Widget signals bw = self._board_widget bw.MOVE_SIGNAL.connect(self._recieved_move_string) bw.RESIGN_BTN_CLICKED_SIGNAL.connect(self._resign) bw.ANIM_IN_PROGRESS_SIGNAL.connect(self._anim_in_progress) bw.ANIM_FINISHED_SIGNAL.connect(self._anim_finished) # Move Widget Signals mw = self._moves_widget mw.KEYPRESS_SIGNAL.connect(self._handle_keypress) mw.MOVE_SELECTED_SIGNAL.connect(self._move_selected) mw.FIRST_BTN_CLICKED_SIGNAL.connect(self._first_btn_clicked) mw.PREV_BTN_CLICKED_SIGNAL.connect(self._previous_btn_clicked) mw.NEXT_BTN_CLICKED_SIGNAL.connect(self._next_btn_clicked) 
mw.LAST_BTN_CLICKED_SIGNAL.connect(self._last_btn_clicked) # Toolbar Signals self._tool_bar.BTN_CLICKED_SIGNAL.connect(self._handle_command) # Internal Signals self._timer_white.timeout.connect(self._timer_white_timeout) self._timer_black.timeout.connect(self._timer_black_timeout) self._collapse_btn.clicked.connect(self._collapse_btn_clicked) def _handle_left_widget(self): if self._collapsed_width is None: return width_increased = self.width() > self._collapsed_width left_widget_is_hidden = not self._left_widget.isVisible() if width_increased and left_widget_is_hidden: self._tool_bar.setVisible(True) self._left_widget.setVisible(True) self._display_pgn_moves() self._collapse_btn.setVisible(True) self._collapse_btn.setText('<') def _toggle_left_widget(self, visibility=None): if visibility is not None: vis_to_set = visibility else: vis_to_set = not self._left_widget.isVisible() self._left_widget.setVisible(vis_to_set) self._collapse_btn.setVisible(vis_to_set) if vis_to_set: self._collapse_btn.setVisible(vis_to_set) self._collapse_btn.setText('<') self._display_pgn_moves() def _recieved_move_string(self, move_string, promotion=None): self.MOVE_SIGNAL.emit((move_string, promotion)) def _move_selected(self, move_index): self._inspect_history(index=move_index) def _toggle_pause(self): if not self._has_game_started: return if self._is_game_over: return if self._is_paused: self._resume_game() else: self._pause_game() self._tool_bar.toogle_pause_icon(is_paused=self._is_paused) def _first_btn_clicked(self): self._inspect_history(start=True) def _previous_btn_clicked(self): self._inspect_history(cursor_step=-1) def _next_btn_clicked(self): self._inspect_history(cursor_step=1) def _last_btn_clicked(self): self._inspect_history(end=True) def _collapse_btn_clicked(self): if not self._has_game_started: return self._tool_bar.setVisible(False) self._toggle_left_widget() self._adjust_size() self._collapsed_width = self.width() def _start_current_player_time(self): if 
self._current_player == c.Color.white: self._timer_white.start() else: self._timer_black.start() def _stop_current_player_time(self): if self._current_player == c.Color.white: self._timer_white.stop() else: self._timer_black.stop() def _stop_all_timers(self): self._timer_white.stop() self._timer_black.stop() def _timer_white_timeout(self): self._remaining_time_white -= 1 if self._remaining_time_white == 0: self.game_over(winner=c.Color.black) white_wins = False self.GAME_OVER_SIGNAL.emit(white_wins) self._moves_widget.display_win(winner=c.Color.black) self._board_widget.display_time_white(self._remaining_time_white) def _timer_black_timeout(self): self._remaining_time_black -= 1 if self._remaining_time_black == 0: self.game_over(winner=c.Color.white) white_wins = True self.GAME_OVER_SIGNAL.emit(white_wins) self._moves_widget.display_win(winner=c.Color.white) self._board_widget.display_time_black(self._remaining_time_black) def _add_bonus_time(self): if self._current_player == c.Color.white: self._remaining_time_white += self._bonus_time self._board_widget.display_time_white(self._remaining_time_white) else: self._remaining_time_black += self._bonus_time self._board_widget.display_time_black(self._remaining_time_black) def _set_options(self, options): self._custom_options_set = True self._custom_bonus_time = options.bonus_time self._custom_play_time = options.play_time self._custom_white_promotion = options.white_promotion self._custom_black_promotion = options.black_promotion self._custom_is_standard_type = options.is_standard_type self._custom_auto_flip = options.auto_flip def _resign(self, winning_color): if self._is_paused: return if self._is_game_over: return self.game_over(winning_color) white_wins = True if winning_color == c.Color.black: white_wins = False self.GAME_OVER_SIGNAL.emit(white_wins) self._moves_widget.display_win(winner=winning_color) def _inspect_history( self, cursor_step=None, start=False, end=False, index=None ): if self._history_player is 
None: return if index is not None: result = self._history_player.move_to(index=index) elif start: result = self._history_player.move_to_start() elif end: result = self._history_player.move_to_end() elif cursor_step == 1: result = self._history_player.move_forward() elif cursor_step == -1: result = self._history_player.move_backward() else: error_msg = f'Unknown cursor step: {cursor_step}' raise RuntimeError(error_msg) if result is None or result.board is None: return not_at_end = not(self._history_player.is_at_end) self._board_widget.inspecting_history = not_at_end self._board_widget.board.data = result.board.data self._board_widget.board.reverse = result.board.reverse self.update_board() if result.move is not None: self._board_widget.highlight_move( src=result.move.src, dst=result.move.dst, ) self._move_index = self._history_player.current_index self._moves_widget.highlight_move(move_index=self._move_index) def _toggle_show_threatened(self): self._board_widget.toggle_show_threatened() def _get_pgn2moves(self): msg_box = CustomMessageBox( text="Please select a PGN game file", title="Pgn to movie", parent=self, ) msg_box.exec_() file_path, _ = QtWidgets.QFileDialog.getOpenFileName( parent=None, caption='Load game (.pgn)', filter='*.pgn' ) if not file_path: return return PGN2MOVES(pgn_file_path=file_path) def _handle_load_game(self): if self._has_game_started: return self._pgn2moves = self._get_pgn2moves() if self._pgn2moves is None: return if self._pgn2moves.nb_games == 1: self._load_game() else: game_info = self._pgn2moves.short_info w = LoadGameWidget(game_info=game_info, parent=self) w.SELECTED_GAME_SIGNAL.connect(self._load_game) w.show() def _handle_make_movie(self): if self._has_game_started: return self._pgn2moves = self._get_pgn2moves() if self._pgn2moves is None: return if self._pgn2moves.nb_games == 1: self._make_movie() else: game_info = self._pgn2moves.short_info w = LoadGameWidget(game_info=game_info, parent=self) 
w.SELECTED_GAME_SIGNAL.connect(self._make_movie) w.show() def _load_game(self, game_index=0): if game_index == -1: # No game was selected return self._reset() self._game_loaded = True self._board_widget.game_loaded = True moves = self._pgn2moves.get_moves(game_index=game_index) bulk_moves = [ (f'{src.address}{dst.address}', promotion) for src, dst, promotion in moves ] self.BULK_MOVE_SIGNAL.emit(bulk_moves) self._stop_all_timers() header = self._pgn2moves.header_info[game_index] result = header.result self._moves_widget.display_win(winning_text=result) self._white_player_name = header.white self._black_player_name = header.black self._game_date = header.date self._moves_widget.set_game_info( white=self._white_player_name, black=self._black_player_name, date=self._game_date, result=result, ) self._board_widget.set_panel_visibility(False) self._toggle_left_widget(visibility=True) self._collapse_btn.setVisible(False) self._adjust_size() self._inspect_history(start=True) def _make_movie(self, game_index=0): if game_index == -1: # No game was selected return msg_box = CustomMessageBox( text="Please select the save location for the movie file", title="Pgn to movie" ) msg_box.exec_() movie_file_path, _ = QtWidgets.QFileDialog.getSaveFileName( parent=None, caption='Create movie (.mp4)', filter='*.mp4' ) if not movie_file_path: return QtWidgets.QApplication.processEvents() self._movie_thread = MovieGenerationThread( creator=self._pgn2moves, game_index=game_index, movie_file_path=movie_file_path ) self._movie_progress_bar = MovieProgressBar() self._movie_progress_bar.show() # Movie Thread Signals mt = self._movie_thread mt.TOTAL_IMAGES_SIGNAL.connect(self._total_images_found) mt.MOVIE_IMAGE_GENERATED_SIGNAL.connect(self._image_written) mt.MOVIE_IMAGE_COMPILED_SIGNAL.connect(self._image_compiled) mt.TITLE_IMAGE_CREATED_SIGNAL.connect(self._movie_title_created) mt.MOVIE_IMAGES_DONE.connect(self._images_done) mt.MOVIE_DONE.connect(self._movie_created) # NOTE: Create Movie 
# should be called in the end after the
        # the progress bar and the thread have been created and are
        # connected through the signals properly. Otherwise a few signals from
        # thread would be missed and cause progress bar crashes or bugs
        self._movie_thread.create_movie()

    def _total_images_found(self, nb_images):
        """Slot: thread reported the total number of frames to compile."""
        self._movie_progress_bar.display_text(
            f'Total {nb_images} images to compile'
        )
        self._movie_progress_bar.total = nb_images

    def _movie_title_created(self):
        """Slot: the title card image has been rendered."""
        self._movie_progress_bar.display_text('Title created')

    def _image_written(self):
        """Slot: one frame image was written to disk."""
        self._movie_progress_bar.display_text('Creating movie images')
        self._movie_progress_bar.update_move()

    def _image_compiled(self):
        """Slot: one frame was encoded into the movie."""
        self._movie_progress_bar.display_text('Compiling movie')
        self._movie_progress_bar.update_compile()

    def _images_done(self):
        """Slot: all frame images have been generated."""
        self._movie_progress_bar.display_text('Images Created')

    def _movie_created(self):
        """Slot: movie finished; close the progress bar and stop the thread."""
        self._movie_progress_bar.display_text('Movie successfully created')
        self._movie_progress_bar.finish()
        self._movie_thread.quit()

    def _handle_save_game(self):
        """Menu handler: save the current game as PGN.

        Refused when the board holds a loaded (read-only) game or when no
        moves have been played yet; otherwise opens the metadata form whose
        DONE signal triggers ``_save_game``.
        """
        if self._game_loaded:
            return
        if self._game_data is None:
            return
        elif not self._game_data.move_history:
            return
        w = SaveGameDataWidget(
            parent=self,
            white=self._white_player_name,
            black=self._black_player_name,
            date=self._game_date,
        )
        w.DONE_SIGNAL.connect(self._save_game)
        w.show()

    def _save_game(self, game_data):
        """Build the PGN text (headers + movetext) for the finished game.

        ``game_data`` arrives as the metadata object from
        ``SaveGameDataWidget`` and is immediately rebound to the rendered
        PGN header string.
        NOTE(review): rebinding the parameter name here is confusing but
        harmless; the move history is read from ``self._game_data`` instead.
        (Method continues past this chunk — the file dialog call is cut off.)
        """
        result = self._get_result()
        game_data = (
            f'[Event "{game_data.event}"]\n'
            f'[Site "{game_data.site}"]\n'
            f'[Date "{game_data.date}"]\n'
            f'[Round "{game_data.round}"]\n'
            f'[White "{game_data.white}"]\n'
            f'[Black "{game_data.black}"]\n'
            f'[Result "{result}"]\n'
        )
        move_history = self._game_data.move_history
        game_moves = MOVES2PGN(move_history).text
        # PGN movetext must terminate with the result token.
        if not game_moves.endswith(result):
            game_moves = f'{game_moves} {result}'
        game = f'{game_data}\n{game_moves}'
        file_path, _
axis=0))
        # Consecutive edge vectors around the point list.
        v0 = vn[:, :-1]
        v1 = vn[:, 1:]
        # Signed angle between successive vectors: atan2(cross, dot).
        cross = np.cross(v0.T, v1.T)
        dot = np.sum(v0 * v1, axis=0)
        ang = np.arctan2(cross, dot)
        # Fold negative angles so all values lie in [0, pi].
        uneg = ang < 0
        ang[uneg] = -ang[uneg] + np.pi
        ang[~uneg] = np.pi - ang[~uneg]
        if not inside:
            # Exterior angle is the complement on the full turn.
            ang = 2 * np.pi - ang
        if unit == 'deg':
            return upt, ang * 180 / np.pi
        elif unit == 'rad':
            return upt, ang
        # atan2(cross(a,b)), dot(a,b))

    def pltlines(self, lines, fig=[], ax=[], color='r', alpha=1):
        """ plot a line with a specified color and transparency

        Parameters
        -----------

        lines : list of shapely lines
        fig : matplotlib figure (``[]`` -> current figure)
        ax : figure axis (``[]`` -> current axis)
        color : string
        alpha : float
            transparency

        See Also
        --------

        pylayers.gis.layout.Layout.plot

        """
        # NOTE(review): ``fig=[]``/``ax=[]`` are mutable defaults used only as
        # "not provided" sentinels; never mutated, so behaviour is safe.
        if fig == []:
            fig = plt.gcf()
        if ax == []:
            ax = plt.gca()
        c = np.array([l.xy for l in lines])
        [ax.plot(x[0, :], x[1, :], color=color, alpha=alpha) for x in c]
        plt.axis(self.ax)
        plt.draw()

    def pltpoly(self, poly, fig=[], ax=[], color='r', alpha=0.2):
        """ plot a polygon with a specified color and transparency

        TODO : To be moved into another class
        """
        if fig == []:
            fig = plt.gcf()
        if ax == []:
            ax = plt.gca()
        # ``poly`` may be a single polygon or an iterable of polygons.
        # NOTE(review): the bare ``except:`` masks any failure (not just a
        # TypeError from iterating a single polygon) — worth narrowing.
        try:
            mpl = [PolygonPatch(x, alpha=alpha, color=color) for x in poly]
        except:
            mpl = [PolygonPatch(x, alpha=alpha, color=color) for x in [poly]]
        [ax.add_patch(x) for x in mpl]
        plt.axis(self.ax)
        plt.draw()

    def pltvnodes(self, vn, fig=[], ax=[]):
        """ plot vnodes

        Parameters
        ----------

        vn : list of nodes (keys of ``self.Gs.pos``)
        fig :
        ax :

        """
        if fig == []:
            fig = plt.gcf()
        if ax == []:
            ax = plt.gca()
        if len(vn) > 0:
            X = np.array([self.Gs.pos[x] for x in vn])
            ax.plot(X[:, 0], X[:, 1], 'or')
            # Label every plotted point with its node identifier.
            [ax.text(x[0], x[1], vn[xx]) for xx, x in enumerate(X)]
        return fig, ax

    def updateshseg(self):
        """ update shapely segment

        build a shapely object for all segments

        This function is called at the beginning of buildGt.
See Also -------- buildGt """ seg_connect = {x: self.Gs.node[x]['connect'] for x in self.Gs.nodes() if x > 0} dpts = {x[0]: (self.Gs.pos[x[1][0]], self.Gs.pos[x[1][1]]) for x in seg_connect.items()} self._shseg = {p[0]: sh.LineString(p[1]) for p in dpts.items()} def _triangle_old(self, poly_surround, poly_holes=[], mesh_holes=False): """ perfome a delaunay partitioning on shapely polygons Parameters ---------- poly_surround : sh.Polygon A single polygon to be partitionned poly_holes : list of sh.Polygon A list of polygon contained inside poly_surround. they are considered as holes mesh_holes : bool If True make the delaunay partition of poly_holes else : only partitioning poly_surround and traits poly_holes as holes Returns ------- T : dict dictionnary from triangle.triangulate library T.keys() ['segment_markers', 'segments', 'holes', 'vertices', 'vertex_markers', 'triangles' ] Notes ----- uses triangle library """ if not isinstance(poly_surround, list): poly_surround = [poly_surround] lP = poly_surround + poly_holes vertices = np.ndarray(shape=(2, 0)) segments = np.ndarray(shape=(2, 0), dtype='int') holes = np.ndarray(shape=(2, 0)) segcpt = 0 for p in lP: pts = np.array(p.exterior.xy)[:, :-1] vertices = np.hstack((vertices, pts)) nbv = pts.shape[1] segments = np.hstack((segments, np.array( [np.arange(nbv), np.mod(range(1, nbv + 1), nbv)], dtype='int') + segcpt)) segcpt = segcpt + nbv if not mesh_holes: holes = np.hstack((holes, np.array(p.centroid.xy))) if not mesh_holes: C = {'vertices': vertices.T, 'segments': segments.T, 'holes': holes.T} else: C = {'vertices': vertices.T, 'segments': segments.T} import ipdb ipdb.set_trace() T = triangle.triangulate(C, 'pa') # import triangle.plot as plot # ax=plt.gca() # plot.plot(ax,**T) return T def _merge_polygons(self, lP): """ merge triangle (polygon object) to cvx polygon Parameters ---------- lP : list list of polygon to be merged Return ------ lMP : list list of merged polygons """ lMP = [] # MERGE POLYGONS # move from 
delaunay triangles to convex polygons while lP != []: p = lP.pop(0) # restrict research to polygon that are touching themself restp = [(ix, x) for ix, x in enumerate(lP) if isinstance(p.intersection(x), sh.LineString)] # self.pltpoly(p,ax=plt.gca()) conv = False pold = p # for ip2,p2 in restp: for ip2, p2 in restp: # inter = p.intersection(p2) # if 2 triangles have a common segment p = p + p2 if p.isconvex(): lP.pop(ip2) lP.insert(0, p) conv = True break else: # if pold not in cpolys: # cpolys.append(pold) p = pold # if (ip2 >= len(polys)):# and (conv): # if conv : # if p not in cpolys: # cpolys.append(p) if restp == [] and conv == True: lMP.append(p) if not conv: # else: if pold not in lMP: lMP.append(pold) if len(lP) == 0: if p not in lMP: lMP.append(p) return lMP def _triangle(self, holes=[], vnodes=[] ,bplot = False): """ Delaunay partitioning on shapely polygons Parameters ---------- holes : ndarray if holes ==[] : it means the merge is applied on the interior of the layout (indoor) if holes == np.ndarray (centroid of polygon). 
indoor is discarded and delaunay is applied on outdoor Returns ------- T : dict dictionnary from triangle.triangulate library with the following keys ['segment_markers', 'segments', 'holes', 'vertices', 'vertex_markers', 'triangles'] map_vertices : points index Notes ----- This methods uses the `triangle` library """ # this means Delaunay is applied on exterior # and inside polygon will be discarded segbounds = [] ptbounds = [] if holes == []: # remove air segments around layout pass # [segbounds.extend(nx.neighbors(L.Gs,x)) for x in L.lboundary] # ptbounds = L.lboundary if vnodes == []: vnodes = self.Gs.nodes() # find termination points of segments of layout if nx.__version__!='1.10': seg = np.array([self.Gs[x] for x in vnodes if x > 0 and x not in segbounds]) else: seg = np.array([nx.neighbors(self.Gs, x) for x in vnodes if x > 0 and x not in segbounds]) # get vertices/points of layout ivertices = np.array([(x, self.Gs.pos[x][0], self.Gs.pos[x][1]) for x in vnodes if x < 0 and x not in ptbounds]) # map_vertices : points negative index (Np,) map_vertices = ivertices[:, 0].astype('int') # vertices : coordinates (Np x 2) vertices = ivertices[:, 1:] sorter = np.argsort(map_vertices) # mapping between Gs graph segments and triangle segments segments = sorter[np.searchsorted(map_vertices, seg, sorter=sorter)] if holes == []: C = {'vertices': vertices, 'segments': segments} else: C = {'vertices': vertices, 'segments': segments, 'holes': holes} T = triangle.triangulate(C, 'pa') if bplot: import triangle.plot as plot ax=plt.gca() plot.plot(ax,**T) ax = plt.gca() ax.get_xaxis().set_visible(True) ax.get_yaxis().set_visible(True) plt.show() return T, map_vertices def buildGt(self, check=True,difftol=0.01,verbose=False,tqdmpos=0): """ build graph of convex cycles Parameters ---------- check : boolean difftol : float verbose : boolean tqdmpos : progressbar todo : - add an option to only take outside polygon => pass to self._triangle a hole coreesponding to centroid of polygon 
except those of boundary ( see buildGtold ) """ # 1. Do a Delaunay triangulation # build a list of triangle polygons : lTP # vnodes refers to the nodes of Gs # if vnodes == 0 it means this is a created # segment which is tagged as _AIR ### # if verbose : # Gtpbar = tqdm.tqdm(total=100., desc='BuildGt',position=0) # pbar_awloop = tqdm.tqdm(total=100., desc ='airwalls loop',leave=False,position=1) Gtpbar = pbar(verbose,total=100., desc ='BuildGt',position=tqdmpos) pbartmp = pbar(verbose,total=100., desc ='Triangulation',leave=True,position=tqdmpos+1) T, map_vertices = self._triangle() if verbose: pbartmp.update(100.) Gtpbar.update(100./12.) ptri = T['vertices'][T['triangles']] # List of Triangle Polygons pbartmp = pbar(verbose,total=100., desc ='Transfer polygons list', leave=True, position=tqdmpos+1) lTP = [geu.Polygon(x) for x in ptri] if verbose: pbartmp.update(100.) Gtpbar.update(100./12.) # update vnodes of Polygons pbartmp = pbar(verbose,total=100., desc ='Update Polygons vnodes', leave=True, position=tqdmpos+1) # # p is a polygon # get_points(p) : get points from polygon # this is for limiting the search region for large Layout # [ polygon.setvnodes_new(self.get_points(polygon), self) for polygon in lTP ] if verbose: pbartmp.update(100.) Gtpbar.update(100./12.) # 2.add air walls to triangle poly ### # luaw : list of tuples # ( polygon , array of _AIR segments) pbartmp = pbar(verbose,total=100., desc ='Buiild list of airwalls', leave=True, position=tqdmpos+1) luaw = [(p, np.where(p.vnodes == 0)[0]) for p in lTP] if verbose: pbartmp.update(100.) Gtpbar.update(100./12.) # # For a triangle polygon the number of vnodes # creates new _AIR segments # cpt = 1./(len(luaw)+1) _airseg = [] pbartmp = pbar(verbose,total=100., desc ='Add airwalls',leave=True,position=tqdmpos+1) for p, uaw in luaw: # for each vnodes == 0, add an _AIR if verbose : pbartmp.update(100.*cpt)
database
        db_artist = await self.mass.music.artists.get_db_item_by_prov_id(
            prov_artist_id, provider_id=self.id
        )
        if db_artist is None:
            raise MediaNotFoundError(f"Artist not found: {prov_artist_id}")
        # TODO: adjust to json query instead of text search
        # NOTE(review): query built by string interpolation; item_id comes
        # from our own database rather than user input, but a parameterized
        # query would still be safer — confirm with callers.
        query = f"SELECT * FROM tracks WHERE artists LIKE '%\"{db_artist.item_id}\"%'"
        query += f" AND provider_ids LIKE '%\"{self.type.value}\"%'"
        return await self.mass.music.tracks.get_db_items(query)

    async def library_add(self, *args, **kwargs) -> bool:
        """Add item to provider's library. Return true on success."""
        # already handled by database
        # NOTE(review): no explicit return, so callers receive None (falsy)
        # despite the ``-> bool`` annotation — confirm intended.

    async def library_remove(self, *args, **kwargs) -> bool:
        """Remove item from provider's library. Return true on success."""
        # already handled by database
        # TODO: do we want to process/offer deletions here ?
        # NOTE(review): same implicit-None return as library_add.

    async def add_playlist_tracks(
        self, prov_playlist_id: str, prov_track_ids: List[str]
    ) -> None:
        """Add track(s) to playlist."""
        itempath = await self.get_filepath(MediaType.PLAYLIST, prov_playlist_id)
        if not await self.exists(itempath):
            raise MediaNotFoundError(f"Playlist path does not exist: {itempath}")
        # Read the whole playlist, then rewrite it with the new URIs appended
        # one per line.
        async with self.open_file(itempath, "r") as _file:
            cur_data = await _file.read()
        async with self.open_file(itempath, "w") as _file:
            await _file.write(cur_data)
            for uri in prov_track_ids:
                await _file.write(f"\n{uri}")

    async def remove_playlist_tracks(
        self, prov_playlist_id: str, prov_track_ids: List[str]
    ) -> None:
        """Remove track(s) from playlist."""
        itempath = await self.get_filepath(MediaType.PLAYLIST, prov_playlist_id)
        if not await self.exists(itempath):
            raise MediaNotFoundError(f"Playlist path does not exist: {itempath}")
        # Keep every line whose (url-decoded, stripped) form is not being
        # removed, then rewrite the file from that filtered list.
        cur_lines = []
        async with self.open_file(itempath, "r") as _file:
            for line in await _file.readlines():
                line = urllib.parse.unquote(line.strip())
                if line not in prov_track_ids:
                    cur_lines.append(line)
        async with self.open_file(itempath, "w") as _file:
            for uri in cur_lines:
                await _file.write(f"{uri}\n")

    async def get_stream_details(self, item_id: str) ->
StreamDetails: """Return the content details for the given track when it will be streamed.""" itempath = await self.get_filepath(MediaType.TRACK, item_id) if not await self.exists(itempath): raise MediaNotFoundError(f"Track path does not exist: {itempath}") def parse_tag(): return TinyTag.get(itempath) tags = await self.mass.loop.run_in_executor(None, parse_tag) return StreamDetails( type=StreamType.FILE, provider=self.type, item_id=item_id, content_type=ContentType(itempath.split(".")[-1]), path=itempath, sample_rate=tags.samplerate or 44100, bit_depth=16, # TODO: parse bitdepth ) async def _parse_track(self, track_path: str) -> Track | None: """Try to parse a track from a filename by reading its tags.""" if not await self.exists(track_path): raise MediaNotFoundError(f"Track path does not exist: {track_path}") track_item_id = self._get_item_id(track_path) if not TinyTag.is_supported(track_path): return None # parse ID3 tags with TinyTag def parse_tags(): return TinyTag.get(track_path, image=True, ignore_errors=True) tags = await self.mass.loop.run_in_executor(None, parse_tags) # prefer title from tag, fallback to filename if tags.title: track_title = tags.title else: ext = track_path.split(".")[-1] track_title = track_path.split(os.sep)[-1] track_title = track_title.replace(f".{ext}", "").replace("_", " ") self.logger.warning( "%s is missing ID3 tag [title], using filename as fallback", track_path ) name, version = parse_title_and_version(track_title) track = Track( item_id=track_item_id, provider=self.type, name=name, version=version, # a track on disk is always in library in_library=True, ) # album # work out if we have an artist/album/track.ext structure if tags.album: track_parts = track_path.rsplit(os.sep) album_folder = None artist_folder = None parentdir = os.path.dirname(track_path) for _ in range(len(track_parts)): dirname = parentdir.rsplit(os.sep)[-1] if compare_strings(dirname, tags.albumartist): artist_folder = parentdir if compare_strings(dirname, 
tags.album): album_folder = parentdir parentdir = os.path.dirname(parentdir) # album artist if artist_folder: album_artists = [ await self._parse_artist( name=tags.albumartist, artist_path=artist_folder, in_library=True, ) ] elif tags.albumartist: album_artists = [ await self._parse_artist(name=item, in_library=True) for item in split_items(tags.albumartist) ] else: # always fallback to various artists as album artist if user did not tag album artist # ID3 tag properly because we must have an album artist album_artists = [await self._parse_artist(name=FALLBACK_ARTIST)] self.logger.warning( "%s is missing ID3 tag [albumartist], using %s as fallback", track_path, FALLBACK_ARTIST, ) track.album = await self._parse_album( name=tags.album, album_path=album_folder, artists=album_artists, in_library=True, ) else: self.logger.warning("%s is missing ID3 tag [album]", track_path) # track artist(s) if tags.artist == tags.albumartist: track.artists = track.album.artists else: # Parse track artist(s) from artist string using common splitters used in ID3 tags # NOTE: do not use a '/' or '&' to prevent artists like AC/DC become messed up track_artists_str = tags.artist or FALLBACK_ARTIST track.artists = [ await self._parse_artist(item, in_library=False) for item in split_items(track_artists_str) ] # Check if track has embedded metadata img = await self.mass.loop.run_in_executor(None, tags.get_image) if not track.metadata.images and img: # we do not actually embed the image in the metadata because that would consume too # much space and bandwidth. Instead we set the filename as value so the image can # be retrieved later in realtime. 
track.metadata.images = [MediaItemImage(ImageType.THUMB, track_path, True)] if track.album and not track.album.metadata.images: track.album.metadata.images = track.metadata.images # parse other info track.duration = tags.duration track.metadata.genres = set(split_items(tags.genre)) track.disc_number = try_parse_int(tags.disc) track.track_number = try_parse_int(tags.track) track.isrc = tags.extra.get("isrc", "") if "copyright" in tags.extra: track.metadata.copyright = tags.extra["copyright"] if "lyrics" in tags.extra: track.metadata.lyrics = tags.extra["lyrics"] quality_details = "" if track_path.endswith(".flac"): # TODO: get bit depth quality = MediaQuality.FLAC_LOSSLESS if tags.samplerate > 192000: quality = MediaQuality.FLAC_LOSSLESS_HI_RES_4 elif tags.samplerate > 96000: quality = MediaQuality.FLAC_LOSSLESS_HI_RES_3 elif tags.samplerate > 48000: quality = MediaQuality.FLAC_LOSSLESS_HI_RES_2 quality_details = f"{tags.samplerate / 1000} Khz" elif track_path.endswith(".ogg"): quality = MediaQuality.LOSSY_OGG quality_details = f"{tags.bitrate} kbps" elif track_path.endswith(".m4a"): quality = MediaQuality.LOSSY_AAC quality_details = f"{tags.bitrate} kbps" else: quality = MediaQuality.LOSSY_MP3 quality_details = f"{tags.bitrate} kbps" track.add_provider_id( MediaItemProviderId( item_id=track_item_id, prov_type=self.type, prov_id=self.id, quality=quality, details=quality_details, url=track_path, ) ) return track async def _parse_artist( self, name: Optional[str] = None, artist_path: Optional[str] = None, in_library: bool = True, ) -> Artist | None: """Lookup metadata in Artist folder.""" assert name or artist_path if not artist_path: # create fake path artist_path = os.path.join(self.config.path, name) artist_item_id = self._get_item_id(artist_path) if not name: name = artist_path.split(os.sep)[-1] artist = Artist( artist_item_id, self.type, name, provider_ids={ MediaItemProviderId(artist_item_id, self.type, self.id, url=artist_path) }, in_library=in_library, ) if 
not await self.exists(artist_path): # return basic object if there is no dedicated artist folder return artist # always mark artist as in-library when it exists as folder on disk artist.in_library = True nfo_file = os.path.join(artist_path, "artist.nfo") if await self.exists(nfo_file): # found NFO file with metadata # https://kodi.wiki/view/NFO_files/Artists async with self.open_file(nfo_file, "r") as _file: data = await _file.read() info = await self.mass.loop.run_in_executor(None, xmltodict.parse, data) info = info["artist"] artist.name = info.get("title", info.get("name", name)) if sort_name := info.get("sortname"): artist.sort_name = sort_name if musicbrainz_id := info.get("musicbrainzartistid"): artist.musicbrainz_id = musicbrainz_id if descripton := info.get("biography"): artist.metadata.description = descripton if genre := info.get("genre"): artist.metadata.genres = set(split_items(genre)) # find local images images = [] async for _path in scantree(artist_path): _filename = _path.path ext = _filename.split(".")[-1] if ext not in ("jpg", "png"): continue _filepath = os.path.join(artist_path, _filename) for img_type in ImageType: if img_type.value in _filepath: images.append(MediaItemImage(img_type, _filepath, True)) elif _filename == "folder.jpg": images.append(MediaItemImage(ImageType.THUMB, _filepath, True)) if images: artist.metadata.images = images return artist async def _parse_album( self, name: Optional[str] = None, album_path: Optional[str] = None, artists: List[Artist] = None, in_library: bool = True, ) -> Album | None: """Lookup metadata in Album folder.""" assert (name or album_path) and artists # create fake path album_path = os.path.join(self.config.path, artists[0].name, name) album_item_id = self._get_item_id(album_path) if not name: name = album_path.split(os.sep)[-1] album = Album( album_item_id, self.type, name, artists=artists, provider_ids={ MediaItemProviderId(album_item_id, self.type, self.id, url=album_path) }, in_library=in_library, ) 
if not await self.exists(album_path): # return basic object if there is no dedicated album folder return album # always mark as in-library when it exists as folder on disk album.in_library = True nfo_file = os.path.join(album_path, "album.nfo") if await self.exists(nfo_file): # found NFO file with metadata # https://kodi.wiki/view/NFO_files/Artists async with self.open_file(nfo_file) as _file: data = await _file.read() info = await self.mass.loop.run_in_executor(None, xmltodict.parse, data) info = info["album"] album.name = info.get("title", info.get("name", name)) if sort_name := info.get("sortname"): album.sort_name = sort_name if musicbrainz_id := info.get("musicbrainzreleasegroupid"): album.musicbrainz_id = musicbrainz_id if mb_artist_id := info.get("musicbrainzalbumartistid"): if album.artist and not album.artist.musicbrainz_id: album.artist.musicbrainz_id = mb_artist_id if description := info.get("review"): album.metadata.description = description if year := info.get("label"): album.year = int(year) if genre := info.get("genre"): album.metadata.genres = set(split_items(genre)) # parse name/version album.name, album.version = parse_title_and_version(album.name) # try to guess the album type album_tracks = [ x async for x in scantree(album_path) if TinyTag.is_supported(x.path) ] if album.artist.sort_name == "variousartists": album.album_type = AlbumType.COMPILATION elif len(album_tracks) <= 5: album.album_type = AlbumType.SINGLE else: album.album_type = AlbumType.ALBUM # find local images images = [] async for _path in scantree(album_path): _filename = _path.path ext = _filename.split(".")[-1] if ext not in ("jpg", "png"): continue _filepath = os.path.join(album_path, _filename) for img_type in ImageType: if img_type.value in _filepath: images.append(MediaItemImage(img_type, _filepath, True)) elif _filename == "folder.jpg": images.append(MediaItemImage(ImageType.THUMB, _filepath, True)) if images: album.metadata.images = images return album async def 
_parse_playlist(self,
/> <h5 class="text-center">Moving Averages</h5> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">15 Minutes</th> </thead> <thead> <th scope="col">EMA12</th> <th scope="col">EMA26</th> <th scope="col">Status</th> </thead> <tbody> <tr class="{'table-success' if df_15m_last['ema12'].values[0] > df_15m_last['ema26'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_15m_last['ema12'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['ema26'].values[0], 8)}</td> <td>{'EMA12 > EMA26' if df_15m_last['ema12'].values[0] > df_15m_last['ema26'].values[0] else 'EMA12 <= EMA26'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">1 Hour</th> </thead> <thead> <th scope="col">EMA12</th> <th scope="col">EMA26</th> <th scope="col">Status</th> </thead> <tbody> <tr class="{'table-success' if df_1h_last['ema12'].values[0] > df_1h_last['ema26'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_1h_last['ema12'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['ema26'].values[0], 8)}</td> <td>{'EMA12 > EMA26' if df_1h_last['ema12'].values[0] > df_1h_last['ema26'].values[0] else 'EMA12 <= EMA26'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">6 Hour</th> </thead> <thead> <th scope="col">EMA12</th> <th scope="col">EMA26</th> <th scope="col">Status</th> </thead> <tbody> <tr class="{'table-success' if df_6h_last['ema12'].values[0] > df_6h_last['ema26'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_6h_last['ema12'].values[0], 8)}</td> <td>{'%.08f' % round(df_6h_last['ema26'].values[0], 8)}</td> <td>{'EMA12 > EMA26' if df_6h_last['ema12'].values[0] > df_6h_last['ema26'].values[0] else 'EMA12 <= EMA26'}</td> </tr> </tbody> </table> </div> </div> <div 
class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 30%">SMA50</th> <th scope="col" style="width: 30%">SMA200</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_15m_last['sma50'].values[0] > df_15m_last['sma200'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_15m_last['sma50'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['sma200'].values[0], 8)}</td> <td>{'SMA50 > SMA200' if df_15m_last['sma50'].values[0] > df_15m_last['sma200'].values[0] else 'SMA50 <= SMA200'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 30%">SMA50</th> <th scope="col" style="width: 30%">SMA200</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_1h_last['sma50'].values[0] > df_1h_last['sma200'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_1h_last['sma50'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['sma200'].values[0], 8)}</td> <td>{'SMA50 > SMA200' if df_1h_last['sma50'].values[0] > df_1h_last['sma200'].values[0] else 'SMA50 <= SMA200'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 30%">SMA50</th> <th scope="col" style="width: 30%">SMA200</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_6h_last['sma50'].values[0] > df_6h_last['sma200'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_6h_last['sma50'].values[0], 8)}</td> <td>{'%.08f' % round(df_6h_last['sma200'].values[0], 8)}</td> <td>{'SMA50 > SMA200' if df_6h_last['sma50'].values[0] > df_6h_last['sma200'].values[0] else 'SMA50 <= SMA200'}</td> </tr> </tbody> </table> </div> </div> <br /> <h5 class="text-center">Momentum 
Indicators</h5> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">15 Minutes</th> </thead> <thead> <th scope="col" style="width: 50%">RSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{rsi14_15m_class}"> <td>{'%.08f' % round(df_15m_last['rsi14'].values[0], 8)}</td> <td>{rsi14_15m_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">1 Hour</th> </thead> <thead> <th scope="col" style="width: 50%">RSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{rsi14_1h_class}"> <td>{'%.08f' % round(df_1h_last['rsi14'].values[0], 8)}</td> <td>{rsi14_1h_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">6 Hour</th> </thead> <thead> <th scope="col" style="width: 50%">RSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{rsi14_6h_class}"> <td>{'%.08f' % round(df_6h_last['rsi14'].values[0], 8)}</td> <td>{rsi14_6h_desc}</td> </tr> </tbody> </table> </div> </div> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">StochRSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{stochrsi14_15m_class}"> <td>{'%.08f' % round(df_15m_last['stochrsi14'].values[0], 8)}</td> <td>{stochrsi14_15m_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">StochRSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{stochrsi14_1h_class}"> <td>{'%.08f' % round(df_1h_last['stochrsi14'].values[0], 8)}</td> 
<td>{stochrsi14_1h_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">StochRSI14</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{stochrsi14_6h_class}"> <td>{'%.08f' % round(df_6h_last['stochrsi14'].values[0], 8)}</td> <td>{stochrsi14_6h_desc}</td> </tr> </tbody> </table> </div> </div> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">Williams %R</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{williamsr14_15m_class}"> <td>{'%.08f' % round(df_15m_last['williamsr14'].values[0], 8)}</td> <td>{williamsr14_15m_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">Williams %R</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{williamsr14_1h_class}"> <td>{'%.08f' % round(df_1h_last['williamsr14'].values[0], 8)}</td> <td>{williamsr14_1h_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" style="width: 50%">Williams %R</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{williamsr14_6h_class}"> <td>{'%.08f' % round(df_6h_last['williamsr14'].values[0], 8)}</td> <td>{williamsr14_6h_desc}</td> </tr> </tbody> </table> </div> </div> <br /> <h5 class="text-center">Trend Indicators</h5> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">15 Minutes</th> </thead> <thead> <th scope="col" style="width: 30%">MACD</th> <th scope="col" style="width: 30%">Signal</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr 
class="{'table-success' if df_15m_last['macd'].values[0] > df_15m_last['signal'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_15m_last['macd'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['signal'].values[0], 8)}</td> <td>{'MACD > Signal' if df_15m_last['macd'].values[0] > df_15m_last['signal'].values[0] else 'MACD <= Signal'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">1 Hour</th> </thead> <thead> <th scope="col" style="width: 30%">MACD</th> <th scope="col" style="width: 30%">Signal</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_1h_last['macd'].values[0] > df_1h_last['signal'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_1h_last['macd'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['signal'].values[0], 8)}</td> <td>{'MACD > Signal' if df_1h_last['macd'].values[0] > df_1h_last['signal'].values[0] else 'MACD <= Signal'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">6 Hour</th> </thead> <thead> <th scope="col" style="width: 30%">MACD</th> <th scope="col" style="width: 30%">Signal</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_6h_last['macd'].values[0] > df_6h_last['signal'].values[0] else 'table-danger'}"> <td>{'%.08f' % round(df_6h_last['macd'].values[0], 8)}</td> <td>{'%.08f' % round(df_6h_last['signal'].values[0], 8)}</td> <td>{'MACD > Signal' if df_6h_last['macd'].values[0] > df_6h_last['signal'].values[0] else 'MACD <= Signal'}</td> </tr> </tbody> </table> </div> </div> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="2" style="width: 60%">ADX14</th> <th scope="col" style="width: 
40%">Status</th> </thead> <tbody> <tr class="{adx14_15m_class}"> <td colspan="2">{'%.08f' % round(df_15m_last['adx14'].values[0], 8)}</td> <td>{adx14_15m_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="2" style="width: 60%">ADX14</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{adx14_1h_class}"> <td colspan="2">{'%.08f' % round(df_1h_last['adx14'].values[0], 8)}</td> <td>{adx14_1h_desc}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="2" style="width: 60%">ADX14</th> <th scope="col" style="width: 40%">Status</th> </thead> <tbody> <tr class="{adx14_6h_class}"> <td colspan="2">{'%.08f' % round(df_6h_last['adx14'].values[0], 8)}</td> <td>{adx14_6h_desc}</td> </tr> </tbody> </table> </div> </div> <br /> <h5 class="text-center">Volume Indicators</h5> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">15 Minutes</th> </thead> <thead> <th scope="col" style="width: 50%">OBV10</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_15m_last['obv'].values[0] > 0 else 'table-danger'}"> <td>{'%.0f' % df_15m_last['obv'].values[0]} ({df_15m_last['obv_pc'].values[0]}%)</td> <td>{'OBV > 0' if df_15m_last['obv'].values[0] > 0 else 'OBV <= 0'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">1 Hour</th> </thead> <thead> <th scope="col" style="width: 50%">OBV10</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_1h_last['obv'].values[0] > 0 else 'table-danger'}"> <td>{'%.0f' % df_1h_last['obv'].values[0]} 
({df_1h_last['obv_pc'].values[0]}%)</td> <td>{'OBV > 0' if df_1h_last['obv'].values[0] > 0 else 'OBV <= 0'}</td> </tr> </tbody> </table> </div> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="3">6 Hour</th> </thead> <thead> <th scope="col" style="width: 50%">OBV10</th> <th scope="col" style="width: 50%">Status</th> </thead> <tbody> <tr class="{'table-success' if df_6h_last['obv'].values[0] > 0 else 'table-danger'}"> <td>{'%.0f' % df_6h_last['obv'].values[0]} ({df_6h_last['obv_pc'].values[0]}%)</td> <td>{'OBV > 0' if df_6h_last['obv'].values[0] > 0 else 'OBV <= 0'}</td> </tr> </tbody> </table> </div> </div> <br /> <h5 class="text-center">Fibonacci Retracement Levels</h5> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="5">15 Minutes</th> </thead> <thead> <th scope="col" style="width: 20%">23.6%</th> <th scope="col" style="width: 20%">38.2%</th> <th scope="col" style="width: 20%">50%</th> <th scope="col" style="width: 20%">61.8%</th> <th scope="col" style="width: 20%">78.6%</th> </thead> <tbody> <tr class="{rsi14_15m_class}"> <td>{'%.08f' % round(df_15m_last['fbb_lower0_236'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['fbb_lower0_382'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['fbb_lower0_5'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['fbb_lower0_618'].values[0], 8)}</td> <td>{'%.08f' % round(df_15m_last['fbb_lower0_786'].values[0], 8)}</td> </tr> </tbody> </table> </div> </div> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="5">1 Hour</th> </thead> <thead> <th scope="col" style="width: 20%">23.6%</th> <th scope="col" style="width: 20%">38.2%</th> <th scope="col" style="width: 20%">50%</th> <th scope="col" style="width: 20%">61.8%</th> <th scope="col" style="width: 20%">78.6%</th> 
</thead> <tbody> <tr class="{rsi14_15m_class}"> <td>{'%.08f' % round(df_1h_last['fbb_lower0_236'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['fbb_lower0_382'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['fbb_lower0_5'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['fbb_lower0_618'].values[0], 8)}</td> <td>{'%.08f' % round(df_1h_last['fbb_lower0_786'].values[0], 8)}</td> </tr> </tbody> </table> </div> </div> <div class="row"> <div class="col-sm"> <table class="table table-sm table-light table-hover table-striped"> <thead> <th scope="col" colspan="5">6 Hours</th> </thead> <thead> <th scope="col" style="width: 20%">23.6%</th> <th scope="col" style="width: 20%">38.2%</th> <th scope="col" style="width: 20%">50%</th> <th scope="col" style="width: 20%">61.8%</th> <th scope="col" style="width: 20%">78.6%</th> </thead> <tbody> <tr class="{rsi14_15m_class}"> <td>{'%.08f' % round(df_6h_last['fbb_lower0_236'].values[0], 8)}</td> <td>{'%.08f' % round(df_6h_last['fbb_lower0_382'].values[0], 8)}</td> <td>{'%.08f' % round(df_6h_last['fbb_lower0_5'].values[0],
# <filename> exoplanet_transit_snr/snr_estimate.py
# -*- coding: utf-8 -*-
"""Cross-correlation and SNR-estimation utilities for exoplanet transit spectra.

This module builds (and caches) cross-correlation functions between observed
spectra, co-adds them along the expected planet radial-velocity trajectory,
and prepares the grids used for Cohen's-d detection-significance maps.
"""
from ctypes import c_long
from glob import glob
from itertools import combinations
from os import makedirs
from os.path import basename, dirname, join, realpath
from typing import Tuple, Union

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import units as u
from astropy.constants import c
from astropy.io import fits
from astropy.time import Time
from exoorbit.bodies import Planet, Star

# from cats.extractor.runner import CatsRunner
# from cats.simulator.detector import Crires
# from cats.spectrum import SpectrumArray
from exoorbit.orbit import Orbit
from genericpath import exists
from scipy.constants import speed_of_light
from scipy.interpolate import interp1d, splev, splrep
from scipy.optimize import curve_fit
from scipy.signal import correlate
from scipy.special import binom
from tqdm import tqdm

from .stats import cohen_d, gauss, gaussfit
from .sysrem import Sysrem, SysremWithProjection

# Speed of light in km/s
c_light = speed_of_light * 1e-3

# TODO List:
# - automatically mask points before fitting with SME
# - if star and planet steps aren't run manually, we use the initial values
#   instead we should load the data if possible
# - Tests for all the steps
# - Refactoring of the steps, a lot of the code is strewm all over the place
# - Determine Uncertainties for each point


def coadd_cross_correlation(
    cc_data: np.ndarray,
    rv: np.ndarray,
    rv_array: np.ndarray,
    times: Time,
    planet: Planet,
    data_dir: str = None,
    cache_suffix: str = "",
    load: bool = True,
):
    """Sum the cross correlation data along the expected planet trajectory

    Parameters
    ----------
    cc_data : np.ndarray
        cross correlation data, between all combinations of spectra
    rv : np.ndarray
        radial velocity of the planet at each time point
    rv_array : np.ndarray
        radial velocity points of the cc_data
    times : Time
        observation times of the spectra
    planet : Planet
        Planet metadata object
    data_dir : str, optional
        directory to cache data in, by default None
    load : bool, optional
        whether to load data from the cache or not, by default True

    Returns
    -------
    coadd_sum
        sum of all cross correlation data
    coadd_sum_it
        sum of cross correlation data, during the transit
    coadd_sum_oot
        sum of cross correlation data, out of the transit
    """
    if data_dir is not None:
        # Cache lives next to the data directory under ../medium/
        savefilename = realpath(
            join(data_dir, f"../medium/cross_correlation_coadd{cache_suffix}.npz")
        )
        if load and exists(savefilename):
            data = np.load(savefilename)
            coadd_sum = data["coadd_sum"]
            coadd_sum_it = data["coadd_sum_it"]
            coadd_sum_oot = data["coadd_sum_oot"]
            return coadd_sum, coadd_sum_it, coadd_sum_oot

    # Shift every (i, j) cross-correlation onto a common velocity axis by the
    # relative planet velocity between epochs i and j.
    cc_data_interp = np.zeros_like(cc_data)
    for i in tqdm(range(len(cc_data)), leave=False):
        for j in tqdm(range(len(cc_data[0])), leave=False):
            # -3 and +4 is the same bc of how python does things
            # cc_data[i, j, mid - offset : mid + offset + 1] = 0
            cc_data_interp[i, j] = np.interp(
                rv_array - (rv[i] - rv[j]).to_value("km/s"), rv_array, cc_data[i, j]
            )

    # Co-add to sort of stack them together
    coadd_sum = np.sum(cc_data_interp, axis=(0, 1))

    # Orbital phase of each observation, folded into [0, 1)
    phi = (times - planet.time_of_transit) / planet.period
    phi = phi.to_value(1)
    # We only care about the fraction
    phi = phi % 1
    # After the modulo, ingress sits just below 1 and egress just above 0,
    # so "in transit" is the wrap-around union of the two intervals.
    ingress = (-planet.transit_duration / 2 / planet.period).to_value(1) % 1
    egress = (planet.transit_duration / 2 / planet.period).to_value(1) % 1
    in_transit = (phi >= ingress) | (phi <= egress)
    out_transit = ~in_transit

    # Only combinations where BOTH epochs are (out of / in) transit contribute
    coadd_sum_it = np.sum(cc_data_interp[in_transit][:, in_transit], axis=(0, 1))
    coadd_sum_oot = np.sum(cc_data_interp[out_transit][:, out_transit], axis=(0, 1))

    if data_dir is not None:
        np.savez(
            savefilename,
            coadd_sum=coadd_sum,
            coadd_sum_it=coadd_sum_it,
            coadd_sum_oot=coadd_sum_oot,
        )
    return coadd_sum, coadd_sum_it, coadd_sum_oot


def run_cross_correlation(
    data: Tuple,
    nsysrem: int,
    rv_range: float = 100,
    rv_step: float = 1,
    skip: Tuple = None,
    load: bool = False,
    data_dir: str = None,
    rv_star: np.ndarray = None,
    rv_planet: np.ndarray = None,
    airmass: np.ndarray = None,
    spec: np.ndarray = None,
    cache_suffix="",
):
    """Detrend the spectra with SYSREM and cross correlate all epoch pairs.

    Parameters
    ----------
    data : Tuple
        (wave, flux, uncs, _, segments, _) — presumably flux is
        (n_obs, n_points); TODO confirm against caller
    nsysrem : int
        number of SYSREM iterations to remove
    rv_range, rv_step : float
        radial velocity grid half-range and step (km/s), used for the cached
        rv_array fallback
    skip : Tuple, optional
        segment indices to exclude (NOTE(review): skip_mask is built below but
        never applied — verify whether this is intentional)
    load : bool, optional
        load the cached result instead of recomputing
    data_dir : str, optional
        directory used to locate the cache
    rv_star, rv_planet, airmass : np.ndarray, optional
        per-epoch velocities / airmass used by the SYSREM projection
    spec : np.ndarray, optional
        NOTE(review): this argument is overwritten by ``spec = flux[n]`` below
        and therefore has no effect — confirm intent
    cache_suffix : str
        suffix appended to the cache file name

    Returns
    -------
    correlation : dict
        {str(nsysrem): corr} with corr of shape (n_obs, n_obs, n_points)
    rv_array : np.ndarray
        velocity axis labels for the correlation
    """
    wave, flux, uncs, _, segments, _ = data
    if data_dir is not None:
        savefilename = realpath(
            join(data_dir, f"../medium/cross_correlation{cache_suffix}.npz")
        )
        if load and exists(savefilename):
            data = np.load(savefilename)
            if "rv_array" in data:
                rv_array = data["rv_array"]
            else:
                # Older caches have no rv_array; reconstruct it from the args
                rv_array = np.arange(-rv_range, rv_range + rv_step, rv_step)
            return data, rv_array

    # NOTE(review): skip_mask is computed but never used afterwards
    skip_mask = np.full(flux.shape[1], True)
    if skip is not None:
        for seg in skip:
            skip_mask[segments[seg] : segments[seg + 1]] = False

    # reference = cross_correlation_reference
    # Strip astropy units so the numeric code below works on plain arrays
    if isinstance(wave, u.Quantity):
        wave = wave.to_value(u.AA)
    if isinstance(flux, u.Quantity):
        flux = flux.to_value(1)
    if uncs is None:
        uncs = np.ones_like(flux)
    elif isinstance(uncs, u.Quantity):
        uncs = uncs.to_value(1)
    if rv_planet is not None and isinstance(rv_planet, u.Quantity):
        rv_planet = rv_planet.to_value(u.km / u.s)
    if rv_star is not None and isinstance(rv_star, u.Quantity):
        rv_star = rv_star.to_value(u.km / u.s)

    # Use the middle epoch as the reference spectrum
    n = flux.shape[0] // 2
    spec = flux[n]
    uncs = np.clip(uncs, 1, None)
    # Normalize each wavelength segment to its maximum
    for low, upp in zip(segments[:-1], segments[1:]):
        # flux[:, low:upp] -= np.nanmin(flux[:, low:upp])
        flux[:, low:upp] /= np.nanmax(flux[:, low:upp])

    # corrected_flux = np.load("corrected_flux_LTT1445A_projected_1.npy")
    sysrem = SysremWithProjection(wave[n], flux, spec, rv_star, airmass, uncs)
    corrected_flux, *_ = sysrem.run(nsysrem)
    # sysrem = Sysrem(corrected_flux)
    # corrected_flux, *_ = sysrem.run(nsysrem-1)
    # NOTE(review): hard-coded, target-specific cache filename — should
    # probably be derived from data_dir/cache_suffix
    np.save("corrected_flux_LTT1445A_projected_1.npy", corrected_flux)

    # Normalize by the standard deviation in this wavelength column
    std = np.nanstd(corrected_flux, axis=0)
    std[std == 0] = 1
    corrected_flux /= std

    # NOTE(review): this axis (±10 over n_points samples) is inconsistent with
    # the cached-branch rv_array (arange over ±rv_range) — confirm intent
    rv_array = np.linspace(-10, 10, flux.shape[1])
    spl = [None] * flux.shape[0]
    for i in range(flux.shape[0]):
        spl[i] = splrep(wave[i], corrected_flux[i])

    # Run the cross correlation for all times and radial velocity offsets
    corr = np.zeros((flux.shape[0], flux.shape[0], flux.shape[1]), dtype="f4")
    total = int(binom(flux.shape[0], 2))
    for i, j in tqdm(
        combinations(range(flux.shape[0]), 2), total=total, desc="Combinations"
    ):
        # Zero Normalized Cross Correlation
        a = corrected_flux[i]
        v = corrected_flux[j]
        # NOTE(review): dead loop — does nothing, candidate for removal
        for k in range(-100, 100, 1):
            pass
        if rv_planet is not None:
            # Doppler-shift both spectra into the planet rest frame
            rva = 1 - rv_planet[i] / c_light
            rvv = 1 - rv_planet[j] / c_light
            a = splev(wave[n] * rva, spl[i])
            v = splev(wave[n] * rvv, spl[j])
            # a = interp1d(wave[i] * rva, a, kind="linear", fill_value=1, bounds_error=False)(wave[n])
            # v = interp1d(wave[j] * rvv, v, kind="linear", fill_value=1, bounds_error=False)(wave[n])
        a = (a - np.mean(a)) / (np.std(a) * len(a))
        v = (v - np.mean(v)) / (np.std(v))
        corr[i, j] = correlate(a, v, mode="same")

    correlation = {str(nsysrem): corr}
    # correlation[:, :, flux.shape[1] // 2] = np.nan
    if data_dir is not None:
        np.savez(savefilename, **correlation, rv_array=rv_array)
    return correlation, rv_array


def cross_correlation_reference(wave, ptr_wave, ptr_flux, rv_range=100, rv_step=1):
    """Build a template spectrum Doppler-shifted onto a grid of velocities.

    Returns an array of shape (rv_points, wave.size): row i is the template
    (ptr_wave, ptr_flux) resampled onto ``wave`` after a relativistic Doppler
    shift by rv[i] km/s, with NaNs replaced by zeros.
    """
    # NOTE(review): formula differs from run_cross_correlation_ptr's
    # int(2 * rv_range / rv_step + 1) — confirm which grid size is intended
    rv_points = int((2 * rv_range + 1) / rv_step)
    rv = np.linspace(-rv_range, rv_range, num=rv_points)
    rep = splrep(ptr_wave, ptr_flux)
    reference = np.zeros((rv_points, wave.size))
    for i in tqdm(range(rv_points)):
        # Relativistic Doppler factor for velocity rv[i]
        rv_factor = np.sqrt((1 - rv[i] / c_light) / (1 + rv[i] / c_light))
        ref = splev(wave * rv_factor, rep)
        reference[i] = np.nan_to_num(ref)
    return reference


def run_cross_correlation_ptr(
    corrected_flux: np.ndarray,
    reference: np.ndarray,
    segments: np.ndarray,
    rv_range: float = 100,
    rv_step: float = 1,
    skip: Tuple = None,
    load: bool = False,
    data_dir: str = None,
    cache_suffix: str = "",
):
    """Cross correlate detrended spectra against a pre-shifted template grid.

    Parameters
    ----------
    corrected_flux : np.ndarray
        detrended spectra, presumably (n_obs, n_points) — TODO confirm
    reference : np.ndarray
        template grid from :func:`cross_correlation_reference`,
        (rv_points, n_points)
    segments : np.ndarray
        wavelength-segment boundary indices
    rv_range, rv_step : float
        velocity grid half-range and step in km/s
    skip : Tuple, optional
        NOTE(review): skip_mask is built but never applied — verify
    load, data_dir, cache_suffix
        cache controls, same convention as :func:`run_cross_correlation`

    Returns
    -------
    corr : np.ndarray
        (n_segments, n_obs, rv_points) cross-correlation values
    rv_array : np.ndarray
        the velocity grid
    """
    rv_points = int(2 * rv_range / rv_step + 1)
    if data_dir is not None:
        savefilename = realpath(
            join(data_dir, f"../medium/cross_correlation{cache_suffix}.npz")
        )
        if load and exists(savefilename):
            data = np.load(savefilename)
            rv_array = data["rv_array"]
            data = data["corr"]
            return data, rv_array

    # NOTE(review): skip_mask is computed but never used afterwards
    skip_mask = np.full(corrected_flux.shape[1], True)
    if skip is not None:
        for seg in skip:
            skip_mask[segments[seg] : segments[seg + 1]] = False

    # reference = cross_correlation_reference
    if isinstance(corrected_flux, u.Quantity):
        corrected_flux = corrected_flux.to_value(1)

    rv_array = np.linspace(-rv_range, rv_range, rv_points)
    nseg = len(segments) - 1
    nobs = corrected_flux.shape[0]

    # Run the cross correlation for all times and radial velocity offsets
    corr = np.zeros((nseg, nobs, rv_points))
    for k, (low, upp) in tqdm(
        enumerate(zip(segments[:-1], segments[1:])),
        total=len(segments) - 1,
        desc="Segment",
    ):
        for i in tqdm(range(nobs), total=nobs, desc="Obs", leave=False):
            for j in tqdm(
                range(rv_points), leave=False, desc="rv points", total=rv_points
            ):
                # Zero Normalized Cross Correlation
                a = corrected_flux[i, low:upp]
                v = reference[j, low:upp]
                corr[k, i, j] += np.nansum(a * v)
                # NOTE(review): corr[k, i, j] is a scalar, so size is 1 and
                # count_nonzero is 0 or 1 — this line is a no-op (or divides
                # by zero); was corr[k, i] intended?
                corr[k, i, j] *= corr[k, i, j].size / np.count_nonzero(corr[k, i, j])
                # a = (a - np.nanmean(a)) / np.nanstd(a)
                # v = (v - np.nanmean(v)) / np.nanstd(v)
                # corr[k, i, j] += np.nanmean(a * v)

    if data_dir is not None:
        np.savez(savefilename, corr=corr, rv_array=rv_array)
    return corr, rv_array


# NOTE(review): this function continues beyond the visible chunk; the text
# below reproduces only the portion present in this file fragment.
def calculate_cohen_d_for_dataset(
    data,
    datetime,
    star,
    planet,
    rv_range=100,
    rv_step=1,
    vsys_range=(-20, 20),
    kp_range=(-150, 150),
):
    # Orbital phase of each observation, folded into [0, 1)
    phi = (datetime - planet.time_of_transit) / planet.period
    phi = phi.to_value(1)
    # We only care about the fraction
    phi = phi % 1
    vsys = star.radial_velocity.to_value("km/s")
    kp = Orbit(star, planet).radial_velocity_semiamplitude_planet().to_value("km/s")
    # Expected planet radial velocity at each phase
    vp = vsys + kp * np.sin(2 * np.pi * phi)
    ingress = (-planet.transit_duration / 2 / planet.period).to_value(1) % 1
    egress = (planet.transit_duration / 2 / planet.period).to_value(1) % 1
    in_transit = (phi >= ingress) | (phi <= egress)

    rv_points = int(2 * rv_range / rv_step + 1)
    rv = np.linspace(-rv_range, rv_range, rv_points)
    vsys_min, vsys_max = int(vsys + vsys_range[0]), int(vsys + vsys_range[1])
    kp_min, kp_max = int(kp + kp_range[0]), int(kp + kp_range[1])
    vsys = np.linspace(vsys_min, vsys_max,
23 1.1321 cf-cf-oh 86.7 116.85 SOURCE4_SOURCE5 19 1.5331 ce-cf-cy 62.2 137.58 SOURCE4_SOURCE5 18 1.4229 ce-cf-h4 49.3 122.95 SOURCE4_SOURCE5 18 1.1766 ce-cf-n1 90.5 119.94 SOURCE4_SOURCE5 7 1.8420 ce-cf-nh 87.3 121.38 SOURCE4_SOURCE5 27 1.6583 ch-cf-n2 87.9 121.14 SOURCE4_SOURCE5 8 0.9418 c -cf-oh 86.2 115.76 SOURCE4_SOURCE5 15 2.2145 c -cf-os 86.1 114.67 SOURCE4_SOURCE5 26 2.3740 h4-cf-n1 64.9 116.64 SOURCE4_SOURCE5 12 0.5604 h4-cf-nf 62.2 115.65 SOURCE4_SOURCE5 12 1.7190 n2-cf-os 114.2 117.95 SOURCE4_SOURCE5 13 0.4519 n2-cf-ss 81.5 117.23 SOURCE4 6 nf-cf-nh 111.3 113.64 SOURCE4_SOURCE5 29 1.5167 ne-cf-nh 112.3 119.27 SOURCE4_SOURCE5 17 1.8891 ca-ce-cd 64.6 130.88 SOURCE4_SOURCE5 29 1.2258 c -ce-cc 66.1 117.82 SOURCE4_SOURCE5 19 0.9022 c -ce-n2 88.2 114.41 SOURCE4_SOURCE5 8 1.4615 h4-ce-nf 64.3 120.56 SOURCE4_SOURCE5 33 0.8495 c1-ch-cd 58.9 178.61 SOURCE4_SOURCE5 7 0.3553 ch-cg-cg 60.6 179.58 SOURCE4_SOURCE5 48 0.3197 n -c -nf 113.6 110.26 SOURCE4_SOURCE5 15 1.6743 ca-cq-na 86.5 119.50 SOURCE4_SOURCE5 38 0.8587 nb-cq-nb 110.0 125.79 SOURCE4_SOURCE5 6 0.6645 cd-cx-hc 47.5 114.33 5/2017 9 0.7607 cf-cy-h2 45.9 117.40 SOURCE4_SOURCE5 21 0.5798 cf-cy-n 94.2 87.94 SOURCE4_SOURCE5 24 0.2234 cf-cy-ss 60.5 120.54 SOURCE4_SOURCE5 21 2.1971 cd-n2-na 91.8 109.24 SOURCE4_SOURCE5 14 1.5712 cd-n2-nh 88.7 118.47 SOURCE4_SOURCE5 7 1.6660 c3-n4-cd 64.4 111.04 SOURCE4_SOURCE5 11 1.9847 c3-na-cq 65.4 119.62 SOURCE4_SOURCE5 10 0.5495 ca-na-cq 67.0 120.86 SOURCE4_SOURCE5 38 1.4370 cd-na-cf 64.7 126.61 SOURCE4_SOURCE5 8 0.5158 cq-nb-nb 86.9 120.96 SOURCE4_SOURCE5 20 0.6372 c -n -cf 63.5 131.38 SOURCE4_SOURCE5 225 1.7874 ca-nc-nd 92.5 108.34 SOURCE4_SOURCE5 14 0.2755 c2-nf-ch 70.2 123.23 SOURCE4_SOURCE5 27 1.1966 c -nf-sy 65.6 116.43 SOURCE4_SOURCE5 10 2.0084 c3-nh-ce 65.1 120.12 SOURCE4_SOURCE5 32 2.1639 cd-nh-n2 85.5 120.09 SOURCE4_SOURCE5 16 0.9182 cd-nh-sy 63.0 122.52 SOURCE4_SOURCE5 37 1.3342 cf-nh-sy 65.3 113.39 SOURCE4_SOURCE5 8 1.1060 hn-n -nd 62.3 115.42 SOURCE4_SOURCE5 24 
0.7584 cd-no-o 87.7 117.49 SOURCE4_SOURCE5 426 0.5387 n3-py-nf 79.4 108.76 SOURCE4_SOURCE5 18 1.1434 cd-s6-o 95.6 103.76 SOURCE4_SOURCE5 15 0.9562 cd-sh-hs 53.6 95.01 SOURCE4_SOURCE5 15 1.4000 c -ss-cd 75.1 94.89 SOURCE4_SOURCE5 18 1.2231 c3-sx-cd 73.1 95.18 SOURCE4_SOURCE5 24 0.6543 cd-sx-o 94.3 104.81 SOURCE4_SOURCE5 28 1.4279 c3-sy-cd 71.8 101.95 SOURCE4_SOURCE5 20 1.3784 ca-sy-cd 71.0 105.09 SOURCE4_SOURCE5 5 0.3628 ca-sy-nf 92.5 103.01 SOURCE4_SOURCE5 25 2.4137 cc-sy-nh 94.5 97.20 SOURCE4_SOURCE5 6 0.2429 n3-sy-nf 120.0 101.93 SOURCE4_SOURCE5 10 1.4898 cl-py-ne 67.2 109.16 SOURCE5 79 0.9726 ce-ce-nh 85.8 116.41 SOURCE5 70 1.9262 cp-ca-os 87.9 116.91 SOURCE5 38 1.2997 ca-cc-ca 65.3 122.94 SOURCE5 37 2.3284 h1-c3-i 39.3 103.88 SOURCE5 43 0.8359 h4-c2-h4 37.6 117.92 SOURCE5 46 1.0787 c -ss-ss 72.8 97.68 SOURCE5 29 1.7788 f -py-ne 83.7 108.60 SOURCE5 47 0.7739 ca-nh-ce 65.0 127.74 SOURCE5 32 0.9569 ce-cx-cx 64.2 118.62 5/2017 40 1.7472 py-ne-py 111.0 121.41 SOURCE5 34 1.5196 c -cd-ss 63.0 121.97 SOURCE5 29 2.1476 s -p5-ss 46.4 116.67 SOURCE5 27 1.1060 cx-c3-nh 86.6 103.86 5/2017 29 2.2522 cc-cc-cl 72.0 119.99 SOURCE5 43 1.9574 cd-na-cx 66.4 116.39 5/2017 14 0.5535 h1-cy-nh 61.8 110.00 5/2017 2 1.8569 h5-c -os 64.0 113.09 SOURCE5 20 0.1826 c2-c3-n4 81.9 113.64 SOURCE5 18 2.3563 c2-cx-c3 65.2 115.48 5/2017 22 1.1986 c3-c2-cx 64.8 117.87 5/2017 20 2.2886 br-cx-cx 62.7 119.04 5/2017 21 0.7114 cc-cf-ch 68.2 122.27 SOURCE5 30 0.9028 c3-c3-sx 63.1 110.50 SOURCE5 14 1.4461 ca-cy-hc 46.4 114.53 5/2017 17 1.6221 cx-c1-n1 74.2 178.25 5/2017 17 0.8798 cl-py-cl 61.6 101.95 SOURCE5 12 0.7596 c2-ce-cx 66.4 122.74 5/2017 23 1.5745 c3-c -cx 64.6 116.04 5/2017 14 1.1793 cf-cc-os 87.2 123.07 SOURCE5 15 1.3662 cd-cd-cl 72.0 119.99 SOURCE5 43 1.9574 c3-py-ca 46.1 107.27 SOURCE5 20 1.8136 c3-c3-py 80.6 111.57 SOURCE5 14 1.9142 c3-py-s 46.2 113.85 SOURCE5 14 0.3847 ca-c -cx 64.9 117.66 5/2017 20 1.5268 ce-ce-os 86.8 115.19 SOURCE5 15 2.1777 c3-n4-cx 62.6 117.29 SOURCE5 15 0.3164 
h4-ce-sy 42.6 115.00 SOURCE5 20 1.1588 hx-cy-n4 60.5 106.00 5/2017 2 0.1412 cy-no-o 84.2 116.83 5/2017 17 1.1181 cc-cd-cx 66.1 124.15 5/2017 10 1.8770 ca-nb-na 87.3 118.78 SOURCE5 10 0.6408 cl-c3-cy 71.1 111.89 5/2017 12 0.7377 f -c2-h4 66.2 112.05 SOURCE5 13 0.7763 ca-py-s 46.0 116.31 SOURCE5 11 1.2602 cl-c3-cx 71.6 110.76 5/2017 9 1.3315 ca-nh-cy 64.7 123.81 5/2017 2 2.0914 cy-cy-no 79.8 115.43 5/2017 15 1.0848 ce-n1-n1 77.6 177.62 SOURCE5 10 0.5740 cy-cy-hx 45.0 115.92 5/2017 9 1.5918 ce-n -hn 48.1 113.83 SOURCE5 11 1.3642 c3-cx-cu 64.0 120.91 5/2017 11 0.4272 cf-cf-ne 86.6 120.79 SOURCE5 9 1.8014 f -p5-na 88.7 89.26 SOURCE5 12 1.2991 h4-ce-nh 62.3 115.58 SOURCE5 10 0.8050 ne-c -s 82.1 124.23 SOURCE5 9 1.7990 ca-os-py 83.0 123.31 SOURCE5 12 0.8994 cf-ce-cl 71.4 121.94 SOURCE5 20 1.2372 cy-cy-n4 81.5 110.88 5/2017 4 0.7688 na-cc-sh 79.2 122.95 SOURCE5 9 1.1542 nb-na-o 113.4 118.13 SOURCE5 11 0.6838 c -cx-n3 83.0 116.37 5/2017 2 0.1235 cd-cy-hc 48.3 107.20 5/2017 8 0.5300 f -c3-no 111.1 107.76 SOURCE5 11 0.3179 ce-cd-na 86.0 124.93 SOURCE5 9 0.9918 cq-cp-cq 69.7 108.02 SOURCE5 24 0.5633 os-py-s 59.8 116.22 SOURCE5 11 0.4580 c -c3-cy 65.4 110.88 5/2017 9 1.4172 cy-c2-ha 45.8 118.59 5/2017 5 1.8406 cp-cq-cp 69.7 108.02 SOURCE5 24 0.5633 cx-cu-cx 90.2 63.19 5/2017 12 0.2140 cu-c2-ha 50.4 121.49 5/2017 12 0.1524 cd-ce-cg 68.2 122.27 SOURCE5 30 0.9028 cf-ne-ne 87.9 113.17 SOURCE5 15 1.6715 c3-c2-no 82.8 115.94 SOURCE5 9 0.9963 f -cy-f 120.2 108.56 5/2017 9 1.2393 c2-cy-hc 46.9 112.80 5/2017 10 0.5936 c3-c2-cy 64.2 117.99 5/2017 10 1.8958 c -ce-h4 46.7 118.08 SOURCE5 8 2.4522 cf-cc-n 86.2 124.20 SOURCE5 10 0.8706 cd-cc-i 60.1 124.28 SOURCE5 14 1.7120 ce-cf-cl 71.4 121.94 SOURCE5 20 1.2372 cl-c3-p5 92.5 109.52 SOURCE5 9 0.8307 c2-c3-no 83.7 107.19 SOURCE5 9 0.5470 ce-nf-nf 87.9 113.17 SOURCE5 15 1.6715 c1-c3-cx 66.5 112.35 5/2017 11 0.3186 ce-c3-h2 46.9 112.27 SOURCE5 9 0.2011 na-cd-na 115.9 106.60 SOURCE5 10 1.3968 cx-cx-n4 80.4 118.73 5/2017 2 0.1804 c1-cx-hc 48.4 
114.86 5/2017 6 0.1269 cg-ca-nb 87.9 116.87 SOURCE5 10 0.6088 ce-c2-f 90.0 122.62 SOURCE5 11 1.4117 cp-ca-cq 71.0 111.52 SOURCE5 8 0.0849 cl-py-nf 67.2 109.16 SOURCE5 79 0.9726 ca-c3-cy 65.2 112.32 5/2017 7 0.8064 ch-cd-nd 85.0 123.03 SOURCE5 7 0.2371 h1-cy-ss 41.6 111.82 5/2017 1 h5-cc-n2 64.0 123.28 SOURCE5 5 1.2554 cc-na-cy 64.0 126.88 5/2017 8 1.2393 c -c3-no 83.4 106.99 SOURCE5 8 1.0618 c3-py-c3 46.1 105.72 SOURCE5 10 2.4094 hx-c3-n3 60.7 111.73 SOURCE5 10 0.1463 cf-cf-nh 85.8 116.41 SOURCE5 70 1.9262 c3-n3-py 81.5 118.27 SOURCE5 8 1.5513 h5-c2-os 64.7 110.95 SOURCE5 9 1.4177 cc-c3-ce 66.3 110.89 SOURCE5 7 2.0183 n4-c3-p5 104.1 106.09 SOURCE5 10 1.7975 ne-cd-ss 79.5 126.00 SOURCE5 6 1.6775 na-cd-ne 111.3 122.47 SOURCE5 7 2.4448 cl-c3-h3 48.7 107.66 SOURCE5 10 0.1942 h5-c -s 44.0 123.51 SOURCE5 6 0.5125 cf-ce-ss 63.7 120.95 SOURCE5 15 1.8784 c3-c2-f 87.7 113.28 SOURCE5 8 1.0861 h4-c2-oh 64.5 114.61 SOURCE5 8 1.2250 ne-ce-nf 108.3 127.96 SOURCE5 10 1.2321 cc-n -cd 67.1 121.05 SOURCE5 7 0.3580 f -py-f 90.4 97.51 SOURCE5 5 0.2281 n -cc-os 110.4 119.02 SOURCE5 8 1.4066 cq-cp-nb 85.9 120.01 SOURCE5 14 1.1266 c -c -s 64.0 121.31 SOURCE5 8 0.9033 cf-ce-os 88.4 120.23 SOURCE5 8 2.3122 br-ce-c2 64.2 120.52 SOURCE5 8 0.4148 cp-nb-na 87.5 118.11 SOURCE5 5 0.5760 n -s6-oh 123.3 97.30 SOURCE5 8 0.9381 cd-c3-h2 47.7 110.47 SOURCE5 12 1.1111 nb-ca-sy 81.3 115.73 SOURCE5 6 0.4033 na-sy-o 123.0 105.30 SOURCE5 5 1.0811 hx-cx-hx 38.0 115.77 5/2017 9 0.0901 cd-cf-ne 86.1 122.39 SOURCE5 7 1.4919 h5-c -oh 65.3 109.49 SOURCE5 7 0.3600 cy-n -cy 71.4 94.55 SOURCE5 5 0.6286 br-c3-no 81.1 106.96 SOURCE5 6 2.2092 c2-ss-s4 73.2 92.42 SOURCE5 8 0.4009 c3-nh-o 85.5 117.53 SOURCE5 7 1.0041 br-cc-ss 65.7 120.06 SOURCE5 6 0.2609 c -ce-ss 64.5 113.23 SOURCE5 6 1.9344 c3-n -n3 82.1 117.56 SOURCE5 6 2.4546 h5-ca-na 62.5 115.80 SOURCE5 8 0.4738 n2-nh-oh 106.3 117.89 SOURCE5 6 0.2008 c2-c3-p5 80.8 112.22 SOURCE5 6 0.6523 c3-cx-nh 86.2 106.88 5/2017 1 c2-cc-ss 62.7 127.48 SOURCE5 6 0.3389 c -ca-na 
84.3 117.81 SOURCE5 7 2.2477 cl-c2-n2 91.8 121.45 SOURCE5 8 0.8251 n2-s4-ne 122.2 104.29 SOURCE5 8 0.9503 nc-c -s 82.1 124.47 SOURCE5 7 1.3793 o -sy-ss 85.1 107.59 SOURCE5 7 2.0694 c2-ce-ss 63.0 123.86 SOURCE5 5 1.0553 c3-cx-ca 64.5 117.01 5/2017 6 1.1320 cc-cc-nf 87.3 121.68 SOURCE5 7 1.9093 ca-nd-cd 73.4 104.24 SOURCE5 8 0.2625 cc-n2-oh 89.3 113.25 SOURCE5 7 1.6484 ca-os-sy 63.8 118.01 SOURCE5 8 2.0392 hx-c3-p5 54.8 107.59 SOURCE5 7 1.8329 ca-ce-n 83.3 118.99 SOURCE5 8 0.3821 h4-ce-sx 41.7 115.27 SOURCE5 5 0.1053 c3-ce-ne 83.8 116.23 SOURCE5 5 1.2988 c1-n1-ce 61.7 176.87 SOURCE5 7 0.6686 c3-n2-cd 67.9 117.01 SOURCE5 6 1.8279 cc-c3-h2 47.7 110.47 SOURCE5 12 1.1111 ca-ce-cg 67.2 116.47 SOURCE5 5 1.0847 c2-cc-na 86.7 123.27 SOURCE5 6 1.9888 ca-c3-s4 63.9 109.52 SOURCE5 7 1.3239 n2-cf-nf 111.8 120.69 SOURCE5 6 1.4522 ce-cf-ss 63.7 120.95 SOURCE5 15 1.8784 c3-cx-ss 62.4 114.17 5/2017 4 0.0523 nh-ce-nh 108.6 119.71 SOURCE5 6 0.4946 cd-c -ne 87.0 112.22 SOURCE5 6 0.1806 na-c3-ss 82.9 103.15 SOURCE5 8 0.3361 cf-cf-os 86.8 115.19 SOURCE5 15 2.1777 cx-c3-h2 46.4 114.01 5/2017 8 0.8649 cv-ss-cy 79.2 82.62 SOURCE5 8 0.2654 ss-cy-ss 63.5 109.96 5/2017 1 ce-cx-os 82.8 117.21 SOURCE5 6 1.3466 nb-ca-ne 109.0 121.41 SOURCE5 6 1.6965 br-ca-nb 81.7 116.35 SOURCE5 5 0.4508 c3-nh-os 84.4 110.37 SOURCE5 6 2.4123 c2-nh-p5 81.1 125.90 SOURCE5 6 1.8594 br-ca-cp 63.6 121.39 SOURCE5 7 0.3403 cc-ce-cc 67.2 116.17 SOURCE5 6 0.4089 c3-nh-s6 63.9 116.49 SOURCE5 6 0.5375 cx-c3-na 82.4 114.78 5/2017 7 1.7481 ca-os-p3 85.6 110.46 SOURCE5 5 0.0025 ce-cf-sy 62.9 123.19 SOURCE5 5 0.3760 ca-n2-n1 92.5 118.48 SOURCE5 5 0.1464 cd-cd-no 82.3 125.95 SOURCE5 5 2.2787 na-n2-os 113.1 104.34 SOURCE5 6 0.3185 ce-c3-f 88.3 110.31 SOURCE5 6 0.9204 cx-cc-na 82.0 127.21 5/2017 7 2.0873 n -n2-na 113.9 106.04 SOURCE5 6 0.3975 c3-cf-cc 67.1 117.43 SOURCE5 5 2.0116 ca-na-cy 63.6 128.06 5/2017 7 0.2603 h1-c3-py 54.3 109.38 SOURCE5
import abc
from collections import defaultdict
import collections.abc
import fnmatch
import os
from pathlib import Path, PosixPath, PurePosixPath, WindowsPath
from typing import Any, IO, Iterable, Optional, TYPE_CHECKING, Union
from urllib.parse import urlparse
from warnings import warn

from . import anypath
from .exceptions import (
    ClientMismatchError,
    CloudPathFileExistsError,
    CloudPathIsADirectoryError,
    CloudPathNotADirectoryError,
    DirectoryNotEmptyError,
    IncompleteImplementationError,
    InvalidPrefixError,
    MissingDependenciesError,
    NoStatError,
    OverwriteDirtyFileError,
    OverwriteNewerCloudError,
    OverwriteNewerLocalError,
)

if TYPE_CHECKING:
    from .client import Client


class CloudImplementation:
    """Registry entry that pairs a client class with a path class for one
    cloud provider, and validates that both halves are registered before use.
    """

    def __init__(self):
        self.name = None
        # Set to False elsewhere when the provider's SDK failed to import;
        # checked in validate_completeness below.
        self.dependencies_loaded = True
        self._client_class = None
        self._path_class = None

    def validate_completeness(self):
        """Raise if either registered class is missing or its dependencies
        failed to load; otherwise return None."""
        expected = ["client_class", "path_class"]
        missing = [cls for cls in expected if getattr(self, f"_{cls}") is None]
        if missing:
            raise IncompleteImplementationError(
                f"Implementation is missing registered components: {missing}"
            )
        if not self.dependencies_loaded:
            raise MissingDependenciesError(
                f"Missing dependencies for {self._client_class.__name__}. You can install them "
                f"with 'pip install cloudpathlib[{self.name}]'."
            )

    @property
    def client_class(self):
        # Validated accessor: raises instead of returning a half-registered None
        self.validate_completeness()
        return self._client_class

    @property
    def path_class(self):
        # Validated accessor: raises instead of returning a half-registered None
        self.validate_completeness()
        return self._path_class


# Maps a provider key (e.g. "s3") to its CloudImplementation; defaultdict so
# client and path classes can register in either order.
implementation_registry: defaultdict = defaultdict(CloudImplementation)


def register_path_class(key: str):
    """Class decorator that registers a CloudPath subclass under ``key`` in
    ``implementation_registry`` and links the class back to its entry via
    ``_cloud_meta``."""

    def decorator(cls: type):
        if not issubclass(cls, CloudPath):
            raise TypeError("Only subclasses of CloudPath can be registered.")
        global implementation_registry
        implementation_registry[key]._path_class = cls
        cls._cloud_meta = implementation_registry[key]
        return cls

    return decorator


class CloudPathMeta(abc.ABCMeta):
    """Metaclass that (1) dispatches ``CloudPath(...)`` calls to the concrete
    registered subclass whose prefix matches, and (2) copies docstrings from
    ``pathlib.Path`` onto matching public attributes."""

    def __call__(cls, cloud_path, *args, **kwargs):
        # cls is a class that is the instance of this metaclass, e.g., CloudPath
        # Dispatch to subclass if base CloudPath
        if cls == CloudPath:
            for implementation in implementation_registry.values():
                path_class = implementation._path_class
                if path_class is not None and path_class.is_valid_cloudpath(
                    cloud_path, raise_on_error=False
                ):
                    # Instantiate path_class instance
                    new_obj = path_class.__new__(path_class, cloud_path, *args, **kwargs)
                    if isinstance(new_obj, path_class):
                        path_class.__init__(new_obj, cloud_path, *args, **kwargs)
                    return new_obj
            valid = [
                impl._path_class.cloud_prefix
                for impl in implementation_registry.values()
                if impl._path_class is not None
            ]
            raise InvalidPrefixError(
                f"Path {cloud_path} does not begin with a known prefix " f"{valid}."
            )

        # Otherwise instantiate as normal
        new_obj = cls.__new__(cls, cloud_path, *args, **kwargs)
        if isinstance(new_obj, cls):
            cls.__init__(new_obj, cloud_path, *args, **kwargs)
        return new_obj

    def __init__(cls, name, bases, dic):
        # Copy docstring from pathlib.Path
        for attr in dir(cls):
            if (
                not attr.startswith("_")
                and hasattr(Path, attr)
                and hasattr(getattr(Path, attr), "__doc__")
            ):
                docstring = getattr(Path, attr).__doc__ + " _(Docstring copied from pathlib.Path)_"
                getattr(cls, attr).__doc__ = docstring
                if isinstance(getattr(cls, attr), property):
                    # Properties have __doc__ duplicated under fget, and at least some parsers
                    # read it from there.
                    getattr(cls, attr).fget.__doc__ = docstring


# Abstract base class
class CloudPath(metaclass=CloudPathMeta):
    """Base class for cloud storage file URIs, in the style of the Python standard library's
    [`pathlib` module](https://docs.python.org/3/library/pathlib.html). Instances represent a path
    in cloud storage with filesystem path semantics, and convenient methods allow for basic
    operations like joining, reading, writing, iterating over contents, etc. `CloudPath` almost
    entirely mimics the [`pathlib.Path`](https://docs.python.org/3/library/pathlib.html#pathlib.Path)
    interface, so most familiar properties and methods should be available and behave in the
    expected way.

    Analogous to the way `pathlib.Path` works, instantiating `CloudPath` will instead create an
    instance of an appropriate subclass that implements a particular cloud storage service, such as
    [`S3Path`](../s3path). This dispatching behavior is based on the URI scheme part of a cloud
    storage URI (e.g., `"s3://"`).
    """

    # Filled in by register_path_class / subclasses
    _cloud_meta: CloudImplementation
    cloud_prefix: str

    def __init__(self, cloud_path: Union[str, "CloudPath"], client: Optional["Client"] = None):
        # Raises InvalidPrefixError if the string does not start with cloud_prefix
        self.is_valid_cloudpath(cloud_path, raise_on_error=True)

        # versions of the raw string that provide useful methods
        self._str = str(cloud_path)
        self._url = urlparse(self._str)
        self._path = PurePosixPath(f"/{self._no_prefix}")

        # setup client
        if client is None:
            if isinstance(cloud_path, CloudPath):
                client = cloud_path.client
            else:
                client = self._cloud_meta.client_class.get_default_client()
        if not isinstance(client, self._cloud_meta.client_class):
            raise ClientMismatchError(
                f"Client of type [{client.__class__}] is not valid for cloud path of type "
                f"[{self.__class__}]; must be instance of [{self._cloud_meta.client_class}], or "
                f"None to use default client for this cloud path class."
            )
        self.client: Client = client

        # track if local has been written to, if so it may need to be uploaded
        self._dirty = False

        # handle if local file gets opened
        self._handle = None

    def __del__(self):
        # make sure that file handle to local path is closed
        if self._handle is not None:
            self._handle.close()

    @property
    def _no_prefix(self) -> str:
        # Path string without the leading scheme prefix (e.g. "s3://")
        return self._str[len(self.cloud_prefix) :]

    @property
    def _no_prefix_no_drive(self) -> str:
        # Path string without the scheme prefix and the bucket/container name
        return self._str[len(self.cloud_prefix) + len(self.drive) :]

    @classmethod
    def is_valid_cloudpath(cls, path: Union[str, "CloudPath"], raise_on_error=False) -> bool:
        """Return whether ``path`` starts with this class's prefix
        (case-insensitive); optionally raise InvalidPrefixError instead of
        returning False."""
        valid = str(path).lower().startswith(cls.cloud_prefix.lower())
        if raise_on_error and not valid:
            raise InvalidPrefixError(
                f"'{path}' is not a valid path since it does not start with '{cls.cloud_prefix}'"
            )
        return valid

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self}')"

    def __str__(self) -> str:
        return self._str

    def __hash__(self) -> int:
        # Hash on (class name, string) so equal paths of the same type collide
        return hash((type(self).__name__, str(self)))

    def __eq__(self, other: Any) -> bool:
        return isinstance(other, type(self)) and str(self) == str(other)

    def __fspath__(self):
        # os.fspath protocol: ensure the cached local copy exists, then return
        # its filesystem path (downloads a file if needed via _refresh_cache)
        if self.is_file():
            self._refresh_cache(force_overwrite_from_cloud=False)
        return str(self._local)

    def __lt__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.parts < other.parts

    def __le__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.parts <= other.parts

    def __gt__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.parts > other.parts

    def __ge__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return self.parts >= other.parts

    # ====================== NOT IMPLEMENTED ======================
    # absolute - no cloud equivalent; all cloud paths are absolute already
    # as_posix - no cloud equivalent; not needed since we assume url separator
    # chmod - permission changing should be explicitly done per client with methods
    #         that make sense for the client permission options
    # cwd - no cloud equivalent
    # expanduser - no cloud equivalent
    # group - should be implemented with client-specific permissions
    # home - no cloud equivalent
    # is_absolute - no cloud equivalent; all cloud paths are absolute already
    # is_block_device - no cloud equivalent
    # is_char_device - no cloud equivalent
    # is_fifo - no cloud equivalent
    # is_mount - no cloud equivalent
    # is_reserved - no cloud equivalent
    # is_socket - no cloud equivalent
    # is_symlink - no cloud equivalent
    # lchmod - no cloud equivalent
    # lstat - no cloud equivalent
    # owner - no cloud equivalent
    # relative to - cloud paths are absolute
    # resolve - all cloud paths are absolute, so no resolving
    # root - drive already has the bucket and anchor/prefix has the scheme, so nothing to store here
    # symlink_to - no cloud equivalent

    # ====================== REQUIRED, NOT GENERIC ======================
    # Methods that must be implemented, but have no generic application
    @property
    @abc.abstractmethod
    def drive(self) -> str:
        """For example "bucket" on S3 or "container" on Azure; needs to
        be defined for each class"""
        pass

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """Should be implemented without requiring a dir is downloaded"""
        pass

    @abc.abstractmethod
    def is_file(self) -> bool:
        """Should be implemented without requiring that the file is downloaded"""
        pass

    @abc.abstractmethod
    def mkdir(self, parents: bool = False, exist_ok: bool = False):
        """Should be implemented using the client API without requiring a dir is downloaded"""
        pass

    @abc.abstractmethod
    def touch(self):
        """Should be implemented using the client API to create and update modified time"""
        pass

    # ====================== IMPLEMENTED FROM SCRATCH ======================
    # Methods with their own implementations that work generically
    def __rtruediv__(self, other):
        raise ValueError(
            "Cannot change a cloud path's root since all paths are absolute; create a new path instead."
        )

    @property
    def anchor(self) -> str:
        return self.cloud_prefix

    def as_uri(self) -> str:
        return str(self)

    def exists(self) -> bool:
        return self.client._exists(self)

    @property
    def fspath(self) -> str:
        return self.__fspath__()

    def glob(self, pattern: str) -> Iterable["CloudPath"]:
        """Yield children matching ``pattern`` (fnmatch semantics on the
        prefix-and-drive-stripped path); a leading ``**/`` makes the listing
        recursive."""
        # strip cloud prefix from pattern if it is included
        if pattern.startswith(self.cloud_prefix):
            pattern = pattern[len(self.cloud_prefix) :]

        # strip "drive" from pattern if it is included
        if pattern.startswith(self.drive + "/"):
            pattern = pattern[len(self.drive + "/") :]

        # identify if pattern is recursive or not
        recursive = False
        if pattern.startswith("**/"):
            pattern = pattern.split("/", 1)[-1]
            recursive = True

        for f in self.client._list_dir(self, recursive=recursive):
            if fnmatch.fnmatch(f._no_prefix_no_drive, pattern):
                yield f

    def iterdir(self) -> Iterable["CloudPath"]:
        for f in self.client._list_dir(self, recursive=False):
            yield f

    # NOTE(review): this method continues beyond the visible chunk
    def open(
        self,
        mode="r",
        buffering=-1,
        encoding=None,
        errors=None,
        newline=None,
        force_overwrite_from_cloud=False,  # extra kwarg not in pathlib
        force_overwrite_to_cloud=False,  # extra kwarg not in pathlib
    ) -> IO:
        # if trying
to call open on a directory that exists if self.exists() and not self.is_file(): raise CloudPathIsADirectoryError( f"Cannot open directory, only files. Tried to open ({self})" ) if mode == "x" and self.exists(): raise CloudPathFileExistsError(f"Cannot open existing file ({self}) for creation.") # TODO: consider streaming from client rather than DLing entire file to
"in-law", "continuation", "violates", "landlords", "smokers", "registering", "seiji", "oozing", "buildup", "avenging", "etched", "loon", "paddling", "keeler", "whooshes", "tenacity", "wheelbarrow", "giveaway", "hazards", "clinically", "cubby", "paroled", "'not", "indonesian", "vidya", "accumulate", "angelique", "flashlights", "whiny", "cranking", "lumberjack", "bargained", "developers", "laszlo", "taiwanese", "byrne", "appetites", "baz", "streetcar", "levin", "northwestern", "nak", "mccormick", "atonement", "nightstand", "bristow", "armistice", "shinichi", "kolkata", "clowning", "tightening", "bakersfield", "singsong", "advisory", "dyin", "fundraising", "upped", "headstrong", "prevailed", "hungover", "birthing", "stagecoach", "simulated", "megumi", "peddler", "belmont", "perfumes", "demonstrates", "ecological", "ola", "grievance", "goddesses", "figment", "smalls", "debriefing", "minorities", "shelling", "daredevil", "clean-up", "punctuality", "kush", "leapt", "burnin", "businesswoman", "sulphur", "organizer", "tal", "ringleader", "tantrums", "idealist", "inkling", "door-to-door", "captives", "chloride", "musa", "ang", "foresight", "pram", "do-do", "crippling", "bungee", "fashions", "hemp", "gunfight", "registers", "celibacy", "anthropologist", "ressler", "libido", "modification", "deviant", "jib", "pests", "aaa", "pensions", "hummer", "disagreeable", "daunting", "consolidated", "breen", "barbra", "slipstream", "tr", "moderation", "blissful", "thirty-six", "moly", "europa", "innate", "viii", "patents", "jerrod", "yoke", "necklaces", "comical", "physiological", "naps", "gan", "valdez", "ware", "varies", "jimenez", "hanley", "budding", "carburetor", "traffickers", "unveiling", "billiard", "suresh", "longs", "klutz", "benefited", "outlawed", "homestead", "cooke", "guantanamo", "dugan", "balboa", "bingham", "soju", "loosened", "alessandro", "watergate", "pompey", "expands", "protestants", "dima", "adhere", "constipated", "fixation", "decently", "chrissake", "epileptic", 
"booted", "rockwell", "some-", "meteors", "mowing", "ocd", "yuji", "bup", "aftershave", "malta", "animosity", "whisk", "lilac", "greets", "marwan", "marika", "lombard", "comatose", "glows", "aided", "burroughs", "greats", "altman", "sentries", "grendizer", "dentures", "swindled", "tnt", "nobu", "hadji", "resin", "fictitious", "kelp", "slush", "predatory", "contention", "navigational", "pairing", "emptying", "pacifist", "fdr", "atropine", "compress", "unser", "tip-off", "quart", "wgbh", "chamomile", "spiteful", "brawn", "putt", "spirituality", "tyrants", "valedictorian", "bureaucrat", "poacher", "porters", "associations", "formulas", "hackett", "embroidery", "gratifying", "cps", "westen", "reputable", "glaze", "wrestled", "sufferings", "yume", "aragon", "relocation", "harden", "amour", "how-", "rammed", "unites", "helo", "dupont", "jiggle", "symbolizes", "brainwashing", "seer", "ch040200", "gui", "symmetrical", "wikipedia", "cervical", "hoon", "law-abiding", "bajoran", "usable", "voyages", "sumner", "lurcio", "runaways", "elemental", "séance", "terrier", "chiana", "mothership", "shielded", "promotional", "tweak", "fixated", "mistakenly", "hellfire", "juju", "auctioneer", "flickering", "ak", "tutu", "isolde", "merlyn", "receding", "underestimating", "defibrillator", "chassis", "incomparable", "padding", "hazmat", "mosaic", "craters", "cheeses", "sickbay", "locator", "yielded", "turbine", "sporty", "paz", "idling", "disdain", "mitts", "hounding", "inflamed", "inverted", "corresponds", "rosalind", "gulping", "suspiciously", "polyester", "affinity", "sforza", "shifty", "whitechapel", "realises", "treading", "snotlout", "triggering", "crowder", "celibate", "getup", "wedlock", "mucus", "metamorphosis", "tourniquet", "danni", "signe", "irv", "ablaze", "illuminated", "cardiologist", "schanke", "inflated", "artiste", "lindbergh", "highs", "bookshop", "tc", "so.", "impotence", "muted", "chevalier", "unhand", "tambourine", "gro", "jus", "venerable", "doberman", "mogo", 
"1800s", "giuliano", "wanton", "transient", "stepdaughter", "responsive", "supermarkets", "invasive", "ofhis", "rots", "greendale", "wimps", "bajor", "blister", "bray", "exchanges", "attagirl", "doubly", "islanders", "l`ve", "befall", "psychedelic", "faust", "porcupine", "deteriorating", "i.e.", "escrow", "sprinklers", "saracen", "notoriously", "gifford", "passover", "spidey", "anterior", "backfire", "tokens", "ajob", "apricot", "venomous", "they-they", "brisk", "dhs", "pac", "nativity", "mahogany", "kunal", "kondo", "microchip", "cai", "spar", "massey", "listen-", "ardent", "no-brainer", "orin", "psychos", "jumpsuit", "amin", "stinging", "rustles", "woolly", "clitoris", "verification", "would-", "admissible", "uther", "caws", "poppa", "patronizing", "scandinavian", "ailing", "triads", "detailing", "magdalene", "standoff", "specifications", "resumed", "braids", "bridger", "townspeople", "brainstorm", "dei", "breakout", "tangerine", "tzu", "insidious", "gumbo", "icons", "watchful", "cruisers", "somalia", "crucifixion", "piranha", "darkened", "ratty", "protagonist", "pyre", "huffs", "ima", "zoning", "great-grandmother", "we`ll", "bracket", "nuño", "scrappy", "decorum", "childress", "stifled", "sd-6", "ballots", "bayonets", "applejack", "dmitry", "decomposition", "voucher", "taoist", "shat", "raindrops", "wrought", "converge", "paisley", "almeida", "changer", "chipmunk", "bloc", "airstrip", "amore", "clog", "tinder", "dreamers", "lien", "tolstoy", "molester", "transference", "lightman", "algerian", "lucio", "stomped", "buenas", "qualification", "perps", "repertoire", "stoker", "pester", "backdrop", "optics", "bff", "müller", "counters", "statutory", "deport", "gyeon-woo", "waterfalls", "cuffed", "rosebud", "molasses", "baltar", "strengthened", "renovations", "haw", "ville", "mastery", "marci", "feasible", "gospels", "clément", "sixties", "drastically", "flourished", "pokemon", "priced", "borneo", "header", "quigley", "inhaled", "'est-ce", "considerations", "towing", 
"cranial", "purified", "summoning", "attachments", "ngo", "mcgregor", "moot", "opus", "nonexistent", "meaner", "poncho", "paulette", "sheriffs", "agitation", "indiscretion", "escalator", "outsmart", "undying", "armin", "bilfran", "trav", "glazed", "fairer", "100th", "vino", "heathens", "flicker", "weirdness", "stomachache", "downed", "stony", "charmaine", "enema", "rector", "platt", "alight", "ransacked", "bartlet", "shortcomings", "wedged", "skyscrapers", "goto", "uh-uh-uh", "krieger", "femoral", "forcefully", "overseeing", "rosalee", "sparrows", "polaroid", "beaut", "dori", "woozy", "pediatric", "holla", "slacks", "barr", "mikado", "pleas", "infatuation", "tlhe", "tuscany", "tasked", "churning", "aloof", "extremist", "billiards", "plentiful", "kingpin", "gull", "assurances", "faculties", "existential", "loudest", "sixpence", "nodes", "tremor", "things-", "incorrigible", "hanover", "overprotective", "cui", "llama", "wearin", "coupe", "inexcusable", "botanical", "pastrami", "cooperated", "mortgaged", "day-", "dimwit", "nurtured", "subtlety", "quarreled", "whiplash", "zhan", "flipper", "kristy", "fie", "baby-sitting", "racks", "t-bone", "hp", "interferes", "stroking", "recklessly", "thyroid", "scaffold", "epa", "off-line", "livingstone", "instructors", "burritos", "thereof", "drat", "lleó", "yagyu", "protestors", "lode", "tripod", "mcmanus", "tris", "mullet", "teyla", "squeamish", "euphemism", "tarnished", "brahms", "ancestry", "becks", "ramifications", "tracer", "pedals", "steamy", "beginners", "babcock", "skydiving", "liqueur", "tribeca", "conspirators", "renard", "napalm", "infidels", "shrimps", "outbursts", "tosser", "hops", "dramafever", "dictation", "yukiko", "quivering", "rationally", "alleviate", "cloaking", "crayons", "cheekbones", "complicity", "blowtorch", "avoids", "gettysburg", "veggies", "departs", "anne-marie", "duds", "it--it", "old-timer", "conservatives", "endowed", "meanings", "ph.d.", "jean-claude", "'after", "coon", "clutter", "mahmoud", 
"leona", "cob", "endeavour", "hoses", "jackals", "tightened", "infatuated", "tamper", "kishan", "bagging", "mallet", "bram", "clemency", "plummer", "suffocation", "ikea", "pointer", "nugent", "need-", "strapping", "shoddy", "juanito", "saturated", "husky", "jase", "cornish", "smooches", "shik", "prejudices", "incapacitated", "gar", "precedes", "anaesthetic", "skyscraper", "assigning", "that--that", "imperialism", "'leary", "sinai", "validate", "robotics", "apprehension", "boning", "bharat", "objectively", "amputate", "sanskrit", "calibre", "attained", "computing", "high-risk", "jr.", "confuses", "sprinkler", "unfairly", "shhhh", "unused", "evidences", "dwyer", "waged", "revision", "melodies", "soulless", "amorous", "head-to-head", "screwy", "reinvent", "pesticide", "adulthood", "lessen", "amjad", "balkans", "confines", "footballer", "renovated", "begone", "cynicism", "ditches", "lun", "pitied", "innings", "tuppence", "deserting", "night-", "seedy", "robb", "luanne", "korsak", "broadcasts", "sociology", "unparalleled", "chomping", "crawls", "shortcuts", "punishments", "portals", "antiquity", "ooh-ooh-ooh", "peaked", "reigning", "banal", "rihanna", "dinghy", "cynic", "constables", "paulson", "custodian", "knowles", "coughed", "hornblower", "acquittal", "anja", "handover", "'brian", "lipton", "manhole", "launchpad", "caresses", "marksman", "sultry", "impressionable", "ofhere", "clipper", "jayne", "preferable", "amazes", "confesses", "retrieval", "timers", "petal", "matisse", "merc", "hastily", "vaginas", "flowery", "nosey", "omg", "transparency", "stimulus", "clad", "gondola", "hooters", "schwarzenegger", "browsing", "pecking", "matti", "henson", "rockford", "alluring", "demographic", "'by", "adalind", "drips", "zorn", "foa", "strategically", "absorbs", "scamp", "please-", "inaccurate", "forthwith", "sponges", "grapevine", "womanizer", "periodic", "occupants", "nae", "piggyback", "malachi", "man-to-man", "cite", "parachutes", "brunt", "b-but", "seekers", 
"disagreements", "shalini", "filmmaking", "suffolk", "schnell", "adjoining", "gooey", "p.j.", "slacking", "lyndsey", "luz", "speculating", "screened", "cleary", "aberdeen", "simultaneous", "lint", "oft", "mariko", "bartowski", "stooge", "cross-eyed", "drafts", "titled", "symbolism", "first-aid", "gradual", "clack", "impatience", "enlightening", "migrants", "ludo", "anchors", "declining", "rena", "protested", "secondhand", "terrorize", "ineffective", "dobson", "syllable", "verbally", "wrongdoing", "rourke", "utilities", "kool-aid", "acknowledging", "primate", "lout", "stratton", "ηe", "bookies", "revert", "racked", "congratulation", "dorrit", "sarcastically", "aquarius", "atari", "chalet", "greenberg", "bern", "disgusts", "unfolding", "pax", "persuading", "nicks", "kia", "adhesive", "miyamoto", "understandably", "crease", "resists", "todo", "bello", "carina", "dekker", "ojai", "cu", "spasm", "kafka", "tweeted", "mahesh", "recapture", "anemia", "disbanded", "dorky", "she`s", "aptitude", "dominoes", "trini", "ovens", "hutchinson", "god-given", "carelessness", "simpleton", "lll", "indigo", "chewy", "kal-el", "nom", "icelandic", "amend", "daze", "candidacy", "foreseen", "playa", "sondra", "muffler", "unfulfilled", "pertaining", "cardassians", "no.1", "tegan", "caribou", "gangrene", "dunne", "coliseum", "splatter", "rectum", "tripe", "lawless", "zimmerman", "salutations", "willful", "sludge", "imbeciles", "cockney", "cantor", "negotiable", "squirm", "foon", "flaky", "fail-safe", "spaceships", "contaminate", "mcneil", "know-how", "syllables", "nines", "ichabod", "discriminate", "enclosure", "gagged", "tabatha", "warmly", "warmest", "sweeper", "grader", "'pol", "moldy", "clemenza", "wicker", "koto", "caracas", "embraces", "gully", "muad", "decimated", "docs", "inward", "romances", "piety", "takeaway", "deflect", "kryten", "lanie", "self-inflicted", "pomp", "rebelled", "indulging", "zapped", "wer", "sandbox", "buttered", "lange", "bingley", "invader", "feeder", "pla", 
"behead", "forceps", "reopening", "reinstate", "legislative", "guys-", "veg", "carpool", "zest", "jeju", "defer", "topple", "mutilation", "cloths", "secretariat", "shrill", "reliving", "adorned", "'dib", "quarterly", "ailment", "start-up", "contagion", "parading", "sustenance", "desertion", "assures", "egghead", "sledgehammer", "simulator", "temperamental", "edict", "sensibility", "farkle", "fuehrer", "impetuous", "notable", "dollhouse", "stimulated", "succumbed", "corrine", "shalimar", "tightrope", "conspire", "goatee", "eduard", "tigress", "eugenia", "bulky", "endo", "hemlock", "la-la-la", "scoops", "salisbury", "quint", "anything-", "headway", "carelessly", "incinerated", "grisly", "gait", "unscathed", "zinc", "midsomer", "sneezed", "thanks.", "stethoscope", "dοn", "raking", "bibles", "sono", "she-she", "lobbying", "strait", "xiii", "owt", "erickson", "finlay", "leveled", "vipers", "hookup", "commonplace", "resentful", "crick", "boyz", "ranjit", "supersonic", "bigelow", "exclaim", "patronage", "dieu", "burmese", "somethings", "academics", "egos", "constabulary", "deadlines", "morley", "farid", "bonanza", "nozzle", "distasteful", "prod", "cleave", "henshaw", "wessex", "discourse", "overcoming", "magna", "philanthropist", "crapped", "expanse", "yow", "typhus", "betel", "remo", "disrespected", "yasmin", "electra", "buzzard", "marlo", "darrow", "expectancy", "madmen", "jutsu", "dazzled", "waller", "dogg", "citrus", "needlessly", "attends", "aoki", "truckload", "aramis", "cancelling", "unsettled", "seared", "'twas", "singularity", "thaddeus", "noo", "chartered", "godson", "cuthbert", "venting", "contradictory", "cahoots", "bridgette", "decidedly", "lac", "knobs", "artificially", "athletics", "schoolwork", "hürrem", "faux", "copped", "gecko", "competed", "trois", "barring", "polk", "spanner", "dropout", "uhtred", "ina", "couscous", "hikari", "sherri", "mumps", "napa", "enchantment", "reprieve", "headstone", "the--the", "icky", "drago", "siberian", "audacious", 
"pecan", "skylight", "juno", "twittering", "ronda", "barmaid", "commendation", "cartilage", "mucking", "suppression", "plume", "delores", "madre", "helix", "sultana", "'please", "twinkling", "zola", "prentice", "sg-1", "pajama", "informative", "protégé", "nice-looking", "suave", "badminton", "yoshioka", "graff", "karp", "lander", "guevara", "bolshevik", "halstead", "unmanned", "stair", "idealistic", "breakin", "disowned", "doormat", "conquests",
occur. Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. """ def __init__(self, **kwargs): self.model_version = kwargs.get("model_version", "latest") self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint") self.disable_service_logs = kwargs.get("disable_service_logs", False) def __repr__(self, **kwargs): return "RecognizeEntitiesAction(model_version={}, string_index_type={}, disable_service_logs={})".format( self.model_version, self.string_index_type, self.disable_service_logs )[ :1024 ] def _to_generated(self, api_version): if api_version == DEFAULT_API_VERSION: from ._generated.v3_2_preview_1 import models else: from ._generated.v3_1 import models return models.EntitiesTask( parameters=models.EntitiesTaskParameters( model_version=self.model_version, string_index_type=self.string_index_type, logging_opt_out=self.disable_service_logs, ) ) class AnalyzeSentimentAction(DictMixin): """AnalyzeSentimentAction encapsulates the parameters for starting a long-running Sentiment Analysis operation. If you just want to analyze sentiment in a list of documents, and not perform multiple long running actions on the input of documents, call method `analyze_sentiment` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. :keyword bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more granular analysis around the aspects of a product or service (also known as aspect-based sentiment analysis). If set to true, the returned :class:`~azure.ai.textanalytics.SentenceSentiment` objects will have property `mined_opinions` containing the result of this analysis. :keyword str string_index_type: Specifies the method used to interpret string offsets. `UnicodeCodePoint`, the Python encoding, is the default. 
To override the Python default, you can also pass in `Utf16CodePoint` or TextElement_v8`. For additional information see https://aka.ms/text-analytics-offsets :keyword bool disable_service_logs: If set to true, you opt-out of having your text input logged on the service side for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Setting this parameter to true, disables input logging and may limit our ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. :ivar str model_version: The model version to use for the analysis. :ivar bool show_opinion_mining: Whether to mine the opinions of a sentence and conduct more granular analysis around the aspects of a product or service (also known as aspect-based sentiment analysis). If set to true, the returned :class:`~azure.ai.textanalytics.SentenceSentiment` objects will have property `mined_opinions` containing the result of this analysis. :ivar str string_index_type: Specifies the method used to interpret string offsets. `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default, you can also pass in `Utf16CodePoint` or TextElement_v8`. For additional information see https://aka.ms/text-analytics-offsets :ivar bool disable_service_logs: If set to true, you opt-out of having your text input logged on the service side for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Setting this parameter to true, disables input logging and may limit our ability to remediate issues that occur. 
Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. """ def __init__(self, **kwargs): self.model_version = kwargs.get("model_version", "latest") self.show_opinion_mining = kwargs.get("show_opinion_mining", False) self.string_index_type = kwargs.get("string_index_type", None) self.disable_service_logs = kwargs.get("disable_service_logs", False) def __repr__(self, **kwargs): return ( "AnalyzeSentimentAction(model_version={}, show_opinion_mining={}, string_index_type={}, " "disable_service_logs={}".format( self.model_version, self.show_opinion_mining, self.string_index_type, self.disable_service_logs, )[:1024] ) def _to_generated(self, api_version): if api_version == DEFAULT_API_VERSION: from ._generated.v3_2_preview_1 import models else: from ._generated.v3_1 import models return models.SentimentAnalysisTask( parameters=models.SentimentAnalysisTaskParameters( model_version=self.model_version, opinion_mining=self.show_opinion_mining, string_index_type=self.string_index_type, logging_opt_out=self.disable_service_logs, ) ) class RecognizePiiEntitiesAction(DictMixin): """RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII Entities Recognition operation. If you just want to recognize pii entities in a list of documents, and not perform multiple long running actions on the input of documents, call method `recognize_pii_entities` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. :keyword str domain_filter: An optional string to set the PII domain to include only a subset of the PII entity categories. Possible values include 'phi' or None. :keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of the specific PII entity categories you want to filter out. 
For example, if you only want to filter out U.S. social security numbers in a document, you can pass in `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg. :paramtype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory] :keyword str string_index_type: Specifies the method used to interpret string offsets. `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default, you can also pass in `Utf16CodePoint` or TextElement_v8`. For additional information see https://aka.ms/text-analytics-offsets :keyword bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your input text on the service side for troubleshooting. If set to False, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. :ivar str model_version: The model version to use for the analysis. :ivar str domain_filter: An optional string to set the PII domain to include only a subset of the PII entity categories. Possible values include 'phi' or None. :ivar categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of the specific PII entity categories you want to filter out. For example, if you only want to filter out U.S. social security numbers in a document, you can pass in `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg. :vartype categories_filter: list[~azure.ai.textanalytics.PiiEntityCategory] :ivar str string_index_type: Specifies the method used to interpret string offsets. `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default, you can also pass in `Utf16CodePoint` or TextElement_v8`. 
For additional information see https://aka.ms/text-analytics-offsets :ivar bool disable_service_logs: Defaults to true, meaning that Text Analytics will not log your input text on the service side for troubleshooting. If set to False, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. """ def __init__(self, **kwargs): self.model_version = kwargs.get("model_version", "latest") self.domain_filter = kwargs.get("domain_filter", None) self.categories_filter = kwargs.get("categories_filter", None) self.string_index_type = kwargs.get("string_index_type", "UnicodeCodePoint") self.disable_service_logs = kwargs.get("disable_service_logs", True) def __repr__(self, **kwargs): return ( "RecognizePiiEntitiesAction(model_version={}, domain_filter={}, categories_filter={}, " "string_index_type={}, disable_service_logs={}".format( self.model_version, self.domain_filter, self.categories_filter, self.string_index_type, self.disable_service_logs, )[:1024] ) def _to_generated(self, api_version): if api_version == DEFAULT_API_VERSION: from ._generated.v3_2_preview_1 import models else: from ._generated.v3_1 import models return models.PiiTask( parameters=models.PiiTaskParameters( model_version=self.model_version, domain=self.domain_filter, pii_categories=self.categories_filter, string_index_type=self.string_index_type, logging_opt_out=self.disable_service_logs, ) ) class ExtractKeyPhrasesAction(DictMixin): """ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase extraction operation If you just want to extract key phrases from a list of documents, and not perform multiple long running actions on the input of documents, call method 
`extract_key_phrases` instead of interfacing with this model. :keyword str model_version: The model version to use for the analysis. :keyword bool disable_service_logs: If set to true, you opt-out of having your text input logged on the service side for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Setting this parameter to true, disables input logging and may limit our ability to remediate issues that occur. Please see Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for additional details, and Microsoft Responsible AI principles at https://www.microsoft.com/ai/responsible-ai. :ivar str model_version: The model version to use for the analysis. :ivar bool disable_service_logs: If set to true, you opt-out of having your text input logged on the service side for troubleshooting. By default, Text Analytics logs your input text for 48 hours, solely to allow for troubleshooting issues in providing you with the Text Analytics natural language processing functions. Setting this parameter to true, disables input logging and may limit our ability to remediate issues
# --------------------------------------------------------------------------------------------------
# Model reduction tool functions
# <NAME>
# 5/11/2021
#
# Reference:
# Wang, Bin, <NAME>, and <NAME>. 2021. Power System Network Reduction for Power Hardware-in-the-Loop Simulation: Preprint.
# Golden, CO: National Renewable Energy Laboratory. NREL/CP-5D00-78372. https://www.nrel.gov/docs/fy21osti/78372.pdf
# --------------------------------------------------------------------------------------------------
import xlrd
import numpy
import math


# --------------------------------------------------------------------------------------------------
# power flow data class
class PFData():
    """Container for a PSS/E power-flow snapshot.

    All attributes start as empty lists and are populated by :meth:`getdata`,
    which pulls system, bus, load, generator, branch and shunt data through the
    ``psspy`` API. Units follow PSS/E conventions (MW/Mvar on system base).
    """

    def __init__(self):
        # system data
        self.basemva = []       # system MVA base (scalar after getdata)

        # bus data
        self.bus_num = []       # bus numbers
        self.bus_type = []      # bus type codes (3 = slack)
        self.bus_Vm = []        # voltage magnitude [pu]
        self.bus_Va = []        # voltage angle
        self.bus_kV = []        # voltage [kV]
        self.bus_basekV = []    # base voltage [kV]
        self.bus_name = []
        self.bus_area = []
        self.bus_zone = []
        self.bus_owner = []

        # load data
        self.load_id = []
        self.load_bus = []
        self.load_Z = []        # constant-impedance part (complex MVA)
        self.load_I = []        # constant-current part (complex MVA)
        self.load_P = []        # constant-power part (complex MVA)
        self.load_MW = []       # total MW per load (Z + I + P parts)
        self.load_Mvar = []     # total Mvar per load
        self.load_MW_total = []
        self.load_Mvar_total = []

        # generator data
        self.gen_id = []
        self.gen_bus = []
        self.gen_S = []         # complex output PQGEN
        self.gen_mod = []       # wind machine control mode (WMOD)
        self.gen_MW = []
        self.gen_Mvar = []
        self.gen_MW_total = []
        self.gen_Mvar_total = []
        self.gen_MW_ll = []     # PMIN per machine
        self.gen_MW_ul = []     # PMAX per machine
        self.gen_Mvar_ll = []   # QMIN per machine
        self.gen_Mvar_ul = []   # QMAX per machine
        self.gen_MVA_base = []  # MBASE per machine

        # branch data
        self.brc_from = []
        self.brc_to = []
        self.brc_id = []
        self.brc_rateA = []
        self.brc_rateB = []
        self.brc_S = []         # complex power flow per branch
        self.brc_P = []
        self.brc_Q = []
        self.brc_line_id = []   # IDs of lines only
        self.brc_xfmr_id = []   # IDs of transformers only

        # shunt data
        self.shunt_id = []
        self.shunt_bus = []
        self.shunt_S_NOM = []   # nominal complex shunt power
        self.shunt_MW_NOM = []
        self.shunt_Mvar_NOM = []

    def getdata(self, psspy):
        """Populate all attributes from the case currently loaded in PSS/E.

        :param psspy: the PSS/E Python API module.
        """
        # system data
        self.basemva = psspy.sysmva()

        # bus data (subsystem flag 2 / 1 follows the original retrieval calls)
        self.bus_num = psspy.abusint(-1, 2, 'NUMBER')[1][0]
        self.bus_type = psspy.abusint(-1, 2, 'TYPE')[1][0]
        self.bus_Vm = psspy.abusreal(-1, 2, 'PU')[1][0]
        self.bus_Va = psspy.abusreal(-1, 2, 'ANGLE')[1][0]
        self.bus_kV = psspy.abusreal(-1, 2, 'KV')[1][0]
        self.bus_basekV = psspy.abusreal(-1, 2, 'BASE')[1][0]
        self.bus_name = psspy.abuschar(-1, 1, 'NAME')[1][0]
        self.bus_area = psspy.abusint(-1, 1, 'AREA')[1][0]
        self.bus_zone = psspy.abusint(-1, 1, 'ZONE')[1][0]
        self.bus_owner = psspy.abusint(-1, 1, 'OWNER')[1][0]

        # load data: totals are the sum of the Z, I and P parts of the ZIP model
        self.load_id = psspy.aloadchar(-1, 1, 'ID')[1][0]
        self.load_bus = psspy.aloadint(-1, 1, 'NUMBER')[1][0]
        self.load_Z = numpy.asarray(psspy.aloadcplx(-1, 1, 'YLACT')[1][0])
        self.load_I = numpy.asarray(psspy.aloadcplx(-1, 1, 'ILACT')[1][0])
        self.load_P = numpy.asarray(psspy.aloadcplx(-1, 1, 'MVAACT')[1][0])
        self.load_MW = self.load_Z.real + self.load_I.real + self.load_P.real
        self.load_Mvar = self.load_Z.imag + self.load_I.imag + self.load_P.imag
        self.load_MW_total = sum(self.load_MW)
        self.load_Mvar_total = sum(self.load_Mvar)

        # generator data
        self.gen_id = psspy.amachchar(-1, 1, 'ID')[1][0]
        self.gen_bus = psspy.amachint(-1, 1, 'NUMBER')[1][0]
        self.gen_S = numpy.asarray(psspy.amachcplx(-1, 1, 'PQGEN')[1][0])
        self.gen_mod = numpy.asarray(psspy.amachint(-1, 1, 'WMOD')[1][0])
        self.gen_MW = self.gen_S.real
        self.gen_Mvar = self.gen_S.imag
        self.gen_MW_total = sum(self.gen_MW)
        self.gen_Mvar_total = sum(self.gen_Mvar)
        self.gen_MW_ll = psspy.amachreal(-1, 1, 'PMIN')[1][0]
        self.gen_MW_ul = psspy.amachreal(-1, 1, 'PMAX')[1][0]
        self.gen_Mvar_ll = psspy.amachreal(-1, 1, 'QMIN')[1][0]
        self.gen_Mvar_ul = psspy.amachreal(-1, 1, 'QMAX')[1][0]
        self.gen_MVA_base = psspy.amachreal(-1, 1, 'MBASE')[1][0]

        # branch data (flags 3, 2: all branches, both directions per original calls)
        ierr, iarray = psspy.abrnint(-1, 0, 0, 3, 2, ['FROMNUMBER', 'TONUMBER'])
        self.brc_from = iarray[0][:]
        self.brc_to = iarray[1][:]
        self.brc_id = psspy.abrnchar(-1, 0, 0, 3, 2, ['ID'])[1][0]
        self.brc_rateA = psspy.abrnreal(-1, 0, 0, 3, 2, ['RATEA'])[1][0]
        self.brc_rateB = psspy.abrnreal(-1, 0, 0, 3, 2, ['RATEB'])[1][0]
        # NOTE(review): the flag arguments here (-1, 1, 1, 3, 2) differ from the
        # other abrn* calls above (-1, 0, 0, 3, 2) — confirm intended subsystem/owner flags.
        self.brc_S = numpy.asarray(psspy.abrncplx(-1, 1, 1, 3, 2, ['PQ'])[1][0])
        self.brc_P = self.brc_S.real
        self.brc_Q = self.brc_S.imag
self.brc_line_id = psspy.abrnchar(-1, 0, 0, 1, 1, ['ID'])[1][0] self.brc_xfmr_id = psspy.abrnchar(-1, 0, 0, 5, 1, ['ID'])[1][0] # shunt data self.shunt_id = psspy.afxshuntchar(-1, 1, 'ID')[1][0] self.shunt_bus = psspy.afxshuntint(-1, 1, 'NUMBER')[1][0] self.shunt_S_NOM = numpy.asarray(psspy.afxshuntcplx(-1, 1, 'SHUNTNOM')[1][0]) self.shunt_MW_NOM = self.shunt_S_NOM.real self.shunt_Mvar_NOM = self.shunt_S_NOM.imag def read_bus(SPE_list): list_file = xlrd.open_workbook(SPE_list) sheet = list_file.sheet_by_index(0) N_list = sheet.ncols rootbus_list = [] d1bus_list = [] redbus_list = [] for coli in range(N_list): N_len_i = int(sheet.cell_value(0, coli)) N_d1bus_i = int(sheet.cell_value(1, coli)) rootbus_i = int(sheet.cell_value(2, coli)) d1bus_i = [] for ii in range(N_d1bus_i): tempbus = sheet.cell_value(ii + 3, coli) if tempbus: d1bus_i.append(int(tempbus)) redbus_i = [] for ii in range(N_len_i - 1): redbus_i.append(int(sheet.cell_value(ii + 3, coli))) rootbus_list.append(rootbus_i) d1bus_list.append(d1bus_i) redbus_list.append(redbus_i) return rootbus_list, redbus_list, d1bus_list, N_list def read_subsys(pfd, bus_rut, bus_red, bus_d1): # combine all loads on buses in reduced area PL = 0 # MW QL = 0 # Mvar load_bus = [] for i in range(len(pfd.load_bus)): # consider multiple loads on the same bus busi = pfd.load_bus[i] if busi in bus_red: PL = PL + pfd.load_MW[i] QL = QL + pfd.load_Mvar[i] if busi not in load_bus: load_bus.append(busi) # combine all shunts on buses in reduced area PS = 0.0 # MW QS = 0.0 # Mvar shunt_bus = [] for i in range(len(pfd.shunt_bus)): # consider multiple shunts on the same bus busi = pfd.shunt_bus[i] busi_idx = pfd.bus_num.index(busi) if busi in bus_red: PS = PS + pfd.shunt_MW_NOM[i] QS = QS + pfd.shunt_Mvar_NOM[i] if busi not in shunt_bus: shunt_bus.append(busi) # combine all generations on buses in reduced area PG = 0.0 # MW QG = 0.0 # MW MW_ll = 0.0 MW_ul = 0.0 Mvar_ll = 0.0 Mvar_ul = 0.0 MVA_base = 0.0 gen_bus = [] for busi in bus_red: 
def read_subsys(pfd, bus_rut, bus_red, bus_d1):
    """Aggregate the subsystem quantities of a reduced area.

    Args:
        pfd: power-flow data container (parallel lists: load_bus/load_MW/...,
            gen_bus/gen_MW/..., shunt_bus/..., brc_from/brc_to/..., bus_num/...).
        bus_rut: root bus number of the area.
        bus_red: bus numbers inside the reduced area.
        bus_d1: depth-1 bus numbers (directly connected to the root).

    Returns:
        (Pin, Qin, PL, QL, PG, QG, PS, QS, Vm, Va, Ve, PrateA, PrateB,
         MW_ll, MW_ul, Mvar_ll, Mvar_ul, MVA_base, load_bus, gen_bus, shunt_bus)

    Raises:
        ZeroDivisionError: if the reduced area contains no load or generator
            entries (the average voltage Ve is then undefined).

    Notes (changes vs. the original):
        - The `while ... in list[last_found+1:]` / `.index()` scans were
          replaced by single `enumerate` passes (same accumulation order, no
          O(n^2) slicing); the dead `if last_found == -1: break` branches
          (`.index` with a non-negative start can never return -1) are gone.
        - The unused `busi_idx = pfd.bus_num.index(busi)` in the shunt loop is
          removed; it could raise ValueError for shunt buses outside `bus_num`
          even when they were not in the reduced area.
    """
    # Combine all loads on buses in the reduced area (multiple loads per bus).
    PL = 0  # MW
    QL = 0  # Mvar
    load_bus = []
    for i, busi in enumerate(pfd.load_bus):
        if busi in bus_red:
            PL = PL + pfd.load_MW[i]
            QL = QL + pfd.load_Mvar[i]
            if busi not in load_bus:
                load_bus.append(busi)

    # Combine all shunts on buses in the reduced area.
    PS = 0.0  # MW
    QS = 0.0  # Mvar
    shunt_bus = []
    for i, busi in enumerate(pfd.shunt_bus):
        if busi in bus_red:
            PS = PS + pfd.shunt_MW_NOM[i]
            QS = QS + pfd.shunt_Mvar_NOM[i]
            if busi not in shunt_bus:
                shunt_bus.append(busi)

    # Combine all generation on buses in the reduced area
    # (multiple machines on the same bus are all accumulated).
    PG = 0.0      # MW
    QG = 0.0      # Mvar
    MW_ll = 0.0
    MW_ul = 0.0
    Mvar_ll = 0.0
    Mvar_ul = 0.0
    MVA_base = 0.0
    gen_bus = []
    for busi in bus_red:
        for idx, machine_bus in enumerate(pfd.gen_bus):
            if machine_bus != busi:
                continue
            PG = PG + pfd.gen_MW[idx]
            QG = QG + pfd.gen_Mvar[idx]
            MW_ll = MW_ll + pfd.gen_MW_ll[idx]
            MW_ul = MW_ul + pfd.gen_MW_ul[idx]
            Mvar_ll = Mvar_ll + pfd.gen_Mvar_ll[idx]
            Mvar_ul = Mvar_ul + pfd.gen_Mvar_ul[idx]
            MVA_base = MVA_base + pfd.gen_MVA_base[idx]
            if busi not in gen_bus:
                gen_bus.append(busi)

    # Average voltage magnitude over every load/generator *entry* in the area
    # (a bus is weighted once per attached load/machine, as before).
    n = 0
    vn = 0
    for busi in bus_red:
        hits = pfd.load_bus.count(busi) + pfd.gen_bus.count(busi)
        for _ in range(hits):
            n = n + 1
            vn = vn + pfd.bus_Vm[pfd.bus_num.index(busi)]
    Ve = vn / n

    # Pin, Qin, Vm and Va at the root bus: sum the flows on branches leaving
    # the root toward depth-1 buses.
    bus_idx = pfd.bus_num.index(bus_rut)
    Vm = pfd.bus_Vm[bus_idx]
    Va = pfd.bus_Va[bus_idx]
    Pin = 0
    Qin = 0
    PrateA = 0
    PrateB = 0
    for k, from_bus in enumerate(pfd.brc_from):
        if from_bus == bus_rut and pfd.brc_to[k] in bus_d1:
            Pin = Pin + pfd.brc_P[k]
            Qin = Qin + pfd.brc_Q[k]
            PrateA = PrateA + pfd.brc_rateA[k]
            PrateB = PrateB + pfd.brc_rateB[k]

    return Pin, Qin, PL, QL, PG, QG, PS, QS, Vm, Va, Ve, PrateA, PrateB, \
        MW_ll, MW_ul, Mvar_ll, Mvar_ul, MVA_base, load_bus, gen_bus, shunt_bus
def CalcSinglePortEqui(pfd, Pin, Qin, PL, QL, PG, QG, PS, QS, Vm, Va, Ve):
    """Compute the single-port (one machine behind an impedance) equivalent.

    Args:
        pfd: power-flow data container; only ``pfd.basemva`` is used.
        Pin, Qin: MW/Mvar flowing from the root into the area.
        PL, QL / PG, QG / PS, QS: aggregated load / generation / shunt (MW, Mvar).
        Vm, Va: root-bus voltage magnitude (pu) and angle (rad).
        Ve: target voltage magnitude at the equivalent bus (pu).

    Returns:
        (k, Vae, r, x): ideal-transformer ratio, equivalent-bus voltage angle
        (rad), and the series resistance/reactance of the equivalent branch (pu).

    Note:
        ``numpy.complex`` was deprecated in NumPy 1.20 and removed in 1.24;
        the builtin ``complex`` type is used instead (same semantics).
    """
    # Net injection into the area and root-bus voltage phasor, in per unit.
    Sin = complex(Pin, Qin) / pfd.basemva
    V1 = complex(Vm * math.cos(Va), Vm * math.sin(Va))
    alpha = Sin / V1 * Ve
    # Net equivalent load behind the port (the real part of Se is not needed
    # separately below, only its magnitude and imaginary part).
    Se = complex(PL - PG - PS, QL - QG - QS) / pfd.basemva
    a = alpha.real
    b = alpha.imag
    d = Se.imag
    # Transformer ratio that maps the root voltage onto the equivalent bus.
    k = Ve / Vm * abs(Sin) / abs(Se)
    # Equivalent-bus voltage angle from the power-balance relation.
    Vae = math.asin(k * d / math.sqrt(a * a + b * b)) - math.atan(b / a)
    V2 = complex(Ve * math.cos(Vae), Ve * math.sin(Vae))
    # Series impedance seen from the root: Z = (V1 - V2/k) / conj(Sin/V1).
    Z = (V1 - V2 / k) / (Sin / V1).conjugate()
    r = Z.real
    x = Z.imag
    return k, Vae, r, x
# Thin wrappers around the easypbr Profiler so GPU work is flushed before a
# timing range starts/ends (mirrors the TIME_START/TIME_END macros in the C++
# side of lattice_net).
def profiler_start(name):
    # Synchronize so the recorded range only covers completed GPU work.
    if(Profiler.is_profiling_gpu()):
        torch.cuda.synchronize()
    Profiler.start(name)
def profiler_end(name):
    if(Profiler.is_profiling_gpu()):
        torch.cuda.synchronize()
    Profiler.end(name)
# Macro-style aliases matching the C++ naming.
TIME_START = lambda name: profiler_start(name)
TIME_END = lambda name: profiler_end(name)


class SplatLattice(Function):
    """Autograd wrapper that splats per-point values onto the lattice.

    No gradients flow through this op: backward returns None for every input.
    """

    @staticmethod
    def forward(ctx, lattice, positions, values):
        # int(False) == 0: do not clear the hashmap before splatting.
        lattice.begin_splat(int(False))
        splatting_indices, splatting_weights=lattice.splat_standalone(positions, values )
        # The lattice is wrapped because Function outputs must be
        # tensor-like Variables.
        return lattice.values(), LatticeWrapper.wrap(lattice), splatting_indices, splatting_weights

    @staticmethod
    def backward(ctx, grad_lattice_values, grad_lattice_structure):
        # NOTE(review): forward returns 4 outputs but backward declares only 2
        # incoming gradients — confirm against the installed torch version if
        # this backward is ever actually invoked.
        return None, None, None
class DistributeLattice(Function):
    """Distributes each point onto the pos_dim+1 vertices of its enclosing
    simplex, returning per-vertex rows of (barycentric offsets, values, weight).
    """

    @staticmethod
    def forward(ctx, lattice, positions, values, reset_hashmap = True):
        lattice.begin_splat(reset_hashmap)
        distributed_lattice, distributed, splatting_indices, splatting_weights = lattice.distribute(positions, values, reset_hashmap)

        ctx.save_for_backward(splatting_indices, splatting_weights )
        # Cache the sizes needed to reshape the gradient in backward.
        ctx.pos_dim=lattice.pos_dim()
        ctx.val_dim=lattice.val_dim()
        ctx.nr_positions=positions.shape[0]

        # The lattice is wrapped because Function outputs must be tensor-like.
        return LatticeWrapper.wrap(distributed_lattice), distributed, splatting_indices, splatting_weights

    @staticmethod
    def backward(ctx, grad_distributed, grad_indices, grad_weights):
        # NOTE(review): forward returns 4 outputs but backward declares 3
        # incoming grads, and 7 values are returned for 4 forward inputs —
        # verify against the installed torch version before relying on this.
        splatting_indices, splatting_weights =ctx.saved_tensors
        pos_dim=ctx.pos_dim
        val_dim=ctx.val_dim
        nr_positions=ctx.nr_positions

        # distributed is (nr_positions*(pos_dim+1)) x (pos_dim + val_dim + 1);
        # keep only the columns that held the distributed values.
        grad_distributed_values=grad_distributed[:, pos_dim:pos_dim+val_dim]
        # For each position, val_dim values were copied to pos_dim+1 vertices;
        # sum the per-vertex contributions back into one gradient row per point.
        grad_distributed_values = grad_distributed_values.view(nr_positions, pos_dim+1, val_dim)
        grad_distributed_values = grad_distributed_values.permute(0,2,1) # nr_positions x val_dim x (pos_dim+1)
        grad_values=grad_distributed_values.sum(dim=2)

        ctx.lattice=None #release memory
        return None, None, grad_values, None, None, None, None
class ExpandLattice(Function):
    """Expands the lattice with extra vertices around (jittered copies of) the
    input positions; optionally also expands the value rows."""

    @staticmethod
    def forward(ctx, lattice_values, lattice_structure, positions, point_multiplier, noise_stddev, expand_values):
        lattice_structure.set_values(lattice_values)
        expanded_lattice=lattice_structure.expand(positions, point_multiplier, noise_stddev, expand_values)
        # Remember how many vertices the input lattice had so backward can
        # slice the gradient back down to the original (non-expanded) vertices.
        ctx.nr_values_original_lattice=lattice_structure.nr_lattice_vertices()
        # The lattice is wrapped because Function outputs must be tensor-like.
        return expanded_lattice.values(), LatticeWrapper.wrap(expanded_lattice)

    @staticmethod
    def backward(ctx, grad_lattice_values, grad_lattice_structure):
        nr_values_original_lattice=ctx.nr_values_original_lattice
        # Only the first rows correspond to original vertices; the vertices
        # added by expand() receive no gradient.
        grad_original_lattice=grad_lattice_values[0:nr_values_original_lattice, :]
        # NOTE(review): 7 values returned for 6 forward inputs — confirm
        # against the installed torch version.
        return grad_original_lattice, None, None, None, None, None, None
class Im2RowIndicesLattice(Function):
    """im2row over vertex *indices*: for every lattice vertex, gathers the
    indices of its filter-extent neighbourhood."""

    @staticmethod
    def forward(ctx, lattice_values, lattice, filter_extent, dilation, nr_filters):
        lattice.set_values(lattice_values)
        lattice_rowified=lattice.im2rowindices(lattice,filter_extent,dilation,False)

        ctx.save_for_backward(lattice_rowified)
        ctx.lattice=lattice
        ctx.filter_extent=filter_extent
        ctx.dilation=dilation
        ctx.nr_filters=nr_filters
        ctx.val_dim= lattice.val_dim()
        return lattice_rowified

    @staticmethod
    def backward(ctx, grad_lattice_rowified):
        lattice=ctx.lattice
        filter_extent=ctx.filter_extent
        dilation=ctx.dilation
        val_dim=ctx.val_dim
        nr_filters=ctx.nr_filters
        # NOTE(review): ctx.saved_tensors is a tuple, so this binds the
        # 1-tuple rather than the tensor; it is unused below, so harmless
        # but misleading.
        lattice_rowified =ctx.saved_tensors

        # Scatter the row gradients back onto the vertices (row2im is the
        # adjoint of im2row).
        grad_values=lattice.row2im(grad_lattice_rowified, dilation, filter_extent, nr_filters, lattice)

        ctx.lattice=0 #release this object so it doesnt leak
        return grad_values, None, None, None, None
class Im2RowLattice(Function):
    """im2row over vertex *values*: for every lattice vertex, gathers the
    values of its filter-extent neighbourhood into one row.

    NOTE(review): this class is defined twice in this module with identical
    bodies; this later definition shadows the earlier one — the duplicate
    should probably be deleted.
    """

    @staticmethod
    def forward(ctx, lattice_values, lattice, filter_extent, dilation, nr_filters):
        lattice.set_values(lattice_values)
        lattice_rowified=lattice.im2row(lattice,filter_extent,dilation,False)

        ctx.save_for_backward(lattice_rowified)
        ctx.lattice=lattice
        ctx.filter_extent=filter_extent
        ctx.dilation=dilation
        ctx.nr_filters=nr_filters
        ctx.val_dim= lattice.val_dim()
        return lattice_rowified

    @staticmethod
    def backward(ctx, grad_lattice_rowified):
        lattice=ctx.lattice
        filter_extent=ctx.filter_extent
        dilation=ctx.dilation
        val_dim=ctx.val_dim
        nr_filters=ctx.nr_filters
        # NOTE(review): ctx.saved_tensors is a tuple, so this binds the
        # 1-tuple rather than the tensor; it is unused below.
        lattice_rowified =ctx.saved_tensors

        # row2im is the adjoint of im2row: scatter row grads back to vertices.
        grad_values=lattice.row2im(grad_lattice_rowified, dilation, filter_extent, nr_filters, lattice)

        ctx.lattice=0 #release this object so it doesnt leak
        return grad_values, None, None, None, None
class ConvIm2RowLattice(Function):
    """Lattice convolution implemented as im2row + matrix multiply."""

    @staticmethod
    def forward(ctx, lattice_values, lattice, filter_bank, dilation):
        lattice.set_values(lattice_values)
        convolved_lattice=lattice.convolve_im2row_standalone(filter_bank, dilation, lattice, False)

        if lattice_values.requires_grad:
            ctx.save_for_backward(filter_bank, lattice_values )
        ctx.lattice=lattice
        # filter_bank is (filter_extent * val_dim) x nr_filters.
        ctx.filter_extent=int(filter_bank.shape[0]/lattice_values.shape[1])
        ctx.nr_filters= int(filter_bank.shape[1])
        ctx.dilation=dilation
        ctx.val_dim= lattice.val_dim()
        # The lattice is wrapped because Function outputs must be tensor-like.
        return convolved_lattice.values(), LatticeWrapper.wrap(convolved_lattice)

    @staticmethod
    def backward(ctx, grad_lattice_values, grad_lattice_structure):
        lattice=ctx.lattice
        filter_extent=ctx.filter_extent
        nr_filters=ctx.nr_filters
        dilation=ctx.dilation
        val_dim=ctx.val_dim
        filter_bank, lattice_values =ctx.saved_tensors
        filter_extent=int(filter_bank.shape[0]/val_dim)

        # d(loss)/d(filter): rebuild the im2row matrix for the saved input
        # values and multiply with the incoming gradient.
        lattice.set_values(lattice_values)
        lattice_rowified= lattice.im2row(lattice, filter_extent, dilation, False)
        grad_filter=lattice_rowified.transpose(0,1).mm(grad_lattice_values)

        # d(loss)/d(values): convolve the incoming gradient with the
        # rearranged (transposed) filter bank.
        filter_bank_backwards=filter_bank.transpose(0,1) # nr_filters x (filter_extent*val_dim)
        filter_bank_backwards=filter_bank_backwards.view(nr_filters,filter_extent,val_dim) # nr_filters x filter_extent x val_dim
        filter_bank_backwards=filter_bank_backwards.transpose(0,1).contiguous() # filter_extent x nr_filters x val_dim
        filter_bank_backwards=filter_bank_backwards.reshape(filter_extent*nr_filters, val_dim)

        lattice.set_values(grad_lattice_values)
        grad_lattice=lattice.convolve_im2row_standalone(filter_bank_backwards, dilation, lattice, True)
        grad_lattice_values=grad_lattice.values()

        ctx.lattice=0 #release this object so it doesnt leak
        # NOTE(review): 10 gradients returned for 4 forward inputs — confirm
        # the installed torch version tolerates the extra trailing Nones.
        return grad_lattice_values, None, grad_filter, None, None, None, None, None, None, None
class CoarsenLattice(Function):
    """Downsamples a fine lattice: creates (or reuses) a coarse vertex set and
    convolves it against the fine lattice's neighbourhoods."""

    @staticmethod
    def forward(ctx, lattice_fine_values, lattice_fine_structure, filter_bank, coarsened_lattice=None): # a precomputed coarse lattice may be supplied
        lattice_fine_structure.set_values(lattice_fine_values)

        # Create the coarse vertex structure from the fine positions; the
        # coarse values start out as zero.
        positions=lattice_fine_structure.positions()
        # NOTE(review): `== None` should be `is None`.
        if (coarsened_lattice==None):
            coarsened_lattice=lattice_fine_structure.create_coarse_verts_naive(positions)

        # Convolve at the coarse vertices with neighbours from the fine lattice.
        dilation=1
        convolved_lattice=coarsened_lattice.convolve_im2row_standalone(filter_bank, dilation, lattice_fine_structure, False)

        if lattice_fine_values.requires_grad:
            ctx.save_for_backward(filter_bank, lattice_fine_values )
        ctx.coarsened_lattice=coarsened_lattice
        ctx.lattice_fine_structure=lattice_fine_structure
        # filter_bank is (filter_extent * val_dim) x nr_filters.
        ctx.filter_extent=int(filter_bank.shape[0]/lattice_fine_values.shape[1])
        ctx.nr_filters= int(filter_bank.shape[1])
        ctx.dilation=dilation
        ctx.val_dim= lattice_fine_structure.val_dim()
        # The lattice is wrapped because Function outputs must be tensor-like.
        return convolved_lattice.values(), LatticeWrapper.wrap(convolved_lattice)

    @staticmethod
    def backward(ctx, grad_lattice_values, grad_lattice_structure):
        coarsened_lattice=ctx.coarsened_lattice
        lattice_fine_structure=ctx.lattice_fine_structure
        filter_extent=ctx.filter_extent
        nr_filters=ctx.nr_filters
        dilation=ctx.dilation
        val_dim=ctx.val_dim
        filter_bank, lattice_fine_values =ctx.saved_tensors
        filter_extent=int(filter_bank.shape[0]/val_dim)

        # d(loss)/d(filter): rebuild the coarse-over-fine im2row matrix and
        # multiply with the incoming gradient.
        lattice_fine_structure.set_values(lattice_fine_values)
        lattice_rowified= coarsened_lattice.im2row(lattice_fine_structure, filter_extent, dilation, False)
        grad_filter=lattice_rowified.transpose(0,1).mm(grad_lattice_values)

        # d(loss)/d(fine values): convolve at the fine vertices, taking the
        # coarse vertices (which carry the incoming gradient) as neighbours.
        filter_bank_backwards=filter_bank.transpose(0,1) # nr_filters x (filter_extent*val_dim)
        filter_bank_backwards=filter_bank_backwards.view(nr_filters,filter_extent,val_dim) # nr_filters x filter_extent x val_dim
        filter_bank_backwards=filter_bank_backwards.transpose(0,1).contiguous() # filter_extent x nr_filters x val_dim
        filter_bank_backwards=filter_bank_backwards.reshape(filter_extent*nr_filters, val_dim)
        coarsened_lattice.set_values(grad_lattice_values)
        grad_lattice_py=lattice_fine_structure.convolve_im2row_standalone(filter_bank_backwards, dilation, coarsened_lattice, True)
        grad_lattice=grad_lattice_py.values()

        # Release the cached structures so they don't leak.
        ctx.coarsened_lattice=0
        ctx.lattice_fine_structure=0
        return grad_lattice, None, grad_filter, None, None, None
lattice_fine_structure=ctx.lattice_fine_structure lattice_coarse_structure=ctx.lattice_coarse_structure filter_extent=ctx.filter_extent nr_filters=ctx.nr_filters dilation=ctx.dilation val_dim=ctx.val_dim filter_bank, lattice_coarse_values =ctx.saved_tensors filter_extent=int(filter_bank.shape[0]/val_dim) #reconstruct lattice_rowified lattice_coarse_structure.set_values(lattice_coarse_values) lattice_rowified= lattice_fine_structure.im2row(lattice_coarse_structure, filter_extent, dilation, False) grad_filter=lattice_rowified.transpose(0,1).mm(grad_lattice_values) filter_bank_backwards=filter_bank.transpose(0,1) # creates a nr_filters x filter_extent * val_fim filter_bank_backwards=filter_bank_backwards.view(nr_filters,filter_extent,val_dim) # nr_filters x filter_extent x val_fim filter_bank_backwards=filter_bank_backwards.transpose(0,1).contiguous() #makes it filter_extent x nr_filters x val_fim #TODO the contigous may noy be needed because the reshape does may do a contigous if needed or may also just return a view, both work filter_bank_backwards=filter_bank_backwards.reshape(filter_extent*nr_filters, val_dim) # print("finefy backwards: saved for backwards a coarsened lattice py with nr of keys", coarsened_lattice_py.nr_lattice_vertices()) lattice_fine_structure.set_values(grad_lattice_values) # lattice_coarse_structure.set_val_dim(lattice_fine_structure.val_dim()) #setting val full dim to nr of filters because we will convolve the
import random

# Lyrics from the top 100 songs of 2018.
# Fix: the original list was missing a comma between "Just enjoy the moment"
# and "I can see it all right now", so Python's implicit string concatenation
# silently fused the two lyrics into one element.
billboardtop = ["I've been me since Scarlett Road",
                "I never knew you were the someone waiting for me",
                "We got nothing but time",
                "If it cost a million, that's me",
                "I feel just like a rockstar",
                "Can't really trust nobody with all this jewelry on you",
                "You gotta believe me when I tell you",
                "So why don't you just meet me in the middle?",
                "Say you'll never ever leave",
                "Maybe it's 6:45",
                "How can I explain myself?",
                "Easier said than done",
                "Life, it goes on, what can you do?",
                "You know it",
                "Can't control my anxiety",
                "Talkin' in my sleep at night",
                "Who am I? Someone that's afraid to let go",
                "Let go of the wheel",
                "Underneath the sunrise",
                "Right now, I'm in a state of mind",
                "This feel incredible",
                "I was dreaming of bigger things",
                "Look alive",
                "My reputation's never been worse",
                "I just took what they gave me",
                "I've told you one, two, three, four, five, six thousand times",
                "I'm gonna walk away",
                "And I got the fire",
                "I wish that I could say I'm proud",
                "Concentrate",
                "Colorful hair, don't care",
                "I can hang with anybody",
                "I'm a rebel just for kicks, now",
                "Face all your fears",
                "Burning through the hours talking",
                "Lately our conversations end like it's the last goodbye",
                "Everybody hoping they could be the one",
                "So do not test me",
                "You said it",
                "It's all the same",
                "Every single word builds up to this moment",
                "Cut the lights",
                "Take my shoes and walk a mile",
                "No wedding ring",
                "I'd never lie",
                "Sometimes I feel like giving up",
                "This maybe the night that my dreams might let me know",
                "Designer clothes, fashion shows",
                "You must think that I'm new to this",
                "Give me a run for my money",
                "This is America",
                "I am what I am",
                "This is perfect",
                "Let's find out and see",
                "Why am I yelling?",
                "That's really all",
                "I don't even understand",
                "Can we talk for a moment?",
                "I wanna get married",
                "All for you",
                "I go insane",
                "Nobody else can relate",
                "You had it then you lost it",
                "The tables have turned",
                "How long has this been goin' on?",
                "Heaven only knows where you've been",
                "While we're young",
                "The feeling won't let me sleep",
                "Will the stars align?",
                "Make it easy",
                "Come on now, follow my lead",
                "Livin' too fast",
                "One night'll change your whole life",
                "I was only 8 years old watching Nick at Nite",
                "Just enjoy the moment",
                "I can see it all right now",
                "We can do anything if we put our minds to it",
                "You gave me comfort",
                "Not your reason, not your future",
                "I want to change my mind",
                "I'll come back",
                "We used to live on Instagram",
                "Wait a minute",
                "I'm just trying to shed a little light",
                "Everybody says say something",
                "This is real life",
                "We've got a long, long way to go",
                "Look good in the moonlight",
                "Am I lucky or not?",
                "We are clear",
                "Face every problem",
                "It's too good to be true",
                "It's powerful",
                "I can't stand it",
                "If you made up your mind, then make it",
                "You just might be doing whatever it takes to drown out the noise",
                "Speak up",
                "Your time is up",
                "But we never stop",
                "Send a prayer to the ones up above"
                ]
on the phone", "They say I must be one of the wonders", "Eleven years of sacrifice", "Too much television watchin' got me chasin' dreams", "I thought I told you that we won't stop", "Keep it on the down low", "So what's my chance", "I travel the world", "With the memories that can't be erased", "I guess I was wrong", "Help I need somebody", "Are we listening?", "Please excuse, if I come across rude", "I don't know where to start", "Faster than the speed of sound", "I still feel the same", "Oh my whole life has been much better than ever before", "My whole world changes", "Count on me through thick and thin", "I'm in heaven", "Can you teach me about tomorrow", "You'll see", "I almost cried 'cause it was so beautiful", "We'll be famous on TV", "I could not see all the beauty and wonder wrapping around me", "It's kinda funny", "Today is gonna be the day", "Don't ever disrespect", "But to my great surprise", "Rolling down the strip", "There is something amiss", "Press rewind", "I made my way into the night", "Hold on tight and don't let go", "Tonight's the night", "Children know, understand, and believe, that there will be more and more, love and peace, for eternity, love and peace, for everybody", "This tape will self-destruct in 5 seconds", "Everbody wants to live like they want to live", "'Cause I'm just a girl", "She wouldn't have to worry", "I want them to know", "I saw you first", "Just once if I have the chance", "Don't be afraid", "And I wonder who will walk with me when I get there", "It can't be that bad", "This I pray", "I can rock a party like nobody", "Just by mistake", "To hell with the consequence", "Now I know", "Don't leave me out in the rain", "Dancing free until the morning light", "You remind me of something", "I've seen the world, been to many places", "It will never end", "Let's make a date", "It's all up to you", "You've got all I need", "Push me to the limit", "No complications just attitude", "Never miss what you never had", "And the fear still 
shakes me", "Now I see why don't nobody leave", "You can do what you want just seize the day", "Got a fist of pure emotion", "I fight back in my mind", "Would you feel better?", "Couldn't stand him", "Sometimes you're crazy and you wonder why", "Don't be so hard on yourself" ] #Lyrics of popular songs from numerous decades and genres ofalltime = ["Well, I’ve been afraid of changing", "Cause I’ve built my life around you", "But time makes you bolder", "Even children get older", "And I’m getting older too", "I decided long ago, never to walk in anyone’s shadows", "If I fail, if I succeed", "At least I’ll live as I believe", "No matter what they take from me", "They can’t take away my dignity" "Some will win, some will lose", "Some were born to sing the blues", "The movie never ends", "It goes on and on and on and on", "If I leave here tomorrow, would you still remember me?", "If we weren't all crazy we would go insane", "Take my hand, take my whole life too", "All it'd take is one flight, we'd be in the same time zone", "Had
def _preprocess_conv3d_input(x, data_format):
    """Transpose and cast the input before the conv3d.

    # Arguments
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.

    # Returns
        A tensor.
    """
    if data_format == 'channels_last':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols, slices)
        # TF input shape: (samples, rows, cols, slices, input_depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
    return x


def _preprocess_conv2d_kernel(kernel, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(rows, cols, input_depth, depth)`,
    # independently of `data_format`.
    # Theano expects `(depth, input_depth, rows, cols)`.
    kernel = kernel.dimshuffle((3, 2, 0, 1))
    return kernel


def _preprocess_conv2d_depthwise_kernel(kernel, kernel_shape, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(rows, cols, input_depth, depth)`,
    # independently of `data_format`.
    # Theano expects `(input_depth * depth, 1, rows, cols)`
    # for depthwise convolution.
    # NOTE(review): the spatial dims are flipped before the dimshuffle —
    # presumably to convert the cross-correlation kernel for Theano's true
    # convolution on the depthwise path; confirm against the caller.
    kernel = kernel[::-1, ::-1, :, :]
    kernel = kernel.dimshuffle((2, 3, 0, 1))
    kernel = reshape(kernel, kernel_shape)
    return kernel


def _preprocess_conv3d_kernel(kernel, data_format):
    # As of Keras 2.0.0, all kernels are normalized
    # on the format `(space, input_depth, depth)`,
    # independently of `data_format`.
    # Theano expects `(depth, input_depth, space)`.
    kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
    return kernel
""" if padding == 'same': th_padding = 'half' elif padding == 'valid': th_padding = 'valid' elif padding == 'full': th_padding = 'full' else: raise ValueError('Border mode not supported:', str(padding)) return th_padding def _preprocess_conv2d_image_shape(image_shape, data_format): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if data_format == 'channels_last': if image_shape: image_shape = transpose_shape(image_shape, 'channels_first', spatial_axes=(1, 2)) if image_shape is not None: image_shape = tuple(int_or_none(v) for v in image_shape) return image_shape def _preprocess_conv3d_volume_shape(volume_shape, data_format): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if data_format == 'channels_last': if volume_shape: volume_shape = (volume_shape[0], volume_shape[4], volume_shape[1], volume_shape[2], volume_shape[3]) if volume_shape is not None: volume_shape = tuple(int_or_none(v) for v in volume_shape) return volume_shape def _preprocess_conv2d_filter_shape(filter_shape, data_format): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if filter_shape: filter_shape = (filter_shape[3], filter_shape[2], filter_shape[0], filter_shape[1]) if filter_shape is not None: filter_shape = tuple(int_or_none(v) for v in filter_shape) return filter_shape def _preprocess_conv2d_depthwise_filter_shape(filter_shape, data_format): # Theano might not accept long type def int_or_none(value): try: return int(value) except TypeError: return None if filter_shape: filter_shape = (filter_shape[3] * filter_shape[2], 1, filter_shape[0], filter_shape[1]) if filter_shape is not None: filter_shape = tuple(int_or_none(v) for v in filter_shape) return filter_shape def _preprocess_conv3d_filter_shape(filter_shape, data_format): # Theano might not accept long type def int_or_none(value): try: return 
int(value) except TypeError: return None if filter_shape: filter_shape = (filter_shape[4], filter_shape[3], filter_shape[0], filter_shape[1], filter_shape[2]) if filter_shape is not None: filter_shape = tuple(int_or_none(v) for v in filter_shape) return filter_shape def _postprocess_conv2d_output(conv_out, x, padding, kernel_shape, strides, data_format): if padding == 'same': if kernel_shape[2] % 2 == 0: i = (x.shape[2] + strides[0] - 1) // strides[0] conv_out = conv_out[:, :, :i, :] if kernel_shape[3] % 2 == 0: i = (x.shape[3] + strides[1] - 1) // strides[1] conv_out = conv_out[:, :, :, :i] if data_format == 'channels_last': conv_out = conv_out.dimshuffle((0, 2, 3, 1)) return conv_out def _postprocess_conv3d_output(conv_out, x, padding, kernel_shape, strides, data_format): if padding == 'same': if kernel_shape[2] % 2 == 0: i = (x.shape[2] + strides[0] - 1) // strides[0] conv_out = conv_out[:, :, :i, :, :] if kernel_shape[3] % 2 == 0: i = (x.shape[3] + strides[1] - 1) // strides[1] conv_out = conv_out[:, :, :, :i, :] if kernel_shape[4] % 2 == 0: i = (x.shape[4] + strides[2] - 1) // strides[2] conv_out = conv_out[:, :, :, :, :i] if data_format == 'channels_last': conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1)) return conv_out def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): """1D convolution. # Arguments x: Tensor or variable. kernel: kernel tensor. strides: stride integer. padding: string, `"same"`, `"causal"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: integer dilate rate. # Returns A tensor, result of 1D convolution. # Raises ValueError: if `data_format` is neither `channels_last` or `channels_first`. 
""" data_format = normalize_data_format(data_format) kernel_shape = int_shape(kernel) if padding == 'causal': # causal (dilated) convolution: if not kernel_shape: raise AttributeError('Causal padding requires kernel._keras_shape set.') left_pad = dilation_rate * (kernel_shape[0] - 1) x = temporal_padding(x, (left_pad, 0)) padding = 'valid' shape = int_shape(x) if data_format == 'channels_last': # original shape: (batch, length, input_dim) # add dim to x to have (batch, length, 1, input_dim) x = expand_dims(x, 2) # update x._keras_shape if shape is not None: x._keras_shape = (shape[0], shape[1], 1, shape[2]) else: # original shape: (batch, input_dim, length) # add dim to x to have (batch, input_dim, length, 1) x = expand_dims(x, 3) # update x._keras_shape if shape is not None: x._keras_shape = (shape[0], shape[1], shape[2], 1) # update dilation rate, strides dilation_rate = (dilation_rate, 1) strides = (strides, 1) # add dim to kernel (always same format independently of data_format) # i.e. (rows, 1, input_depth, depth) kernel = expand_dims(kernel, 1) output = conv2d(x, kernel, strides=strides, padding=padding, data_format=data_format, dilation_rate=dilation_rate) # remove added dim if data_format == 'channels_last': output = squeeze(output, 2) else: output = squeeze(output, 3) return output def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution. # Arguments x: Tensor or variable. kernel: kernel tensor. strides: strides tuple. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. Whether to use Theano or TensorFlow/CNTK data format for inputs/kernels/outputs. dilation_rate: tuple of 2 integers. # Returns A tensor, result of 2D convolution. # Raises ValueError: if `data_format` is neither `channels_last` or `channels_first`. 
""" data_format = normalize_data_format(data_format) image_shape = _preprocess_conv2d_image_shape(int_shape(x), data_format) kernel_shape = int_shape(kernel) if kernel_shape is None: kernel_shape = kernel.eval().shape # in case of a shared variable kernel_shape = _preprocess_conv2d_filter_shape(kernel_shape, data_format) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) th_padding = _preprocess_padding(padding) conv_out = T.nnet.conv2d(x, kernel, border_mode=th_padding, subsample=strides, input_shape=image_shape, filter_shape=kernel_shape, filter_dilation=dilation_rate) conv_out = _postprocess_conv2d_output(conv_out, x, padding, kernel_shape, strides, data_format) return conv_out def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D deconvolution (transposed convolution). # Arguments x: Tensor or variable. kernel: kernel tensor. output_shape: 1D int tensor for the output shape. strides: strides tuple. padding: string, "same" or "valid". data_format: "channels_last" or "channels_first". Whether to use Theano or TensorFlow data format in inputs/kernels/outputs. dilation_rate: tuple of 2 integers. # Raises ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ flip_filters = False data_format = normalize_data_format(data_format) if data_format == 'channels_last': output_shape = (output_shape[0], output_shape[3], output_shape[1], output_shape[2]) kernel_shape = int_shape(kernel) if kernel_shape is None: kernel_shape = kernel.eval().shape # in case of a shared variable if padding == 'same' and kernel_shape[0] % 2 == 0: raise ValueError('In `Conv2DTranspose`, with padding mode `same`, ' 'even kernel sizes are not supported with Theano. 
' 'You can set `kernel_size` to an odd number.') kernel_shape = _preprocess_conv2d_filter_shape(kernel_shape, data_format) x = _preprocess_conv2d_input(x, data_format) kernel = _preprocess_conv2d_kernel(kernel, data_format) th_padding = _preprocess_padding(padding) op = T.nnet.abstract_conv.AbstractConv2d_gradInputs( imshp=None, kshp=kernel_shape, subsample=strides, border_mode=th_padding, filter_flip=not flip_filters, filter_dilation=dilation_rate) conv_out = op(kernel, x, output_shape[2:]) conv_out = _postprocess_conv2d_output(conv_out, x, padding, kernel_shape, strides, data_format) return conv_out def separable_conv1d(x, depthwise_kernel, pointwise_kernel, strides=1, padding='valid', data_format=None, dilation_rate=1): """1D convolution with separable filters. # Arguments x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides integer. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: integer dilation rate. # Returns Output tensor. # Raises ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. """ data_format = normalize_data_format(data_format) if isinstance(strides, int): strides = (strides,) if isinstance(dilation_rate, int): dilation_rate = (dilation_rate,) if data_format == 'channels_last': spatial_start_dim = 2 else: spatial_start_dim = 3 x = expand_dims(x,
from math import sqrt from typing import Optional, Callable, Union, Dict, List import dask.array as da import dask.dataframe as dd import numpy as np import pandas as pd import xarray as xr from xarray import DataArray from xrspatial.utils import ngjit def _stats_count(data): if isinstance(data, np.ndarray): # numpy case stats_count = np.ma.count(data) else: # dask case stats_count = data.size - da.ma.getmaskarray(data).sum() return stats_count _DEFAULT_STATS = dict( mean=lambda z: z.mean(), max=lambda z: z.max(), min=lambda z: z.min(), sum=lambda z: z.sum(), std=lambda z: z.std(), var=lambda z: z.var(), count=lambda z: _stats_count(z), ) def _to_int(numeric_value): # convert an integer in float type to integer type # if not an integer, return the value itself if float(numeric_value).is_integer(): return int(numeric_value) return numeric_value def _zone_cat_data( zones, values, zone_id, nodata_values, cat=None, cat_id=None ): # array backend if isinstance(zones.data, np.ndarray): array_module = np elif isinstance(zones.data, da.Array): array_module = da if len(values.shape) == 2: # 2D case conditions = ( (zones.data != zone_id) | ~np.isfinite(values.data) # mask out nan, inf | (values.data == nodata_values) # mask out nodata_values ) if cat is not None: conditions |= values.data != cat zone_cat_data = array_module.ma.masked_where(conditions, values.data) else: # 3D case cat_data = values[cat_id].data cat_masked_data = array_module.ma.masked_invalid(cat_data) zone_cat_data = array_module.ma.masked_where( ( (zones.data != zone_id) | (cat_data == nodata_values) ), cat_masked_data ) return zone_cat_data def _stats( zones: xr.DataArray, values: xr.DataArray, unique_zones: List[int], stats_funcs: List, nodata_values: Union[int, float], ) -> Dict: stats_dict = {} # zone column stats_dict["zone"] = unique_zones # stats columns for stats in stats_funcs: stats_dict[stats] = [] for zone_id in unique_zones: # get zone values zone_values = _zone_cat_data(zones, values, zone_id, 
nodata_values) for stats in stats_funcs: stats_func = stats_funcs.get(stats) if not callable(stats_func): raise ValueError(stats) stats_dict[stats].append(stats_func(zone_values)) unique_zones = list(map(_to_int, unique_zones)) stats_dict["zone"] = unique_zones return stats_dict def _stats_dask( zones: xr.DataArray, values: xr.DataArray, zone_ids: List[Union[int, float]], stats_funcs: Dict, nodata_zones: Union[int, float], nodata_values: Union[int, float], ) -> pd.DataFrame: if zone_ids is None: # no zone_ids provided, find ids for all zones # precompute unique zones unique_zones = da.unique(zones.data[da.isfinite(zones.data)]).compute() # do not consider zone with nodata values unique_zones = sorted(list(set(unique_zones) - set([nodata_zones]))) else: unique_zones = np.array(zone_ids) stats_dict = _stats( zones, values, unique_zones, stats_funcs, nodata_values ) stats_dict = { stats: da.stack(zonal_stats, axis=0) for stats, zonal_stats in stats_dict.items() } # generate dask dataframe stats_df = dd.concat( [dd.from_dask_array(stats) for stats in stats_dict.values()], axis=1 ) # name columns stats_df.columns = stats_dict.keys() stats_df.set_index("zone") return stats_df @ngjit def _strides(flatten_zones, zone_ids): num_elements = flatten_zones.shape[0] strides = np.zeros(len(zone_ids), dtype=np.int32) zone_count = 0 for i in range(num_elements - 1): if (flatten_zones[i] != flatten_zones[i + 1]): if flatten_zones[i] in zone_ids: strides[zone_count] = i zone_count += 1 # check last elements if flatten_zones[num_elements - 1] != strides[zone_count - 1]: if flatten_zones[num_elements - 1] in zone_ids: strides[zone_count] = num_elements - 1 return strides def _stats_numpy( zones: xr.DataArray, values: xr.DataArray, zone_ids: List[Union[int, float]], stats_funcs: Dict, nodata_zones: Union[int, float], nodata_values: Union[int, float], ) -> pd.DataFrame: if zone_ids is None: # no zone_ids provided, find ids for all zones # do not consider zone with nodata values 
unique_zones = np.unique(zones.data[np.isfinite(zones.data)]) unique_zones = sorted(list(set(unique_zones) - set([nodata_zones]))) else: unique_zones = zone_ids unique_zones = list(map(_to_int, unique_zones)) unique_zones = np.asarray(unique_zones) stats_dict = {} # zone column stats_dict["zone"] = unique_zones # stats columns for stats in stats_funcs: stats_dict[stats] = [] flatten_zones = zones.data.flatten() sorted_indices = np.argsort(flatten_zones) sorted_zones = flatten_zones[sorted_indices] values_by_zones = values.data.flatten()[sorted_indices] # exclude nans from calculation # flatten_zones is already sorted, NaN elements (if any) are at the end # of the array, removing them will not affect data before them sorted_zones = sorted_zones[np.isfinite(sorted_zones)] zone_breaks = _strides(sorted_zones, unique_zones) start = 0 for i in range(len(unique_zones)): end = zone_breaks[i] + 1 zone_values = values_by_zones[start:end] # filter out non-finite and nodata_values zone_values = zone_values[ np.isfinite(zone_values) & (zone_values != nodata_values)] for stats in stats_funcs: stats_func = stats_funcs.get(stats) if not callable(stats_func): raise ValueError(stats) stats_dict[stats].append(stats_func(zone_values)) start = end stats_df = pd.DataFrame(stats_dict) stats_df.set_index("zone") return stats_df def stats( zones: xr.DataArray, values: xr.DataArray, zone_ids: Optional[List[Union[int, float]]] = None, stats_funcs: Union[Dict, List] = [ "mean", "max", "min", "sum", "std", "var", "count", ], nodata_zones: Optional[Union[int, float]] = None, nodata_values: Union[int, float] = None, ) -> Union[pd.DataFrame, dd.DataFrame]: """ Calculate summary statistics for each zone defined by a zone dataset, based on values aggregate. A single output value is computed for every zone in the input zone dataset. This function currently supports numpy backed, and dask with numpy backed xarray DataArray. 
Parameters ---------- zones : xr.DataArray zones is a 2D xarray DataArray of numeric values. A zone is all the cells in a raster that have the same value, whether or not they are contiguous. The input `zones` raster defines the shape, values, and locations of the zones. An integer field in the input `zones` DataArray defines a zone. values : xr.DataArray values is a 2D xarray DataArray of numeric values (integers or floats). The input `values` raster contains the input values used in calculating the output statistic for each zone. zone_ids : list of ints, or floats List of zones to be included in calculation. If no zone_ids provided, all zones will be used. stats_funcs : dict, or list of strings, default=['mean', 'max', 'min', 'sum', 'std', 'var', 'count']) The statistics to calculate for each zone. If a list, possible choices are subsets of the default options. In the dictionary case, all of its values must be callable. Function takes only one argument that is the `values` raster. The key become the column name in the output DataFrame. nodata_zones: int, float, default=None Nodata value in `zones` raster. Cells with `nodata_zones` do not belong to any zone, and thus excluded from calculation. nodata_values: int, float, default=None Nodata value in `values` raster. Cells with `nodata_values` do not belong to any zone, and thus excluded from calculation. Returns ------- stats_df : Union[pandas.DataFrame, dask.dataframe.DataFrame] A pandas DataFrame, or a dask DataFrame where each column is a statistic and each row is a zone with zone id. Examples -------- .. plot:: :include-source: import numpy as np import xarray as xr from xrspatial.zonal import stats height, width = 10, 10 # Values raster values = xr.DataArray(np.arange(height * width).reshape(height, width)) # Zones raster zones = xr.DataArray(np.zeros(height * width).reshape(height, width)) zones[:5, :5] = 0 zones[:5, 5:] = 10 zones[5:, :5] = 20 zones[5:, 5:] = 30 .. 
sourcecode:: python >>> # Calculate Stats >>> stats_df = stats(zones=zones, values=values) >>> print(stats_df) zone mean max min sum std var count 0 0 22.0 44 0 550 14.21267 202.0 25 1 10 27.0 49 5 675 14.21267 202.0 25 2 20 72.0 94 50 1800 14.21267 202.0 25 3 30 77.0 99 55 1925 14.21267 202.0 25 >>> # Custom Stats >>> custom_stats ={'double_sum': lambda val: val.sum()*2} >>> custom_stats_df = stats(zones=zones, values=values, stats_funcs=custom_stats) >>> print(custom_stats_df) zone double_sum 0 0 1100 1 10 1350 2 20 3600 3 30 3850 >>> # Calculate Stats with dask backed xarray DataArrays >>> dask_stats_df = stats(zones=dask_zones, values=dask_values) >>> print(type(dask_stats_df)) <class 'dask.dataframe.core.DataFrame'> >>> print(dask_stats_df.compute()) zone mean max min sum std var count 0 0 22.0 44 0 550 14.21267 202.0 25 1 10 27.0 49 5 675 14.21267 202.0 25 2 20 72.0 94 50 1800 14.21267 202.0 25 3 30 77.0 99 55 1925 14.21267 202.0 25 >>> # Custom Stats with dask backed xarray DataArrays >>> dask_custom_stats ={'double_sum': lambda val: val.sum()*2} >>> dask_custom_stats_df = stats( >>> zones=dask_zones, values=dask_values, stats_funcs=custom_stats >>> ) >>> print(type(dask_custom_stats_df)) <class 'dask.dataframe.core.DataFrame'> >>> print(dask_custom_stats_df.compute()) zone double_sum 0 0 1100 1 10 1350 2 20 3600 3 30 3850 """ if zones.shape != values.shape: raise ValueError("`zones` and `values` must have same shape.") if not ( issubclass(zones.data.dtype.type, np.integer) or issubclass(zones.data.dtype.type, np.floating) ): raise ValueError("`zones` must be an array of integers.") if not ( issubclass(values.data.dtype.type, np.integer) or issubclass(values.data.dtype.type, np.floating) ): raise ValueError("`values` must be an array of integers or floats.") if isinstance(stats_funcs, list): # create a dict of stats stats_funcs_dict = {} for stats in stats_funcs: func = _DEFAULT_STATS.get(stats, None) if func is None: err_str = f"Invalid stat name. 
{stats} option not supported." raise ValueError(err_str) stats_funcs_dict[stats] = func elif isinstance(stats_funcs, dict): stats_funcs_dict = stats_funcs.copy() if isinstance(values.data, np.ndarray): # numpy
# NOTE(review): "<filename>class_hierarchy.py" is a scraped-dataset artifact
# that preceded these imports; preserved here as a comment.
import numpy as np
import types

from sklearn.metrics import average_precision_score


class ClassHierarchy(object):
    """ Represents a class taxonomy and can be used to find Lowest Common Subsumers or to compute class similarities. """

    def __init__(self, parents, children):
        """ Initializes a new ClassHierarchy.

        parents - Dictionary mapping class labels to lists of parent class labels in the hierarchy.
        children - Dictionary mapping class labels to lists of children class labels in the hierarchy.
        """
        object.__init__(self)
        self.parents = parents
        self.children = children
        # Every label that appears anywhere in the taxonomy.
        self.nodes = set(self.parents.keys()) | set(self.children.keys())
        # Memoization caches. Depth-related caches are keyed by the
        # use_min_depth flag; the LCS cache keys include the flag as well.
        self._depths = {False: {}, True: {}}
        self._hyp_depth_cache = {False: {}, True: {}}
        self._hyp_dist_cache = {}
        self._lcs_cache = {}
        self._wup_cache = {}
        self._compute_heights()
        self.max_height = max(self.heights.values())

    def _compute_heights(self):
        """ Computes the heights of all nodes in the hierarchy. """

        def height(id):
            if id not in self.heights:
                if id in self.children:
                    # Inner node: 1 + tallest child; an empty child list
                    # yields 1 + (-1) = 0, same as a leaf.
                    self.heights[id] = 1 + max(
                        (height(child) for child in self.children[id]),
                        default=-1)
                else:
                    self.heights[id] = 0
            return self.heights[id]

        self.heights = {}
        for node in self.nodes:
            height(node)

    def is_tree(self):
        """ Determines whether the hierarchy is a tree.

        Note that some popular hierarchies such as WordNet are not trees, but allow nodes to have multiple parents.
        """
        return all(len(parents) <= 1 for parents in self.parents.values())

    def all_hypernym_depths(self, id, use_min_depth=False):
        """ Determines all hypernyms of a given element (including the element itself) along with their depth in the hierarchy.

        id - ID of the element.
        use_min_depth - If set to `True`, use the shortest path from the root to an element to determine its depth, otherwise the longest path.

        Returns: dictionary mapping hypernym ids to depths. Root nodes have depth 1.
        """
        if id not in self._hyp_depth_cache[use_min_depth]:
            depths = {}
            if (id not in self.parents) or (len(self.parents[id]) == 0):
                depths[id] = 1  # root nodes have depth 1
            else:
                for parent in self.parents[id]:
                    for hyp, depth in self.all_hypernym_depths(parent, use_min_depth).items():
                        depths[hyp] = depth
                agg = min if use_min_depth else max
                depths[id] = 1 + agg(depths[p] for p in self.parents[id])
            self._hyp_depth_cache[use_min_depth][id] = depths
            self._depths[use_min_depth][id] = depths[id]
        return self._hyp_depth_cache[use_min_depth][id]

    def all_hypernym_distances(self, id):
        """ Determines all hypernyms of a given element (including the element itself) along with their distance from the element.

        id - ID of the element.

        Returns: dictionary mapping hypernym ids to distances, measured in the minimum length of edges between two nodes.
        """
        if id not in self._hyp_dist_cache:
            distances = {id: 0}
            if id in self.parents:
                for parent in self.parents[id]:
                    for hyp, dist in self.all_hypernym_distances(parent).items():
                        # Keep the shortest route when a hypernym is
                        # reachable through several parents.
                        if (hyp not in distances) or (dist + 1 < distances[hyp]):
                            distances[hyp] = dist + 1
            self._hyp_dist_cache[id] = distances
        return self._hyp_dist_cache[id]

    def root_paths(self, id):
        """ Determines all paths from a given element (excluding the element itself) to a root node in the hierarchy.

        id - ID of the element.

        Returns: list of lists of node ids, each list beginning with a direct hypernym of the given element and ending with a root node
        """
        paths = []
        if id in self.parents:
            for parent in self.parents[id]:
                parent_paths = self.root_paths(parent)
                if len(parent_paths) == 0:
                    paths.append([parent])
                else:
                    for parent_path in parent_paths:
                        paths.append([parent] + parent_path)
        return paths

    def lcs(self, a, b, use_min_depth=False):
        """ Finds the lowest common subsumer of two elements.

        a - The ID of the first term.
        b - The ID of the second term.
        use_min_depth - If set to `True`, use the shortest path from the root to an element to determine its depth, otherwise the longest path.

        Returns: the id of the LCS or `None` if the two terms do not share any hypernyms.
        """
        # BUGFIX: the cache was previously keyed on (a, b) only, so a call
        # with a different `use_min_depth` value returned a stale result.
        # The flag is now part of the cache key.
        key = (a, b, use_min_depth)
        if key not in self._lcs_cache:
            hypernym_depths = self.all_hypernym_depths(a, use_min_depth)
            common_hypernyms = set(hypernym_depths.keys()) & set(self.all_hypernym_depths(b, use_min_depth).keys())
            result = max(common_hypernyms,
                         key=lambda hyp: hypernym_depths[hyp],
                         default=None)
            # LCS is symmetric, so cache both argument orders.
            self._lcs_cache[key] = self._lcs_cache[(b, a, use_min_depth)] = result
        return self._lcs_cache[key]

    def shortest_path_length(self, a, b):
        """ Determines the length of the shortest path between two elements of the hierarchy.

        a - The ID of the first term.
        b - The ID of the second term.

        Returns: length of the shortest path from `a` to `b`, measured in the number of edges. `None` is returned if there is no path.
        """
        dist1 = self.all_hypernym_distances(a)
        dist2 = self.all_hypernym_distances(b)
        common_hypernyms = set(dist1.keys()) & set(dist2.keys())
        # Shortest path goes up from `a` to some common hypernym and down
        # to `b`; minimize over all common hypernyms.
        return min((dist1[hyp] + dist2[hyp] for hyp in common_hypernyms), default=None)

    def depth(self, id, use_min_depth=False):
        """ Determines the depth of a certain element in the hierarchy.

        id - The ID of the element.
        use_min_depth - If set to `True`, use the shortest path from the root to an element to determine its depth, otherwise the longest path.

        Returns: the depth of the given element. Root nodes have depth 1.
        """
        if id not in self._depths[use_min_depth]:
            if (not id in self.parents) or (len(self.parents[id]) == 0):
                self._depths[use_min_depth][id] = 1  # root nodes have depth 1
            else:
                parent_depths = [self.depth(p, use_min_depth) for p in self.parents[id]]
                agg = min if use_min_depth else max
                self._depths[use_min_depth][id] = 1 + agg(parent_depths)
        return self._depths[use_min_depth][id]

    def wup_similarity(self, a, b):
        """ Computes the Wu-Palmer similarity of two elements in the hierarchy.

        a - The ID of the first term.
        b - The ID of the second term.

        Returns: similarity score in the range (0,1].
        """
        if (a, b) not in self._wup_cache:
            lcs = self.lcs(a, b)
            # NOTE(review): assumes a and b share at least one hypernym;
            # if lcs is None, depth()/shortest_path_length() will fail.
            ds = self.depth(lcs)
            d1 = ds + self.shortest_path_length(a, lcs)
            d2 = ds + self.shortest_path_length(b, lcs)
            self._wup_cache[(a, b)] = self._wup_cache[(b, a)] = (2.0 * ds) / (d1 + d2)
        return self._wup_cache[(a, b)]

    def lcs_height(self, a, b):
        """ Computes the height of the lowest common subsumer of two elements, divided by the height of the entire hierarchy.

        a - The ID of the first term.
        b - The ID of the second term.

        Returns: dissimilarity score in the range [0,1].
        """
        return self.heights[self.lcs(a, b)] / self.max_height

    def hierarchical_precision(self, retrieved, labels, ks=[1, 10, 50, 100],
                               compute_ahp=False, compute_ap=False,
                               ignore_qids=True, all_ids=None):
        """ Computes average hierarchical precision for lists of retrieved images at several cut-off points (Deng, Berg, Fei-Fei; CVPR 2011).

        NOTE(review): the source chunk is truncated in the middle of this
        method's original docstring; the body is missing from this view.
        The signature (including the mutable-but-unread `ks` default) is
        preserved so callers are unaffected once the body is restored from
        the original file.
        """
        raise NotImplementedError(
            'hierarchical_precision body lost in source truncation')
<gh_stars>100-1000 import inspect import json import uuid from collections import Counter from datetime import datetime from io import StringIO import mock from django.contrib.admin.utils import NestedObjects from django.db import transaction, IntegrityError from django.db.models.signals import post_delete, post_save from django.test import SimpleTestCase, TestCase from nose.tools import nottest from casexml.apps.case.mock import CaseFactory, CaseIndex, CaseStructure from corehq.apps.commtrack.helpers import make_product from corehq.apps.commtrack.tests.util import get_single_balance_block from corehq.apps.domain.models import Domain from corehq.apps.dump_reload.sql import SqlDataDumper, SqlDataLoader from corehq.apps.dump_reload.sql.dump import ( get_model_iterator_builders_to_dump, get_objects_to_dump, ) from corehq.apps.dump_reload.sql.load import ( DefaultDictWithKey, constraint_checks_deferred, ) from corehq.apps.hqcase.utils import submit_case_blocks from corehq.apps.products.models import SQLProduct from corehq.apps.zapier.consts import EventTypes from corehq.apps.zapier.models import ZapierSubscription from corehq.apps.zapier.signals.receivers import ( zapier_subscription_post_delete, ) from corehq.blobs.models import BlobMeta from corehq.form_processor.backends.sql.dbaccessors import LedgerAccessorSQL from corehq.form_processor.interfaces.dbaccessors import ( CaseAccessors, FormAccessors, ) from corehq.form_processor.models import ( CaseTransaction, CommCareCaseIndexSQL, CommCareCaseSQL, LedgerTransaction, LedgerValue, XFormInstanceSQL, ) from corehq.form_processor.tests.utils import ( FormProcessorTestUtils, create_form_for_test, sharded, ) from corehq.messaging.scheduling.scheduling_partitioned.models import ( AlertScheduleInstance, ) class BaseDumpLoadTest(TestCase): @classmethod def setUpClass(cls): post_delete.disconnect(zapier_subscription_post_delete, sender=ZapierSubscription) super(BaseDumpLoadTest, cls).setUpClass() cls.domain_name = 
uuid.uuid4().hex cls.domain = Domain(name=cls.domain_name) cls.domain.save() cls.default_objects_counts = Counter({}) @classmethod def tearDownClass(cls): cls.domain.delete() super(BaseDumpLoadTest, cls).tearDownClass() post_delete.connect(zapier_subscription_post_delete, sender=ZapierSubscription) def delete_sql_data(self): delete_domain_sql_data_for_dump_load_test(self.domain_name) def tearDown(self): self.delete_sql_data() super(BaseDumpLoadTest, self).tearDown() def _dump_and_load(self, expected_dump_counts, load_filter=None, expected_load_counts=None, dumper_fn=None): expected_load_counts = expected_load_counts or expected_dump_counts expected_dump_counts.update(self.default_objects_counts) models = list(expected_dump_counts) self._check_signals_handle_raw(models) output_stream = StringIO() if dumper_fn: dumper_fn(output_stream) else: SqlDataDumper(self.domain_name, [], []).dump(output_stream) self.delete_sql_data() # make sure that there's no data left in the DB objects_remaining = list(get_objects_to_dump(self.domain_name, [], [])) object_classes = [obj.__class__.__name__ for obj in objects_remaining] counts = Counter(object_classes) self.assertEqual([], objects_remaining, 'Not all data deleted: {}'.format(counts)) # Dump actual_model_counts, dump_lines = self._parse_dump_output(output_stream) expected_model_counts = _normalize_object_counter(expected_dump_counts) self.assertDictEqual(dict(expected_model_counts), dict(actual_model_counts)) # Load loader = SqlDataLoader(object_filter=load_filter) loaded_model_counts = loader.load_objects(dump_lines) normalized_expected_loaded_counts = _normalize_object_counter(expected_load_counts, for_loaded=True) self.assertDictEqual(dict(normalized_expected_loaded_counts), dict(loaded_model_counts)) self.assertEqual(sum(expected_load_counts.values()), sum(loaded_model_counts.values())) return dump_lines def _parse_dump_output(self, output_stream): dump_output = output_stream.getvalue().split('\n') dump_lines = 
[line.strip() for line in dump_output if line.strip()]
        # Tally dumped objects by their serialized 'model' label so tests can
        # compare against an expected Counter.
        actual_model_counts = Counter([json.loads(line)['model'] for line in dump_lines])
        return actual_model_counts, dump_lines

    def _check_signals_handle_raw(self, models):
        """Ensure that any post_save signal handlers have been updated to handle 'raw' calls."""
        # Receivers known to already handle raw deserialization; skip them.
        whitelist_receivers = [
            'django_digest.models._post_save_persist_partial_digests'
        ]
        for model in models:
            for receiver in post_save._live_receivers(model):
                receiver_path = receiver.__module__ + '.' + receiver.__name__
                if receiver_path in whitelist_receivers:
                    continue
                # NOTE(review): inspect.getargspec was removed in Python 3.11;
                # inspect.getfullargspec is the replacement — confirm the
                # Python version this runs under.
                args = inspect.getargspec(receiver).args
                message = 'Signal handler "{}" for model "{}" missing raw arg'.format(
                    receiver, model
                )
                self.assertIn('raw', args, message)


@nottest
def delete_domain_sql_data_for_dump_load_test(domain_name):
    """Delete every SQL object that would be dumped for ``domain_name``.

    Iterates the same model/queryset builders used by the dumper and deletes
    each queryset (with nested/related objects collected) inside an atomic
    transaction with constraint checks deferred, then asserts nothing is left
    to dump.
    """
    for model_class, builder in get_model_iterator_builders_to_dump(domain_name, [], []):
        for iterator in builder.querysets():
            with transaction.atomic(using=iterator.db), \
                    constraint_checks_deferred(iterator.db):
                # Collect the queryset plus anything cascading from it, then
                # delete the whole graph in one pass.
                collector = NestedObjects(using=iterator.db)
                collector.collect(iterator)
                collector.delete()

    assert [] == list(get_objects_to_dump(domain_name, [], [])), "Not all SQL objects deleted"


@sharded
class TestSQLDumpLoadShardedModels(BaseDumpLoadTest):
    """Dump/load round-trip tests for sharded models (forms, cases, ledgers)."""
    maxDiff = None

    @classmethod
    def setUpClass(cls):
        super(TestSQLDumpLoadShardedModels, cls).setUpClass()
        cls.factory = CaseFactory(domain=cls.domain_name)
        cls.form_accessors = FormAccessors(cls.domain_name)
        cls.case_accessors = CaseAccessors(cls.domain_name)
        cls.product = make_product(cls.domain_name, 'A Product', 'prodcode_a')
        # The product created above is part of every test's baseline counts.
        cls.default_objects_counts.update({SQLProduct: 1})

    @classmethod
    def tearDownClass(cls):
        FormProcessorTestUtils.delete_all_cases_forms_ledgers(cls.domain_name)
        super(TestSQLDumpLoadShardedModels, cls).tearDownClass()

    def test_dump_load_form(self):
        """Two forms survive a dump/load cycle with identical JSON."""
        expected_object_counts = Counter({
            XFormInstanceSQL: 2,
            BlobMeta: 2
        })

        pre_forms = [
            create_form_for_test(self.domain_name),
            create_form_for_test(self.domain_name)
        ]
        self._dump_and_load(expected_object_counts)

        form_ids = self.form_accessors.get_all_form_ids_in_domain('XFormInstance')
        self.assertEqual(set(form_ids), set(form.form_id for form in pre_forms))

        for pre_form in pre_forms:
            post_form = self.form_accessors.get_form(pre_form.form_id)
            self.assertDictEqual(pre_form.to_json(), post_form.to_json())

    def test_sql_dump_load_case(self):
        """A parent/child case pair (with an index and an update) round-trips."""
        expected_object_counts = Counter({
            XFormInstanceSQL: 2,
            BlobMeta: 2,
            CommCareCaseSQL: 2,
            CaseTransaction: 3,
            CommCareCaseIndexSQL: 1
        })

        pre_cases = self.factory.create_or_update_case(
            CaseStructure(
                attrs={'case_name': 'child', 'update': {'age': 3, 'diabetic': False}, 'create': True},
                indices=[
                    CaseIndex(CaseStructure(attrs={'case_name': 'parent', 'update': {'age': 42}, 'create': True})),
                ]
            )
        )
        # Update the child once more so the dump includes a second transaction
        # on that case.
        pre_cases[0] = self.factory.create_or_update_case(CaseStructure(
            case_id=pre_cases[0].case_id,
            attrs={'external_id': 'billie jean', 'update': {'name': '<NAME>'}}
        ))[0]

        self._dump_and_load(expected_object_counts)

        case_ids = self.case_accessors.get_case_ids_in_domain()
        self.assertEqual(set(case_ids), set(case.case_id for case in pre_cases))
        for pre_case in pre_cases:
            post_case = self.case_accessors.get_case(pre_case.case_id)
            self.assertDictEqual(pre_case.to_json(), post_case.to_json())

    def test_ledgers(self):
        """Ledger values and transactions survive a dump/load cycle."""
        expected_object_counts = Counter({
            XFormInstanceSQL: 3,
            BlobMeta: 3,
            CommCareCaseSQL: 1,
            CaseTransaction: 3,
            LedgerValue: 1,
            LedgerTransaction: 2
        })

        case = self.factory.create_case()
        # Two balance submissions -> one ledger value, two ledger transactions.
        submit_case_blocks([
            get_single_balance_block(case.case_id, self.product._id, 10)
        ], self.domain_name)
        submit_case_blocks([
            get_single_balance_block(case.case_id, self.product._id, 5)
        ], self.domain_name)

        pre_ledger_values = LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)
        pre_ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)
        self.assertEqual(1, len(pre_ledger_values))
        self.assertEqual(2, len(pre_ledger_transactions))

        self._dump_and_load(expected_object_counts)

        post_ledger_values = LedgerAccessorSQL.get_ledger_values_for_case(case.case_id)
        post_ledger_transactions = LedgerAccessorSQL.get_ledger_transactions_for_case(case.case_id)
        self.assertEqual(1, len(post_ledger_values))
        self.assertEqual(2, len(post_ledger_transactions))
        self.assertEqual(pre_ledger_values[0].ledger_reference, post_ledger_values[0].ledger_reference)
        self.assertDictEqual(pre_ledger_values[0].to_json(), post_ledger_values[0].to_json())

        # Sort by primary key so pre/post transactions line up pairwise.
        pre_ledger_transactions = sorted(pre_ledger_transactions, key=lambda t: t.pk)
        post_ledger_transactions = sorted(post_ledger_transactions, key=lambda t: t.pk)
        for pre, post in zip(pre_ledger_transactions, post_ledger_transactions):
            self.assertEqual(str(pre), str(post))


class TestSQLDumpLoad(BaseDumpLoadTest):
    """Dump/load round-trip tests for assorted non-sharded SQL models."""

    def test_case_search_config(self):
        """CaseSearchConfig and its FuzzyProperties m2m survive dump/load."""
        from corehq.apps.case_search.models import CaseSearchConfig, FuzzyProperties
        expected_object_counts = Counter({
            CaseSearchConfig: 1,
            FuzzyProperties: 2,
        })

        pre_config, created = CaseSearchConfig.objects.get_or_create(pk=self.domain_name)
        pre_config.enabled = True
        # NOTE(review): these use self.domain while the rest of the class uses
        # self.domain_name — confirm both attributes exist and agree.
        pre_fuzzies = [
            FuzzyProperties(domain=self.domain, case_type='dog', properties=['breed', 'color']),
            FuzzyProperties(domain=self.domain, case_type='owner', properties=['name']),
        ]
        for fuzzy in pre_fuzzies:
            fuzzy.save()
        pre_config.fuzzy_properties.set(pre_fuzzies)
        pre_config.save()

        self._dump_and_load(expected_object_counts)

        post_config = CaseSearchConfig.objects.get(domain=self.domain_name)
        self.assertTrue(post_config.enabled)
        self.assertEqual(pre_config.fuzzy_properties, post_config.fuzzy_properties)
        post_fuzzies = FuzzyProperties.objects.filter(domain=self.domain_name)
        self.assertEqual(set(f.case_type for f in post_fuzzies), {'dog', 'owner'})

    def test_users(self):
        """Two mobile users and one web user round-trip as three auth Users."""
        from corehq.apps.users.models import CommCareUser
        from corehq.apps.users.models import WebUser
        from django.contrib.auth.models import User

        expected_object_counts = Counter({User: 3})

        ccuser_1 = CommCareUser.create(
            domain=self.domain_name,
            username='user_1',
            password='<PASSWORD>',
            created_by=None,
            created_via=None,
            email='<EMAIL>',
        )
        ccuser_2 = CommCareUser.create(
            domain=self.domain_name,
            username='user_2',
            password='<PASSWORD>',
            created_by=None,
            created_via=None,
            email='<EMAIL>',
        )
        web_user = WebUser.create(
            domain=self.domain_name,
            username='webuser_t1',
            password='<PASSWORD>',
            created_by=None,
            created_via=None,
            email='<EMAIL>',
        )
        self.addCleanup(ccuser_1.delete, self.domain_name, deleted_by=None)
        self.addCleanup(ccuser_2.delete, self.domain_name, deleted_by=None)
        self.addCleanup(web_user.delete, self.domain_name, deleted_by=None)

        self._dump_and_load(expected_object_counts)

    def test_dump_roles(self):
        """User roles, their permissions and assignable-by links round-trip."""
        from corehq.apps.users.models import UserRole, Permissions, RoleAssignableBy, RolePermission

        expected_object_counts = Counter({
            UserRole: 2,
            RolePermission: 11,
            RoleAssignableBy: 1
        })

        role1 = UserRole.create(self.domain_name, 'role1')
        role2 = UserRole.create(
            self.domain_name, 'role1',
            permissions=Permissions(edit_web_users=True),
            assignable_by=[role1.id]
        )
        self.addCleanup(role1.delete)
        self.addCleanup(role2.delete)

        self._dump_and_load(expected_object_counts)

        role1_loaded = UserRole.objects.get(id=role1.id)
        role2_loaded = UserRole.objects.get(id=role2.id)

        self.assertEqual(role1_loaded.permissions.to_list(), Permissions().to_list())
        self.assertEqual(role1_loaded.assignable_by, [])
        self.assertEqual(role2_loaded.permissions.to_list(), Permissions(edit_web_users=True).to_list())
        self.assertEqual(role2_loaded.assignable_by, [role1_loaded.get_id])

    def test_device_logs(self):
        """Device log entries parsed from a submitted devicelog form round-trip."""
        from corehq.apps.receiverwrapper.util import submit_form_locally
        from phonelog.models import DeviceReportEntry, ForceCloseEntry, UserEntry, UserErrorEntry
        from corehq.apps.users.models import CommCareUser
        from django.contrib.auth.models import User

        expected_object_counts = Counter({
            User: 1,
            DeviceReportEntry: 7,
            UserEntry: 1,
            UserErrorEntry: 2,
            ForceCloseEntry: 1
        })

        user = CommCareUser.create(
            domain=self.domain_name,
            username='user_1',
            password='<PASSWORD>',
            created_by=None,
            created_via=None,
            email='<EMAIL>',
            uuid='428d454aa9abc74e1964e16d3565d6b6'  # match ID in devicelog.xml
        )
        self.addCleanup(user.delete, self.domain_name, deleted_by=None)

        with open('corehq/ex-submodules/couchforms/tests/data/devicelogs/devicelog.xml', 'rb') as f:
            xml = f.read()
        submit_form_locally(xml, self.domain_name)

        self._dump_and_load(expected_object_counts)

    def test_demo_user_restore(self):
        """A DemoUserRestore tied to a mobile user round-trips."""
        from corehq.apps.users.models import CommCareUser
        from corehq.apps.ota.models import DemoUserRestore
        from django.contrib.auth.models import User

        expected_object_counts = Counter({
            User: 1,
            DemoUserRestore: 1
        })

        user_id = uuid.uuid4().hex
        user = CommCareUser.create(
            domain=self.domain_name,
            username='user_1',
            password='<PASSWORD>',
            created_by=None,
            created_via=None,
            email='<EMAIL>',
            uuid=user_id
        )
        self.addCleanup(user.delete, self.domain_name, deleted_by=None)

        DemoUserRestore(
            demo_user_id=user_id,
            restore_blob_id=uuid.uuid4().hex,
            content_length=1027,
            restore_comment="Test migrate demo user restore"
        ).save()

        self._dump_and_load(expected_object_counts)

    def test_products(self):
        """Active and archived products round-trip; archive flag is preserved."""
        from corehq.apps.products.models import SQLProduct

        expected_object_counts = Counter({SQLProduct: 3})

        p1 = SQLProduct.objects.create(domain=self.domain_name, product_id='test1', name='test1')
        p2 = SQLProduct.objects.create(domain=self.domain_name, product_id='test2', name='test2')
        parchived = SQLProduct.objects.create(domain=self.domain_name, product_id='test3', name='test3', is_archived=True)

        self._dump_and_load(expected_object_counts)

        self.assertEqual(2, SQLProduct.active_objects.filter(domain=self.domain_name).count())

        all_active = SQLProduct.active_objects.filter(domain=self.domain_name).all()
        self.assertTrue(p1 in all_active)
        self.assertTrue(p2 in all_active)
        self.assertTrue(parchived not in all_active)

    def test_location_type(self):
        """A seven-node location-type hierarchy round-trips with structure intact."""
        from corehq.apps.locations.models import LocationType
        from corehq.apps.locations.tests.test_location_types import make_loc_type

        expected_object_counts = Counter({LocationType: 7})

        state = make_loc_type('state', domain=self.domain_name)
        district = make_loc_type('district', state, domain=self.domain_name)
        section = make_loc_type('section', district, domain=self.domain_name)
        block = make_loc_type('block', district, domain=self.domain_name)
        center = make_loc_type('center', block, domain=self.domain_name)
        county = make_loc_type('county', state, domain=self.domain_name)
        city = make_loc_type('city', county, domain=self.domain_name)

        self._dump_and_load(expected_object_counts)

        hierarchy = LocationType.objects.full_hierarchy(self.domain_name)
        # Expected shape: {id: (node, {child_id: (child, {...}), ...})}
        desired_hierarchy = {
            state.id: (
                state,
                {
                    district.id: (
                        district,
                        {
                            section.id: (section, {}),
                            block.id: (block, {
                                center.id: (center, {}),
                            }),
                        },
                    ),
                    county.id: (
                        county,
                        {city.id: (city, {})},
                    ),
                },
            ),
        }
        self.assertEqual(hierarchy, desired_hierarchy)

    def test_location(self):
        """A nested location tree round-trips and child lookups still work."""
        from corehq.apps.locations.models import LocationType, SQLLocation
        from corehq.apps.locations.tests.util import setup_locations_and_types

        expected_object_counts = Counter({LocationType: 3, SQLLocation: 11})

        location_type_names = ['province', 'district', 'city']
        location_structure = [
            ('Western Cape', [
                ('Cape Winelands', [
                    ('Stellenbosch', []),
                    ('Paarl', []),
                ]),
                ('Cape Town', [
                    ('Cape Town City', []),
                ])
            ]),
            ('Gauteng', [
                ('Ekurhuleni ', [
                    ('Alberton', []),
                    ('Benoni', []),
                    ('Springs', []),
                ]),
            ]),
        ]

        location_types, locations = setup_locations_and_types(
            self.domain_name,
            location_type_names,
            [],
            location_structure,
        )

        self._dump_and_load(expected_object_counts)

        names = ['Cape Winelands', 'Paarl', 'Cape Town']
        location_ids = [locations[name].location_id for name in names]
        result = SQLLocation.objects.get_locations_and_children(location_ids)
        self.assertItemsEqual(
            [loc.name for loc in result],
            ['Cape Winelands', 'Stellenbosch', 'Paarl', 'Cape Town', 'Cape Town City']
        )

        result = SQLLocation.objects.get_locations_and_children([locations['Gauteng'].location_id])
        self.assertItemsEqual(
            [loc.name for loc in result],
            ['Gauteng', 'Ekurhuleni ', 'Alberton', 'Benoni', 'Springs']
        )

    def test_sms(self):
        """PhoneNumber plus a messaging event/sub-event round-trip."""
        from corehq.apps.sms.models import PhoneNumber, MessagingEvent, MessagingSubEvent

        expected_object_counts = Counter({PhoneNumber: 1, MessagingEvent: 1, MessagingSubEvent: 1})

        phone_number = PhoneNumber(
            domain=self.domain_name,
            owner_doc_type='CommCareCase',
            owner_id='fake-owner-id1',
            phone_number='99912341234',
            backend_id=None,
            ivr_backend_id=None,
            verified=True,
            is_two_way=True,
            pending_verification=False,
            contact_last_modified=datetime.utcnow()
        )
        phone_number.save()
        event = MessagingEvent.objects.create(
            domain=self.domain_name,
            date=datetime.utcnow(),
            source=MessagingEvent.SOURCE_REMINDER,
            content_type=MessagingEvent.CONTENT_SMS,
            status=MessagingEvent.STATUS_COMPLETED
        )
        MessagingSubEvent.objects.create(
            parent=event,
            date=datetime.utcnow(),
            recipient_type=MessagingEvent.RECIPIENT_CASE,
            content_type=MessagingEvent.CONTENT_SMS,
            status=MessagingEvent.STATUS_COMPLETED
        )

        self._dump_and_load(expected_object_counts)

    def test_message_scheduling(self):
        """A partitioned AlertScheduleInstance round-trips."""
        AlertScheduleInstance(
            schedule_instance_id=uuid.uuid4(),
            domain=self.domain_name,
            recipient_type='CommCareUser',
            recipient_id=uuid.uuid4().hex,
            current_event_num=0,
            schedule_iteration_num=1,
            next_event_due=datetime(2017, 3, 1),
            active=True,
            alert_schedule_id=uuid.uuid4(),
        ).save()
        self._dump_and_load({AlertScheduleInstance: 1})

    def test_mobile_backend(self):
        """Only the domain-owned backend/mapping are dumped; globals are excluded."""
        from corehq.apps.sms.models import (
            SQLMobileBackend,
            SQLMobileBackendMapping,
        )

        domain_backend = SQLMobileBackend.objects.create(
            domain=self.domain_name,
            name='test-domain-mobile-backend',
            display_name='Test Domain Mobile Backend',
            hq_api_id='TDMB',
            inbound_api_key='test-domain-mobile-backend-inbound-api-key',
            supported_countries=["*"],
            backend_type=SQLMobileBackend.SMS,
            is_global=False,
        )
        SQLMobileBackendMapping.objects.create(
            domain=self.domain_name,
            backend=domain_backend,
            backend_type=SQLMobileBackend.SMS,
            prefix='123',
        )

        # A global backend (and a mapping to it) must NOT appear in the dump;
        # the expected counts below assert exactly one of each survives.
        global_backend = SQLMobileBackend.objects.create(
            domain=None,
            name='test-global-mobile-backend',
            display_name='Test Global Mobile Backend',
            hq_api_id='TGMB',
            inbound_api_key='test-global-mobile-backend-inbound-api-key',
            supported_countries=["*"],
            backend_type=SQLMobileBackend.SMS,
            is_global=True,
        )
        SQLMobileBackendMapping.objects.create(
            domain=self.domain_name,
            backend=global_backend,
            backend_type=SQLMobileBackend.SMS,
            prefix='*',
        )

        self._dump_and_load({
            SQLMobileBackendMapping: 1,
            SQLMobileBackend: 1,
        })

        self.assertEqual(SQLMobileBackend.objects.first().domain, self.domain_name)
        self.assertEqual(SQLMobileBackendMapping.objects.first().domain, self.domain_name)

    def test_case_importer(self):
        """Case-importer upload records (file meta, record, form record) round-trip."""
        from corehq.apps.case_importer.tracking.models import (
            CaseUploadFileMeta,
            CaseUploadFormRecord,
            CaseUploadRecord,
        )

        upload_file_meta = CaseUploadFileMeta.objects.create(
            identifier=uuid.uuid4().hex,
            filename='picture.jpg',
            length=1024,
        )
        case_upload_record = CaseUploadRecord.objects.create(
            domain=self.domain_name,
            upload_id=uuid.uuid4(),
            task_id=uuid.uuid4(),
            couch_user_id=uuid.uuid4().hex,
            case_type='person',
            upload_file_meta=upload_file_meta,
        )
        CaseUploadFormRecord.objects.create(
            case_upload_record=case_upload_record,
            form_id=uuid.uuid4().hex,
        )
        self._dump_and_load(Counter({
            CaseUploadFileMeta: 1,
            CaseUploadRecord: 1,
            CaseUploadFormRecord: 1,
        }))

    def test_transifex(self):
        """A Transifex organization and its two domain projects round-trip."""
        from corehq.apps.translations.models import TransifexProject, TransifexOrganization
        org = TransifexOrganization.objects.create(slug='test', name='demo', api_token='<PASSWORD>')
        TransifexProject.objects.create(
            organization=org, slug='testp', name='demop', domain=self.domain_name
        )
        TransifexProject.objects.create(
            organization=org, slug='testp1', name='demop1', domain=self.domain_name
        )
        self._dump_and_load(Counter({TransifexOrganization: 1, TransifexProject:
from __future__ import print_function

import logging
import curses
import re
import sys

from config import *
from data import Span, span_compare_ge, span_compare_le


class View(object):
    """Curses rendering state for one annotation file.

    Tracks the visible window, the selection cursor, the current linking
    position (for link-mode annotation), and which overlays (help, progress,
    legend, line numbers) are shown.
    """

    def __init__(self, window, cursor, linking_pos, datum, my_config, cnum, total_num, prev_view=None):
        # window: curses window to draw into.
        # cursor: currently selected span.
        # linking_pos: span being linked from (link mode), or None.
        # datum: the document being annotated.
        # cnum / total_num: position of this file in the overall file list.
        # prev_view: carry display toggles over from the previous file's view.
        self.window = window
        self.cursor = cursor
        self.linking_pos = linking_pos
        self.datum = datum
        # Initial top-of-screen line, placed relative to the cursor's line.
        # NOTE(review): subtracting the full window height AND 10 looks odd —
        # confirm this is intended rather than e.g. half the height.
        self.top = max(0, cursor.start[0] - self.window.getmaxyx()[0] - 10)
        self.show_help = False
        self.show_progress = False
        self.progress = "file {} / {}".format(cnum + 1, total_num)
        self.show_legend = False
        self.legend = "Legend (TBD)"
        self.config = my_config
        self.line_numbers = False
        if prev_view is not None:
            # Preserve the user's display toggles across files.
            self.show_help = prev_view.show_help
            self.show_progress = prev_view.show_progress
            self.show_legend = prev_view.show_legend
            self.line_numbers = prev_view.line_numbers
        self.last_moved_pos = cursor

        if self.config.args.prevent_self_links and self.cursor == self.linking_pos:
            # TODO: In this case move the linking pos along one step
            pass

    def instructions(self):
        """Return the help-overlay text (keybindings) as a list of lines."""
        shared = [
            "Misc      | ] [                   | save and go to next ] / previous [ file",
            "          | q Q                   | quit with or without saving             ",
            "          | s                     | save the current file                   ",
        ]
        if self.config.annotation_type == 'link':
            # Link-mode keybindings: separate keys move the selected item and
            # the linking item.
            return [
                "Keybindings - to hide/show this, type 'h'",
                "Colours are underline:selected, green:linking, blue:linked, yellow:has_link",
                "Type      | Key                   | Affect                             ",
                "----------|-----------------------|------------------------------------",
                "Move      | j or LEFT             | move selected item to the left     ",
                "          | J or [SHIFT + LEFT]   | move linking item to the left      ",
                "          | i or UP               | move selected item up a line       ",
                "          | I or [SHIFT + UP]     | move linking item up a line        ",
                "          | o or DOWN             | move selected item down a line     ",
                "          | O or [SHIFT + DOWN]   | move linking item down a line      ",
                "          | ; or RIGHT            | move selected item to the right    ",
                "          | : or [SHIFT + RIGHT]  | move linking item to the right     ",
                "Annotate  | d                     | create a link and move down / right",
                "          | D                     | create a link                      ",
                "          | u                     | undo all annotations for this item ",
            ] + shared
        else:
            # Label-mode keybindings: movement, span editing, and labelling.
            return [
                "Keybindings - to hide/show this, type 'h'",
                "Underline is current item, colours are labels",
                "Type      | Key                   | Affect                           ",
                "----------|-----------------------|----------------------------------",
                "Move      | j or LEFT             | move to the left                 ",
                "          | J or [SHIFT + LEFT]   | go to the start of the line      ",
                "          | i or UP               | move up a line                   ",
                "          | I or [SHIFT + UP]     | go to first line                 ",
                "          | o or DOWN             | move down a line                 ",
                "          | O or [SHIFT + DWON]   | go to last line                  ",
                "          | ; or RIGHT            | move to the right                ",
                "          | : or [SHIFT + RIGHT]  | go to the end of the line        ",
                "Span Edit | m /                   | extend left or right             ",
                "          | k l                   | contract left or right           ",
                "Annotate  | SPACE then a, s, or d | [un]mark this item as a, s, ord d",
                "          | u                     | undo annotation on this item     ",
            ] + shared

    def toggle_help(self):
        """Show/hide the keybinding overlay."""
        self.show_help = not self.show_help

    def toggle_progress(self):
        """Show/hide the 'file N / M' progress indicator."""
        self.show_progress = not self.show_progress

    def toggle_legend(self):
        """Show/hide the colour legend."""
        self.show_legend = not self.show_legend

    def shift_view(self, down=False):
        """Scroll the view ten lines up, or down when ``down`` is True."""
        self.last_moved_pos = None
        if down:
            self.top += 10
        else:
            self.top -= 10

    def _check_move_allowed(self, move_link, new_pos):
        """Return True if moving to ``new_pos`` obeys the linking constraints.

        move_link: True when the linking position (not the cursor) is moving.
        Enforces the optional --prevent_forward_links and --prevent_self_links
        settings relative to the other (non-moving) position.
        """
        if self.linking_pos is None:
            return True
        if self.config.args.prevent_forward_links:
            if move_link and self.cursor > new_pos:
                return False
            if (not move_link) and self.linking_pos < new_pos:
                return False
        if self.config.args.prevent_self_links:
            if move_link and self.cursor == new_pos:
                return False
            if (not move_link) and self.linking_pos == new_pos:
                return False
        return True

    def move(self, direction, distance, maxjump=False, move_link=False):
        """Move the cursor (or linking position) ``distance`` in ``direction``.

        If the requested target violates the link constraints, falls back to
        the closest allowed position instead of not moving at all.
        """
        # TODO: if self links are prevented, but forward links are allowed,
        # then skip over when moving.
        mover = self.cursor
        if move_link:
            mover = self.linking_pos
###            logging.debug("Moving linking pos")
###        logging.debug("Move {} {} {}".format(self.cursor, direction, distance))
        new_pos = mover.edited(direction, 'move', distance, maxjump)
###        logging.debug("Moving {} to {}".format(self.cursor, new_pos))
        if self._check_move_allowed(move_link, new_pos):
            if move_link:
                self.linking_pos = new_pos
            else:
                self.cursor = new_pos
            self.last_moved_pos = new_pos
        elif self.linking_pos is not None:
            # Move as far as possible: stop one step short of the other
            # position when self-links are forbidden, otherwise land on it.
            if self.config.args.prevent_self_links:
                if move_link:
                    self.linking_pos = self.cursor.edited('next', 'move', 1, False)
                    self.last_moved_pos = self.linking_pos
                else:
                    self.cursor = self.linking_pos.edited('previous', 'move', 1, False)
                    self.last_moved_pos = self.cursor
            else:
                if move_link:
                    self.linking_pos = self.cursor
                else:
                    self.cursor = self.linking_pos
                self.last_moved_pos = self.cursor

    def search(self, query, direction, count, maxjump=False, move_link=False):
        """Jump to the next match of ``query``, or — when query is None — to
        the next unannotated item / self-link / disagreement, subject to the
        same link constraints as ``move``."""
        logging.debug("Search {} {} {} {} {}".format(query, direction, count, maxjump, move_link))
        new_pos = None
        if query is None:
            if len(self.datum.disagreements) == 0:
                new_pos = self.datum.get_next_unannotated(self.cursor, self.linking_pos, direction, move_link)
                if new_pos == self.linking_pos or new_pos is None:
                    # Nothing unannotated found (or it is the linking pos
                    # itself) — look for a self-link instead.
                    new_pos = self.datum.get_next_self_link(self.cursor, self.linking_pos, direction, move_link)
            else:
                # In comparison mode, navigate between disagreements.
                new_pos = self.datum.get_next_disagreement(self.cursor, self.linking_pos, direction, move_link)
        else:
            mover = self.cursor
            if move_link:
                mover = self.linking_pos
            new_pos = mover.search(query, direction, count, maxjump)

        if new_pos is not None:
            if self._check_move_allowed(move_link, new_pos):
                if move_link:
                    self.linking_pos = new_pos
                else:
                    self.cursor = new_pos
                self.last_moved_pos = new_pos

    def adjust(self, direction, distance, change, maxjump, link):
        """Apply a span edit (``change``, e.g. extend/contract) to the cursor,
        or to the linking position when ``link`` is set and one exists."""
        changer = self.cursor
        if link and self.linking_pos is not None:
            changer = self.linking_pos
        new_pos = changer.edited(direction, change, distance, maxjump)
        if self._check_move_allowed(link, new_pos):
            if link:
                self.linking_pos = new_pos
            else:
                self.cursor = new_pos

    def put_cursor_beside_link(self):
        """Place the cursor immediately before the linking position."""
        self.cursor = self.linking_pos.edited('previous')

    def marking_to_color(self, marking):
        """Translate a set of marks for one character into a curses attribute.

        ``marking`` is an iterable of mark strings ('cursor', 'link', 'ref',
        'self-link', 'linked', a label name, or 'compare-...' entries); the
        return value is a curses color pair combined with bold/underline
        modifiers.
        """
        name = DEFAULT_COLOR
        modifier = curses.A_BOLD
        has_link = False
        has_ref = False
        has_self_link = False
        for mark in marking:
            if mark == 'cursor':
                # NOTE(review): '+=' on curses attribute bits — '|=' would be
                # the safe bitwise form if a mark could ever repeat; confirm
                # 'cursor' appears at most once per marking.
                modifier += curses.A_UNDERLINE
            elif mark == 'link':
                has_link = True
            elif mark == 'ref':
                has_ref = True
            elif mark == 'self-link':
                has_self_link = True
            elif mark == 'linked':
                name = IS_LINKED_COLOR
            elif mark in self.config.labels:
                # Two different labels on the same character get the overlap
                # colour rather than either label's own colour.
                if name != DEFAULT_COLOR:
                    name = OVERLAP_COLOR
                else:
                    name = self.config.get_color_for_label(mark)
            elif mark.startswith("compare-"):
                if 'ref' in mark:
                    count = int(mark.split("-")[-2])
                    if 'True' in mark:
                        # First, cases where this is related to the current linking line
                        if name == DEFAULT_COLOR or name == COMPARE_DISAGREE_COLOR:
                            name = COMPARE_REF_COLOR
                    elif count > 0 and 'last' in mark:
                        # If unrelated, but there is a disagreement, indicate it
                        if name == DEFAULT_COLOR:
                            name = COMPARE_DISAGREE_COLOR
                else:
                    count = int(mark.split("-")[-2])
                    if name == DEFAULT_COLOR:
                        if count == 0:
                            key = mark.split("-")[-1]
                            if key in self.config.labels:
                                name = self.config.get_color_for_label(key)
                        else:
                            name = COMPARE_DISAGREE_COLOR
###                key = mark.split("-")[-1]
###                if key in self.config.labels:
###                    name = self.config.get_color_for_label(mark)

        # Override cases: link/ref markers take precedence over label colours.
        if has_link:
            if has_ref:
                if has_self_link:
                    name = SELF_LINK_COLOR
                else:
                    name = IS_LINKED_COLOR
            else:
                name = LINK_COLOR
        elif has_ref:
            name = REF_COLOR

        return curses.color_pair(name) + modifier

    def do_contents(self, height, width, markings, number_width, trial=False):
        """Render (or, when ``trial``, measure) the document tokens into the
        window, wrapping tokens to the screen width and colouring each
        character from ``markings``. Returns depend on the portion below this
        chunk (truncated here)."""
        # For linked items, colour them to indicate it
        # For labels, colour them always, and add beginning / end
        # For freeform text, include it at the bottom
        # Row and column indicate the position on the screen, while line and
        # token indicate the position in the text.
        first_span = None
        last_span = None
        row = -1
        for line_no, line in enumerate(self.datum.doc.tokens):
            # If this line is above the top of what we are showing, skip it
            if line_no < self.top:
                continue
            if row >= height:
                break

            if first_span is None:
                first_span = Span('character', self.datum.doc, (line_no, 0, 0))

            # Set
            row += 1
            column = number_width

            if (not trial) and column > 0:
                self.window.addstr(row, 0, str(line_no), curses.color_pair(LINE_NUMBER_COLOR))

            for token_no, token in enumerate(line):
                # Check if we are going on to the next line and adjust
                # accordingly.
                space_before = 1 if column > number_width else 0
                wide_token = False
                if column + len(token) + space_before > width:
                    if token_no != 0:
                        column = 0
                        row += 1
                        space_before = 0
                    else:
                        # Token is wider than the whole screen line.
                        wide_token = True

                # If this takes us off the screen, stop
                if row >= height:
                    break

                end_pos = len(token) - 1
                if wide_token:
                    end_pos = width - column - space_before - 1
                last_span = Span('character', self.datum.doc, (line_no, token_no, end_pos))

                for char_no, char in enumerate(token):
                    if column >= width:
                        column = 0
                        row += 1
                        if row >= height:
                            break

                    # Allow multiple layers of color, with
# -*- coding: utf-8 -*- """ Copyright (c) 2020, University of Southampton All rights reserved. Licensed under the BSD 3-Clause License. See LICENSE.md file in the project root for full license information. """ import math import unittest from unittest.mock import patch import numpy as np from auv_cal.cone_fitting import CircularCone, build_matrix, rotation_matrix cone_points = np.array( [ [-1.33048143698340549, -10.9167707649920445, 16.0510645154023663], [-1.35444014094203435, -9.85047375230570488, 14.4832746889369801], [-1.37413509610432372, -8.97394348476293047, 13.1945012799404964], [-1.39061151089872914, -8.24066080532580258, 12.1163465903305951], [-1.40459877249746512, -7.61816283388455684, 11.2010800416958851], [-1.41662135489108421, -7.08310677445542147, 10.4143804293164877], [-1.4270660565611577, -6.6182773637618908, 9.73093593640615495], [-1.43622439354878639, -6.21070008021096864, 9.13166996168519063], [-1.4443202368208774, -5.85041108585671843, 8.60193254967370535], [-1.45152835738756214, -5.52963183766690403, 8.13028681131961761], [-1.45798718750993861, -5.24220112363124002, 7.70767383958905761], [-1.46380779856235255, -4.98317548446758263, 7.32682520450624164], [-1.4690803425334733, -4.74854252132031807, 6.98184142587877954], [-1.47387875555397518, -4.53501155555069246, 6.66788417777062747], [-1.47826424698250891, -4.33985833976516311, 6.38094796518596752], [-1.48599279676010076, -3.99594701649774953, 5.87529084769434728], [-1.48941531414188821, -3.8436524235919709, 5.65137020406256863], [-1.48228792478428839, -4.16080820992080991, 6.11768832114919014], [-1.49258657516399906, -3.70254034149209099, 5.44389134585008971], [-1.49553327289983051, -3.57142274125332682, 5.25110749924903253], [-1.49827844859047166, -3.44927412625144614, 5.07151086375407889], [-1.50084209549335168, -3.33520465665189247, 4.90379309673348551], [-1.5032416467473424, -3.22843843596744717, 4.74681338788338714], [-1.50549237243837286, -3.12829583883298223, 4.59957247553411985], 
[-1.5076077049881853, -3.03417902890704028, 4.46119135343988127], [-1.50959950752068495, -2.9455600146745069, 4.33089370907611126], [-1.5114782965332878, -2.86197073899794185, 4.20799135218291109], [-1.51325342770012639, -2.78299480956345135, 4.09187205593626135], [-1.51493325173750759, -2.70826056176537699, 3.98198935722065528], [-1.51652524581227666, -2.63743521010830717, 3.87785395736237071], [-1.51803612485603789, -2.57021989393614847, 3.77902643780299297], [-1.51947193628102517, -2.50634546190684571, 3.68511106195990923], [-1.52083814091515213, -2.44556886981331001, 3.59575047889738286], [-1.52213968243994757, -2.38767009010187881, 3.51062117935291829], [-1.52338104719290901, -2.33244945024751615, 3.42942958231705086], [-1.5245663158588143, -2.27972533212532991, 3.35190865239052416], [-1.52569920830506844, -2.22933217651882165, 3.27781496578782994], [-1.52678312259916749, -2.18111874657023952, 3.20692615706655504], [-1.52782116907020771, -2.13494661180116374, 3.13903869016389825], [-1.52881620013376307, -2.09068882069574435, 3.07396590667921155], [-1.52977083648234569, -2.04822873503912373, 3.01153631198721161], [-1.53068749014795524, -2.00745900347272821, 2.95159206604349533], [-1.53156838486393321, -1.96828065524682172, 2.89398765091759236], [-1.53241557408812668, -1.93060229806378203, 2.83858869137190428], [-1.32745348765844562, -10.5515533634389271, 16.0145350241871434], [-1.35166099288881081, -9.52306608626207485, 14.4535567535042144], [-1.37156859611230608, -8.6772707100982629, 13.1698576422620413], [-1.388228538826356, -7.969460130299451, 12.0955838429222311], [-1.40237562640273339, -7.36841430821775134, 11.1833513936014572], [-1.41453853690862541, -6.85167103815115563, 10.3990684627423171], [-1.42510736284869588, -6.40265626787095243, 9.71757991623639761], [-1.43437622860982006, -6.00887339012222021, 9.11991912920162839], [-1.4425710923938222, -5.66072200812765747, 8.59151517685181432], [-1.44986841242063047, -5.35070485536185014, 8.12098914337984112], 
[-1.45640799788928721, -5.0728817904008876, 7.69932542704392819], [-1.46230205398588131, -4.82248549718748087, 7.3192884723442253], [-1.46764167411843482, -4.59564564622944172, 6.97500411790629915], [-1.4725015821645091, -4.38918740837497801, 6.6616537923884751], [-1.47694365139982819, -4.2004819443481356, 6.37524759617999237], [-1.48101955309625644, -4.02733387245473207, 6.11245350641919938], [-1.48477277598046942, -3.86789546683987773, 5.87046715209084713], [-1.48824018426474902, -3.72060046066187322, 5.64691134432713326], [-1.49145323274157793, -3.58411241976658212, 5.43975772096831367], [-1.49443892388951505, -3.45728407765484835, 5.24726502753743862], [-1.49722056871179965, -3.33912500935725065, 5.06793005452447254], [-1.49981839671090289, -3.2287757151415275, 4.90044830314222146], [-1.5022500487826842, -3.12548667867538388, 4.74368220102183269], [-1.50453097843558403, -3.02860132022747441, 4.59663522957105375], [-1.50667478062952731, -2.93754202512675144, 4.45843071878108521], [-1.5086934630235378, -2.8517986191298581, 4.32829435580838151], [-1.51059767106570364, -2.7709188049134208, 4.20553967003794948], [-1.5123968758363231, -2.69450018108770228, 4.08955592000569013], [-1.51409953164203581, -2.62218354641916784, 3.97979793093618905], [-1.51571320889519412, -2.55364725411690285, 3.87577752600646086], [-1.5172447066853012, -2.48860242895472661, 3.77705626717160925], [-1.51870014857347702, -2.42678889720504065, 3.68323927785474758], [-1.52008506445628644, -2.36797170844949179, 3.59396996395317991], [-1.52140446080649072, -2.31193815122554991, 3.50892548436006813], [-1.52266288117148241, -2.25849518259884929, 3.42781284971857625], [-1.52386445847031893, -2.207467206195322, 3.35036555004806136], [-1.52501296035767653, -2.15869414479853772, 3.2763406294442321], [-1.52611182870398387, -2.11202976293854894, 3.20551614020189435], [-1.52716421406319269, -2.06734020244362338, 3.13768892016040235], [-1.52817300585515659, -2.02450270006452415, 3.07267264638825299], 
[-1.52914085887177409, -1.98340446129811698, 3.01029612593794882], [-1.53007021661865283, -1.94394166865484541, 2.95040179065195129], [-1.5309633319247562, -1.90601860601074069, 2.89284436815501111], [-1.53182228518563335, -1.86954688349427678, 2.83748970543244283], [-1.32451545348815269, -10.1885804114473277, 15.9790902786198927], [-1.34896310068767478, -9.19747568650687342, 14.4247077016716148], [-1.36907615603067323, -8.38209891447350941, 13.1459251308659528], [-1.38591355909381164, -7.69951977274339772, 12.075413510252563], [-1.40021529147628576, -7.11973877021840096, 11.1661236379591937], [-1.41251405685573106, -6.62115964151123837, 10.3841853711034613], [-1.42320310758531021, -6.18783959819700158, 9.70459510317229679], [-1.43257907375540361, -5.80775300171109521, 9.10849262435023554], [-1.44086992104320366, -5.47165792801216533, 8.58138351709908065], [-1.44825373201933338, -5.17233475721190405, 8.11194501089400255], [-1.45487164264596602, -4.90406171678415514, 7.69120345915648329], [-1.46083695140638192, -4.66224558959483648, 7.3119551663479001], [-1.46624166006955803, -4.44315654566532725, 6.96835051578512132], [-1.4711612530270497, -4.24373438373187462, 6.65559009181948902], [-1.47565824484143082, -4.06144471057274536, 6.36969912108158987], [-1.47978485113488145, -3.89417066088126695, 6.10735765314878698], [-1.48358502556404703, -3.74013031654801686, 5.86577104645280656], [-1.48709603173194416, -3.59781297999663918, 5.64257002362806226], [-1.49034966937129143, -3.46592946365416621, 5.43573270883095105], [-1.49337324037567987, -3.34337292621420845, 5.24352320594595067], [-1.49619031687346515, -3.22918773425927741, 5.06444276322996689], [-1.49882135710728481, -3.12254449397350964, 4.89719061471512163], [-1.50128420317809863, -3.02271987217066851, 4.74063233285433139], [-1.50359448627170611, -2.92908016806176486, 4.59377406357686802], [-1.50576595882755115, -2.8410678468444357, 4.45574140646769035], [-1.50781076856832441, -2.75819043030791056, 
4.32576199153290641], [-1.50973968592583652, -2.68001127679142614, 4.20315101910121935], [-1.5115622938551192, -2.60614188595710239, 4.08729919114272189], [-1.51328714709902634, -2.53623544206540696, 3.97766258497257974], [-1.51492190648974168, -2.46998136927675294, 3.87375411414904081], [-1.51647345273578726, -2.40710071862823316, 3.77513629371527903], [-1.51794798325962299, -2.34734224215405618, 3.6814150831116117], [-1.519351094959976, -2.29047903762810412, 3.59223462401341065], [-1.5206878552293559, -2.23630566945493481, 3.50727272492849007], [-1.52196286312617457, -2.1846356886978584, 3.4262369717744745], [-1.52318030225799261, -2.13529948914671852, 3.34886136547843716], [-1.52434398665727322, -2.08814244747571687, 3.27490340512441591], [-1.52545740070954761, -2.04302330452171521, 3.2041415492582237], [-1.52652373401452657, -1.9998127519840958, 3.13637299936172376], [-1.52754591191485734, -1.95839219476263748, 3.07141175878606987], [-1.52852662230792369, -1.91865266398484113, 3.00908692801622601], [-1.5294683392581172, -1.88049385974358629, 2.949241203364525], [-1.53037334384639601, -1.8438233058390312, 2.89172955132421405], [-1.53124374262694096, -1.80855560152788319, 2.83641803506289669], [-1.321668371452974, -9.82779045439390941, 15.9447427889390827], [-1.34634751549444576, -8.87365407939521234, 14.3967387736394699], [-1.36665881953194268, -8.08838893192374364, 13.1227137671383485], [-1.38366759548346718, -7.4308074572260292, 12.0558445124994709], [-1.39811876466511831, -6.87210918225227729, 11.1494047249989503], [-1.41054888119169086, -6.39154961789985165, 10.369738259385791], [-1.42135422510302023, -5.97380761275182426, 9.69198786827494096], [-1.43083383078813764, -5.60732176930486492, 9.09739618088944546], [-1.43921759239807101, -5.28320382019545143, 8.57154274966192986], [-1.44668515445181955, -4.99450827132175856, 8.10315910916167859], [-1.45337892976499905, -4.73572909698707623, 7.68331220735316922], [-1.45941326974596208, -4.50244519389711773, 
7.30482918527101255], [-1.46488105152979653, -4.29106570650142505, 6.96188418934110409], [-1.46985849280231418, -4.09864387416029974, 6.64969635445662277], [-1.47440872676395429, -3.92273881432611748, 6.36430555910495865], [-1.47858449439247952, -3.76131143310367655, 6.10240354922510253], [-1.48243019822857547, -3.6126450208931673, 5.86120511148356904], [-1.48598348762010191, -3.47528396298249742, 5.6383486364940163], [-1.48927649556311748, -3.34798592025817143, 5.43181853614299825], [-1.49233681331665768, -3.22968414822261751, 5.23988410944393568], [-1.49518826543879846, -3.11945753221044297, 5.06105092725866701], [-1.49785153135107318, -3.01650655619313346, 4.8940218437547216], [-1.5003446477537794, -2.9201338781784969, 4.73766548166545753], [-1.50268341771383884, -2.82972851387670987, 4.59099057165169544], [-1.50488174604351532, -2.74475287014226144, 4.45312491517908171], [-1.50695191601253953, -2.66473204660753948, 4.32329802734085256], [-1.50890481902729401, -2.58924495572730917, 4.20082672989551398], [-1.51075014634660376, -2.51791691057767286, 4.0851031256096384], [-1.51249654995869975, -2.45041340496102178, 3.97558450701434785], [-1.51415177825564773, -2.38643486790292325, 3.87178484602870787], [-1.51572279099413754, -2.32571221898571379, 3.77326758287164932], [-1.51721585714069218, -2.26800308541649898, 3.67963948858090539], [-1.51863663850207109, -2.21308856867135972, 3.59054541917198167], [-1.51999026149322858, -2.16077046976978604, 3.50566381388511816], [-1.52128137896063942, -2.11086889903629427, 3.42470281722957326], [-1.5225142236322351, -2.06322020959508512, 3.3473969262568537], [-1.52369265448789903, -2.01767520457301686, 3.2735040819020842], [-1.52482019712087991, -1.97409757662971086, 3.2028031372561343], [-1.52590007897937352, -1.9323625454323945, 3.13509164698600484], [-1.52693526023024972, -1.89235566438791647, 3.07018393136039558], [-1.52792846086664036, -1.85397177259968915, 3.00790937589018537], [-1.52888218458190606, -1.81711407183915075, 
2.94811093379420797], [-1.52979873985145254, -1.78169331147357957, 2.89064380361489315], [-1.5306802585959256, -1.74762706690086023, 2.83537425854087788], [-1.31891331193991546, -9.46912121392759865, 15.9115054684033392], [-1.34381531730794457, -8.55155201923237662, 14.3696615180315437], [-1.36431765567960817, -7.79610088959776171, 13.1002338162705225], [-1.38149169446193576, -7.16329026758040843, 12.0368859674877928], [-1.39608706341569322, -6.62549792807447702, 11.1332027684251464], [-1.40864399506977045, -6.16281747843024608, 10.3557343700051092], [-1.41956166690667884, -5.76054009759232066, 9.67976469970423814], [-1.42914141738132638, -5.40756211982123514, 9.08663563348532399], [-1.4376149908286735, -5.09534427129150114, 8.56199814158089723], [-1.44516353173664092, -4.81721177245546883, 8.09463621050106497], [-1.45193068010447646, -4.56787180257588155, 7.675656011114115], [-1.45803180001655774, -4.34307344685783203, 7.29791448838074874], [-1.46356061102782298, -4.13936334432111508, 6.95560876251101767], [-1.46859403689989421, -3.95390702242279213, 6.64397590732115617], [-1.47319580679384399, -3.7843562009383156, 6.35906997336241897], [-1.47741916800214623, -3.62874883368948264, 6.09759402232464787], [-1.48130895584721989, -3.48543283728757602, 5.85677196408501555], [-1.48490319171431695, -3.3530072070968453, 5.63424961049666262], [-1.4882343301225105, -3.23027606538219336, 5.42801745986543516], [-1.49133024158350969, -3.11621244509987116, 5.23634984078389021], [-1.49421499432813443, -3.00992948497753687, 5.05775650957832479], [-1.4969094813363939, -2.91065732482076589, 4.8909438261720366], [-1.4994319272458998, -2.81772442703927517, 4.73478336757849672], [-1.50179830115538548, -2.73054236573706444, 4.58828636814031654], [-1.5040226550932716, -2.64859335488123326, 4.45058276239864359], [-1.50611740331303223, -2.57141995687590841, 4.32090389182183898], [-1.50809355414279045, -2.49861653940817385, 4.19856814925552069], [-1.50996090353358192, -2.42982214361725868, 
4.08296899489971477], [-1.51172819749026788, -2.36471449886745777, 3.97356489898978182], [-1.51340326906952893, -2.30300497467102216, 3.86987085922398588], [-1.51499315447260785, -2.24443430292238189, 3.77145121258921678], [-1.51650419186229812, -2.18876893670847217, 3.67791351686191392], [-1.51794210583076405, -2.13579793785233152, 3.58890332056972072], [-1.51931207989173345, -2.08533030573375155, 3.50409967445617943], [-1.5206188189321681, -2.03719267608137367, 3.42321126462959358], [-1.52186660320923384, -1.99122733130180163, 3.34597306920534843], [-1.52305933519848646, -1.94729047422637547, 3.27214345758396297], [-1.52420058037357831, -1.90525072546697305, 3.20150166547218085], [-1.52529360281526083, -1.86498781130250224, 3.13384559006366858], [-1.52634139639868716, -1.82639141449415532, 3.06898985899821453], [-1.52734671218664264, -1.78936016490485317, 3.00676413424190514], [-1.52831208255648776, -1.753800750474658, 2.94701161820830926], [-1.52923984250626299, -1.71962713213555718, 2.88958773453492723], [-1.53013214851748947, -1.6867598487594373, 2.83435896014755473], [-1.31625137675109638, -9.11250953135374431, 15.8793916092664311], [-1.34136761307980446, -8.23111944389879824, 14.3434877716829075], [-1.36205375712847943, -7.50519417193243843, 13.0784957699785629], [-1.37938692346396774, -6.8969346173198387, 12.0185471757369786], [-1.39412122403593552, -6.37987678796106117, 11.1175260323543679], [-1.40680040077256607, -5.93493919075926346, 10.3421810713045677], [-1.41782640017839068, -5.54801634852413805, 9.66793219252076597], [-1.42750276564840806, -5.20845603682527258, 9.07621690861589236], [-1.43606301407702053, -4.90806346583285436, 8.55275503953465943], [-1.44368972833088627, -4.6404312696349912, 8.08638115690097692], [-1.45052772613783287, -4.400477371429381, 7.66823927132416294], [-1.45669334411323104, -4.18411917989970572, 7.29121508941782093], [-1.46228111131939498, -3.98803939450744105, 6.94952790783611452], [-1.46736863036493093, 
-3.80951471344741632, 6.6384321210259607], [-1.47202020365674069, -3.64628857786532068, 6.35399546624308709], [-1.47628956570745351, -3.49647528720186296, 6.09293193566102431], [-1.48022196845635801, -3.35848681918669456, 5.85247425343459149], [-1.48385579155198677, -3.2309763203148103, 5.63027540262266069], [-1.4872237992283559, -3.11279399838493109, 5.42433176385225924], [-1.49035413107028836, -3.00295235364908875, 5.23292252737739361], [-1.49327109015971571, -2.90059852045412381, 5.05456149582838954], [-1.4959957753528963, -2.80499207878668022, 4.88795841877456638], [-1.49854659250425271, -2.71548711393388098, 4.73198773002181916], [-1.50093967083989899, -2.63151760471930318, 4.58566308526085997], [-1.50318920439616344, -2.55258544142744714, 4.44811648219116318], [-1.50530773379868377, -2.47825053735149137, 4.31858102897732099], [-1.50730638019958674, -2.40812261925377813, 4.19637663836630548], [-1.50919504058862586, -2.34185437332717816, 4.08089808389050823], [-1.51098255171961404, -2.27913569253796533, 3.97160497533004664], [-1.51267682838240503, -2.21968882425679004, 3.86801330301044333], [-1.51428498058551941, -2.16326425797931288, 3.76968827177506238], [-1.51581341330890607, -2.10963722470827131, 3.67623820083422848], [-1.5172679117784853, -2.05860470442307042, 3.58730930900390677], [-1.51865371465596932, -2.00998285763290063, 3.50258123894918416], [-1.51997557709605435, -1.9636048125164336, 3.42176320107030385], [-1.5212378252706058, -1.91931875150964282, 3.34459063920476707], [-1.52244440367721978, -1.87698625110890105, 3.27082233758051144], [-1.52359891632225741, -1.83648083663843531, 3.2002379023643841], [-1.52470466268408855, -1.79768672019521336, 3.13263556241425123], [-1.52576466921243359, -1.76049769524511568, 3.0678302430104698], [-1.52678171699710252, -1.72481616564526208, 3.00565187383739207], [-1.52775836613889382, -1.69055229039959554, 2.94594389864077622], [-1.52869697727215326, -1.65762322836739528, 2.88856195906244251], 
[-1.52959973062012544, -1.62595246955682637, 2.83337272935735873], [-1.31368369685936415, -8.75789131239576157, 15.8484148557007902], [-1.33900553458329141, -7.91230543162854083, 14.3182296368508784], [-1.35986823809643531, -7.21562738581781993, 13.057510327028016], [-1.3773543689598815, -6.63170622091794826, 12.0008376036226707], [-1.39222229985238899, -6.13521691460374807, 11.1023829166194741], [-1.4050191159534966, -5.70789015856108062, 10.3290858446268796], [-1.41614940609831197, -5.33621515341392882, 9.6564970372355905], [-1.42591882053829977, -5.00998504513456755, 9.06614601436851508], [-1.43456257172236046, -4.72134517275713517, 8.54381886070089358], [-1.4422646196615394, -4.46415239419047882, 8.07839885179469341], [-1.44917091054716352, -4.23353299710407338, 7.66106644283641014], [-1.45539871346574401, -4.02557090958609987, 7.28473504984715348], [-1.4610433340937119, -3.83708350368045492, 6.94364534031405434], [-1.46618302663605449, -3.66545756659013655, 6.63306840415638188], [-1.4708826439840359, -3.50852740766834259, 6.34908517426136587], [-1.47519638871486669, -3.36448299180583632, 6.08842018324863865], [-1.47916991315129631, -3.23179981024652951, 5.84831465661920102], [-1.48284194135938718, -3.10918471575355415, 5.62642849523864363], [-1.48624553540821203, -2.99553363725686195, 5.42076375511289843], [-1.48940909370586594, -2.88989824151884411, 5.22960431782544699], [-1.49235714529617969, -2.7914594084676736, 5.05146789109246352], [-1.49511098716516422, -2.69950594904821273, 4.88506749625840087], [-1.49768919960285052, -2.61341739528014205, 4.72928032492041073], [-1.50010806600138547, -2.53264998156078081, 4.5831223704782591], [-1.50238191714349956, -2.45672514753025073, 4.44572762274224242], [-1.50452341536579803, -2.38522004875065496, 4.31633089591224373], [-1.50654379050041465, -2.31775967769294544, 4.19425357059435022], [-1.50845303687942578, -2.25401028499224543, 4.07889168880327446], [-1.51026007869797207, -2.19367385732666298, 3.9697059610469041], 
[-1.51197290951058938, -2.13648345909104975, 3.86621333654745802], [-1.51359871046133532, -2.08219928423252476, 3.76797985858231899], [-1.51514395093629184, -2.03060529506925702, 3.67461458203879054], [-1.51661447461372911, -1.98150634874155562, 3.58576437339577669], [-1.5180155733249634, -1.93472573070828657, 3.50110944730095719], [-1.51935205069400392, -1.89010302957148801, 3.42035952082071182], [-1.52062827716890792, -1.84749229936409365, 3.34325048788771584], [-1.52184823777345435, -1.8067604649365212, 3.26954153366397371], [-1.52301557367826423, -1.76778593373369963, 3.19901262239117568], [-1.52413361850502227, -1.7304573834554835, 3.13146230352232724], [-1.52520543012621057, -1.69467270014023352, 3.06670579006146138], [-1.5262338185991644, -1.66033804533800278, 3.00457327050588585], [-1.52722137077175502, -1.62736703442891906, 2.94490842191848623], [-1.52817047201352829, -1.59568001093782086, 2.88756709671632628], [-1.5290833254564391, -1.56520340401052827, 2.83241615994982165], [-1.31121142990154915, -8.40520147372787996, 15.8185891735558695], [-1.33673023603250174, -7.59505815927557748, 14.2938994557566375], [-1.35776223209953573, -6.92735832703844689, 13.0372883714863583], [-1.37539513429908977, -6.36757006619948829, 11.9837668645874409], [-1.39039135915559697, -5.89148880998134228, 11.0877819403848878], [-1.40330117167799417, -5.48164520186716508, 10.3164562699140046], [-1.41453167797587964, -5.12511477527492509, 9.64546600706741053], [-1.42439053805124582, -4.81213019611907367, 9.05642902909609759], [-1.43311458347641651, -4.53517273252030506, 8.53519508260321302], [-1.44088909049529557, -4.28836038837885525, 8.07069425092900694], [-1.44786108466304331, -4.06702551871641305, 7.65414202622429052], [-1.4541487275441225, -3.86741682857479319, 7.27847847137776149], [-1.45984806854080484, -3.68648502156768876, 6.93796481059039127], [-1.46503798617092107, -3.52172592829548314, 6.62788819705209953], [-1.46978386099315861, -3.37106390136045553, 
6.34434226236037802], [-1.47414034442558295, -3.23276391321399759, 6.08406168466874497], [-1.4781534728667165, -3.10536443879562229, 5.84429587381336191], [-1.48186230087805337, -2.98762560660897858, 5.62271139163809774], [-1.485300176408064, -2.87848871396911621, 5.41731575968987666], [-1.48849574636473969, -2.77704430291929016, 5.22639737809410487], [-1.49147375679423355, -2.68250675682416118, 5.04847771634307119], [-1.49425569499919164, -2.5941939149291513, 4.88227294789666377], [-1.49686030886136279, -2.51151058533785676, 4.726662921605846], [-1.49930402991890177, -2.43393511350471003, 4.58066588361586469], [-1.5016013203847125, -2.36100836538476777, 4.44341774365413222], [-1.50376495959419465, -2.29232463352773541, 4.31415496029919865], [-1.50580628186819654, -2.22752408560384119, 4.19220032910642892], [-1.5077353751406859, -2.1662864585354682, 4.07695111496375162], [-1.50956124769976463, -2.10832576494380453, 3.96786908962424079], [-1.5112919688582882, -2.0533858272531087, 3.86447212689035036], [-1.5129347881890145, -2.00123649231351086, 3.76632707853402771], [-1.51449623704050618, -1.95167040855735308, 3.67304370890493903], [-1.51598221533172994, -1.90450027051724846, 3.58426950911346198], [-1.51739806605708649, -1.85955645350133114, 3.49968524548950599], [-1.51874863948531802, -1.81668497546090668, 3.41900112381742671], [-1.52003834867790455, -1.77574573443680417, 3.34195347221006589], [-1.52127121766626971, -1.73661097907108397, 3.26830186261177991], [-1.52245092339574972, -1.69916397700418487, 3.19782660472187619], [-1.52358083235738384, -1.66329785191854107, 3.13032655731069465], [-1.5246640326762837, -1.62891456482508912, 3.06561721100081819], [-1.52570336230057202, -1.59592401914285764, 3.0035290040268583], [-1.52670143383290147, -1.56424327236977589, 2.94390583859991839], [-1.52766065646187998, -1.53379583982116463, 2.8866037705434322], [-1.52858325538109141, -1.50451107813099538, 2.83148984904253354], [-1.27700213098832904, -9.01413268932937939, 
17.6714559597061402], [-1.30883575740194535, -8.05437389170427842, 15.7899288168619307], [-1.33454289144508631, -7.27932486242774335, 14.2705097823843889], [-1.35573688944428139, -6.64034394829852204, 13.0178409486435598], [-1.37351033732399341, -6.10449038810663769, 11.9673446983507219], [-1.38862948292846911, -5.64866230343784004, 11.0737317240312638], [-1.40164761025950657, -5.25617853847314986, 10.3043000097984052], [-1.41297421918694233, -4.91469293630240855, 9.63484594387617932], [-1.42291888327084659, -4.61487205385139454, 9.04707208890450509], [-1.43171997730447753, -4.3495290449756272, 8.52688923192096837], [-1.43956403314314541, -4.11304009469470166, 8.06327235230773631], [-1.44659910674746328, -3.90094141145383411, 7.6474705587042866], [-1.45294421221512327, -3.70964479714499484, 7.27244948773633304], [-1.45869610977703368, -3.53623299339886987, 6.9324900974757], [-1.46393427493658979, -3.37830986523828392, 6.6228949649775517], [-1.46872459303892522, -3.23388901219579861, 6.33976991765884623], [-1.47312214504435524, -3.101309779040748, 6.0798593793277993], [-1.47717333503964876, -2.97917311268500606, 5.84042062299289633], [-1.48091753407881432, -2.86629200144286722, 5.61912661116195356], [-1.48438836395458718, -2.76165277014806954, 5.41399011814444453], [-1.4876147096748793, -2.66438455464157498, 5.22330388732858708], [-1.49062152525564962, -2.57373500763565355, 5.04559300455239512], [-1.4934304804342422, -2.48905080072373641, 4.87957667391882488], [-1.49606048377571788, -2.40976185306155921, 4.72413729943943128], [-1.49852810888395349, -2.33536848137759012, 4.57829529370065025], [-1.50084794402966781, -2.26543085891270612, 4.44118841299345046], [-1.50303288078279951, -2.19956031334068225, 4.31205469761162874], [-1.50509435371316869, -2.13741209994769576, 4.19021830427246122], [-1.50704254057142739, -2.07867936630541683, 4.07507767436124446], [-1.50888653034862785, -2.02308808538363527, 3.966095600720835], [-1.51063446507104571, -1.97039278052745104, 
3.86279084682568996], [-1.51229365999756049, -1.92037290158420371, 3.76473104248069879], [-1.51387070596240747, -1.87282973933493735, 3.67152663482119967], [-1.51537155688284786, -1.8275837871953946, 3.58282571614763823], [-1.51680160488130578, -1.78447247633094808, 3.49830958380712875], [-1.5181657450201187, -1.74334822394654787, 3.41768891402854491], [-1.51946843128673548, -1.70407674536830189, 3.34070045289910844], [-1.5207137251787668, -1.66653558923700484, 3.26710414473337796], [-1.52190533800529271, -1.63061286214852807, 3.19668063183700957], [-1.52304666783283693, -1.59620611475930518, 3.12922907080938772], [-1.52414083185036842, -1.56322136600037798, 3.0645652195965738], [-1.52519069480278979, -1.53157224582706908, 3.00251975692348294], [-1.52619889403891773, -1.50117924003827974, 2.94293680182504414], [-1.52716786163513518, -1.47196902326278134, 2.8856726060214104], [-1.52809984398539567, -1.44387386833265752, 2.8305943960438138], [-1.27451383183629097, -8.62171121536314544, 17.6370222905580825], [-1.30655788172086385, -7.70534135373842854, 15.7624482910170887], [-1.3324446917439301, -6.96505179775236183, 14.2480733514890687], [-1.35379337447281856, -6.3545403291581275, 12.9991792385604317], [-1.37170110775044241, -5.84243064412225532, 11.9515809480848745], [-1.38693776235414457, -5.40670653121107225, 11.0602409692817787], [-1.40005948288717041, -5.03146376662576866, 10.2926247921627141], [-1.41147804091394846, -4.70492680304649458, 9.62464374275380763], [-1.42150482820972912, -4.41819068227348222, 9.03808137395510336], [-1.43037968736993726, -4.16439655816563992, 8.51890687224753762], [-1.43829034549756796, -3.93817594600524545, 8.05613818519768721], [-1.44538584011783389, -3.73526677783260519, 7.64105660421843691], [-1.45178599794765839, -3.55224233540384438, 7.26665225568482143], [-1.45758825712690321, -3.38631615292108812, 6.92722499977987827], [-1.46287266276305816, -3.2351991580336863, 6.61809219067325749], [-1.46770558203455947,
flakey tmpDict["detFail"] = 0 else: # If 1st test passes tmpDict["fail"] = 0 tmpDict["detFail"] = 0 # Store Info from this row abstractTests[abstractTest] = tmpDict countCT = countCT + 1 countRun = countRun + runNumber # Handle data for full summary file # Count unique AT uniqueAT = len(abstractTests) # Global Counts are now done lis = sumPath.split("-") testSuite = lis[-3] modelName = lis[-2] # Need to store to persistant object: based on # Dictionary : Subject Model Name : Dict (variables : values) values = {} # Total # of AT values["AT"] = uniqueAT # Total # of CT values["CT"] = countCT # Total # of Runs values["Run"] = countRun # Total # of Runs Failing values["RunFailed"] = countRunNP # Total # of CT Failing (At least once) - Flakily values["CTFailed"] = countCTFail # Total # of CT Failing - Deterministcially values["CTFailedDet"] = countCTFailDet name = testSuite + '-' + modelName val = [name, values] except: #print(sumPath + " Is missing summary file") # Error catching code for Table generation lis = sumPath.split("-") testSuite = lis[-3] modelName = lis[-2] name = testSuite + '-' + modelName values = {} values["AT"] = 0 values["CT"] = 0 values["Run"] = 0 values["RunFailed"] = 0 values["CTFailed"] = 0 values["CTFailedDet"] = 0 val = [name, values] return val def processSubject(subject): #print(subject) # Load Summaries # Change Directory into subject os.chdir(subject) ##print("Current working directory: {0}".format(os.getcwd())) # Get all Simulation folder simFolders = getSubjects() dataList = {} for sim in simFolders: os.chdir(sim) # Parse folder name lis = sim.split("-") subject = lis[0] testSuite = lis[1] mutation = lis[2] # Load *-Summary.csv from each directory head = os.getcwd() + "/" + subject + "-" + testSuite + "-" + mutation + "-" tail = "summary.csv" sumPath = head + tail # Load SummaryFile from Folder [name, summaryData] = loadSummaryFile(sumPath) if '00' in name: # Skip OO and 100 - golden models pass else: if summaryData['AT'] == 0: pass # Skip 
bad models else: # Add Data to List dataList[name] = summaryData # Return to Subject Directory os.chdir('..') ## Notes for Data inport ## # Counting NPs only # Data to current naming conversions # EQ = IN w/ RC # U = IR # M = S ## At this point have loaded and summed all required data # Process Test Suites # Base invariants dataF = {} dataIN = {} dataEQ = {} dataU = {} # Combos dataFIN = {} dataEQU = {} dataFEQU = {} # Base specifications dataM = {} #for testSuite in ['F','IN','EQ','U','M'] # F for k in dataList: if 'F' in k: # Grab all Data related to test suite lis = k.split("-") modelName = lis[-1] values = dataList[k] # F dataF[modelName] = values # FIN dataFIN[modelName] = values # FEQU dataFEQU[modelName] = values # IN for k in dataList: if 'IN' in k: # Grab all Data related to test suite lis = k.split("-") modelName = lis[-1] values = dataList[k] # IN dataIN[modelName] = values # FIN try: tmpVal = dataFIN[modelName] tmp2 = {} tmp2['AT'] = tmpVal['AT'] + values['AT'] tmp2['CT'] = tmpVal['CT'] + values['CT'] tmp2['CTFailed'] = tmpVal['CTFailed'] + values['CTFailed'] tmp2['CTFailedDet'] = tmpVal['CTFailedDet'] + values['CTFailedDet'] tmp2['Run'] = tmpVal['Run'] + values['Run'] tmp2['RunFailed'] = tmpVal['RunFailed'] + values['RunFailed'] dataFIN[modelName] = tmp2 except: dataFIN[modelName] = values # EQ for k in dataList: if 'EQ' in k: # Grab all Data related to test suite lis = k.split("-") modelName = lis[-1] values = dataList[k] # U dataEQ[modelName] = values # EQU dataEQU[modelName] = values # FEQU try: tmpVal = dataFEQU[modelName] tmp2 = {} tmp2['AT'] = tmpVal['AT'] + values['AT'] tmp2['CT'] = tmpVal['CT'] + values['CT'] tmp2['CTFailed'] = tmpVal['CTFailed'] + values['CTFailed'] tmp2['CTFailedDet'] = tmpVal['CTFailedDet'] + values['CTFailedDet'] tmp2['Run'] = tmpVal['Run'] + values['Run'] tmp2['RunFailed'] = tmpVal['RunFailed'] + values['RunFailed'] dataFEQU[modelName] = tmp2 except: dataFEQU[modelName] = values # U for k in dataList: if 'U' in k: # Grab 
all Data related to test suite lis = k.split("-") modelName = lis[-1] values = dataList[k] # U dataU[modelName] = values # EQU try: tmpVal = dataEQU[modelName] tmp2 = {} tmp2['AT'] = tmpVal['AT'] + values['AT'] tmp2['CT'] = tmpVal['CT'] + values['CT'] tmp2['CTFailed'] = tmpVal['CTFailed'] + values['CTFailed'] tmp2['CTFailedDet'] = tmpVal['CTFailedDet'] + values['CTFailedDet'] tmp2['Run'] = tmpVal['Run'] + values['Run'] tmp2['RunFailed'] = tmpVal['RunFailed'] + values['RunFailed'] dataEQU[modelName] = tmp2 except: dataEQU[modelName] = values # FEQU try: tmpVal = dataFEQU[modelName] tmp2 = {} tmp2['AT'] = tmpVal['AT'] + values['AT'] tmp2['CT'] = tmpVal['CT'] + values['CT'] tmp2['CTFailed'] = tmpVal['CTFailed'] + values['CTFailed'] tmp2['CTFailedDet'] = tmpVal['CTFailedDet'] + values['CTFailedDet'] tmp2['Run'] = tmpVal['Run'] + values['Run'] tmp2['RunFailed'] = tmpVal['RunFailed'] + values['RunFailed'] dataFEQU[modelName] = tmp2 except: dataFEQU[modelName] = values # M for k in dataList: if 'M' in k: # Grab all Data related to test suite lis = k.split("-") modelName = lis[-1] values = dataList[k] # M dataM[modelName] = values ############################## ############################## ############################## ## F ## sumCT = 0 sumNP = 0 sumNPdet = 0 sumRunFailed = 0 sumRun = 0 mutants = 0 mutantsFound = 0 flakeOnly = 0 # Generate Percentages, for each mutant for k in dataF: values = dataF[k] sumCT = sumCT + values['CT'] sumNP = sumNP + values['CTFailed'] sumNPdet = sumNPdet + values['CTFailedDet'] sumRunFailed = sumRunFailed + values['RunFailed'] sumRun = sumRun + values['Run'] # Do math if values['CT'] > 0: values['PercentNP'] = values['CTFailed'] / values['CT'] values['PercentNPdet'] = values['CTFailedDet'] / values['CT'] values['PercentRun'] = values['RunFailed'] / values['Run'] else: values['PercentNP'] = 0 values['PercentNPdet'] = 0 values['PercentRun'] = 0 # Count Mutants Detected mutants = mutants + 1 if values['CTFailed'] > 0: mutantsFound = 
mutantsFound + 1 if values['CTFailedDet'] == 0 and values['CTFailed'] > 0: flakeOnly = flakeOnly + 1 dataF[k] = values try: dataF['AT'] = values['AT'] except: dataF['AT'] = 0 dataF['sumCT'] = sumCT dataF['sumNP'] = sumNP dataF['sumNPdet'] = sumNPdet dataF['sumRunFailed'] = sumRunFailed dataF['sumRun'] = sumRun if dataF['sumCT'] > 0: dataF['PercentNP'] = dataF['sumNP'] / dataF['sumCT'] dataF['PercentNPdet'] = dataF['sumNPdet'] / dataF['sumCT'] dataF['PercentRun'] = dataF['sumRunFailed'] / dataF['sumRun'] dataF['mutantPercent'] = mutantsFound / mutants dataF['flakeOnly'] = flakeOnly / mutants else: dataF['PercentNP'] = 0 dataF['PercentNPdet'] = 0 dataF['PercentRun'] = 0 ## IN ## sumCT = 0 sumNP = 0 sumNPdet = 0 sumRunFailed = 0 sumRun = 0 mutants = 0 mutantsFound = 0 flakeOnly = 0 # Generate Percentages, for each mutant for k in dataIN: values = dataIN[k] sumCT = sumCT + values['CT'] sumNP = sumNP + values['CTFailed'] sumNPdet = sumNPdet + values['CTFailedDet'] sumRunFailed = sumRunFailed + values['RunFailed'] sumRun = sumRun + values['Run'] # Do math if values['CT'] > 0: values['PercentNP'] = values['CTFailed'] / values['CT'] values['PercentNPdet'] = values['CTFailedDet'] / values['CT'] values['PercentRun'] = values['RunFailed'] / values['Run'] else: values['PercentNP'] = 0 values['PercentNPdet'] = 0 values['PercentRun'] = 0 # Count Mutants Detected mutants = mutants + 1 if values['CTFailed'] > 0: mutantsFound = mutantsFound + 1 if values['CTFailedDet'] == 0 and values['CTFailed'] > 0: flakeOnly = flakeOnly + 1 dataIN[k] = values try: dataIN['AT'] = values['AT'] except: dataIN['AT'] = 0 dataIN['sumCT'] = sumCT dataIN['sumNP'] = sumNP dataIN['sumNPdet'] = sumNPdet dataIN['sumRunFailed'] = sumRunFailed dataIN['sumRun'] = sumRun if dataIN['sumCT'] > 0: dataIN['PercentNP'] = dataIN['sumNP'] / dataIN['sumCT'] dataIN['PercentNPdet'] = dataIN['sumNPdet'] / dataIN['sumCT'] dataIN['PercentRun'] = dataIN['sumRunFailed'] / dataIN['sumRun'] dataIN['mutantPercent'] = 
mutantsFound / mutants dataIN['flakeOnly'] = flakeOnly / mutants else: dataIN['PercentNP'] = 0 dataIN['PercentNPdet'] = 0 dataIN['PercentRun'] = 0 ## EQ ## sumCT = 0 sumNP = 0 sumNPdet = 0 sumRunFailed = 0 sumRun = 0 mutants = 0 mutantsFound = 0 flakeOnly = 0 # Generate Percentages, for each mutant for k in dataEQ: values = dataEQ[k] sumCT = sumCT + values['CT'] sumNP = sumNP + values['CTFailed'] sumNPdet = sumNPdet + values['CTFailedDet'] sumRunFailed = sumRunFailed + values['RunFailed'] sumRun = sumRun + values['Run'] # Do
# NOTE(review): this region of the file is a whitespace-collapsed paste from an
# unrelated source file; the code below has been reconstructed into valid,
# conventionally indented Python.  The commented line that follows is the
# corrupted tail of an `add_parity_constraints` variant whose head lies above
# this chunk; it is preserved verbatim (commented out) rather than guessed at:
# self.n + 1 #print "n = ", self.n #print "clauseCount = ", self.clauseCount total_vars_in_parity_constraints = 0 for i in range(0, m): new_function = [] for atom in range(1, self.n + 1): if random.random() < variable_probs[atom-1]: new_function.append(atom) total_vars_in_parity_constraints += 1 if len(new_function) == 0: if random.randint(0, 1) == 0: continue else: self.fail_apriori = True print 'self.fail_apriori = True' for prob in variable_probs: print prob exit(0) return if random.randint(0, 1) == 0: new_function[0] = -new_function[0] if self.max_xor > 0: while len(new_function) > self.max_xor: temp = new_function[0 : self.max_xor - 1] new_function = [cur_index] + new_function[self.max_xor - 1:] temp.append(cur_index) cur_index += 1 self.new_variables += 1 self.hash_functions.append(temp) self.hash_functions.append(new_function) if self.verbose: print("Generated %d parity constraints" % m) if self.max_xor > 0: print("Maximum xor length is %d. Added %d new variables" % (self.max_xor, self.new_variables)) print 'empirical density = ', total_vars_in_parity_constraints/(self.n*m)

# NOTE(review): the three functions below read/write `self.n`, `self.max_xor`,
# `self.verbose`, `self.hash_functions`, `self.new_variables` and
# `self.fail_apriori` -- they look like methods of a SAT/XOR-hashing class
# defined above the visible chunk; re-attach them at method indentation once
# the file is restored -- TODO confirm.
# Python-2 `print` statements were converted to the `print()` function to match
# the Python-3 style used elsewhere in this file; this also makes
# `var_restriction / self.n` true division (under Python-2 integer division the
# guard floored to 0 and spuriously rejected every call with
# var_restriction < n).


def add_parity_constraints_restrict_vars(self, m, f, var_restriction):
    """Add ``m`` parity constraints drawn only from a subset of variables.

    Used to test whether variable overlap between parity constraints makes
    solving time faster or slower.  Each constraint contains ``f * n``
    variables in expectation, restricted to the first ``var_restriction``
    variables; the per-atom probability is scaled up so that the overall
    density ``f`` is maintained.

    Inputs:
    - m: (int) the number of parity constraints
    - f: (float) each parity constraint contains f*n variables in expectation
         (n is the total number of variables)
    - var_restriction: (int) only the first var_restriction variables can be
         included in any parity constraint
    """
    if var_restriction / self.n < f:
        print('var_restriction is too small, cannot increase f sufficiently')
        return
    else:
        # Compute a new f so that we maintain the density f of variables in
        # the parity constraints while restricting to a subset of variables.
        f = self.n * f / var_restriction
    self.hash_functions = []
    self.new_variables = 0
    self.fail_apriori = False
    cur_index = self.n + 1  # first fresh variable for long-xor splitting
    total_vars_in_parity_constraints = 0
    for _ in range(m):
        new_function = []
        for atom in range(1, var_restriction + 1):
            if random.random() < f:
                new_function.append(atom)
                total_vars_in_parity_constraints += 1
        if len(new_function) == 0:
            # Empty xor: with probability 1/2 it is the trivially-true
            # constraint (skip it), otherwise it is unsatisfiable a priori.
            if random.randint(0, 1) == 0:
                continue
            else:
                self.fail_apriori = True
                return
        if random.randint(0, 1) == 0:
            # Flip the parity of the constraint with probability 1/2.
            new_function[0] = -new_function[0]
        if self.max_xor > 0:
            # Split xors longer than max_xor by chaining fresh variables.
            while len(new_function) > self.max_xor:
                temp = new_function[0:self.max_xor - 1]
                new_function = [cur_index] + new_function[self.max_xor - 1:]
                temp.append(cur_index)
                cur_index += 1
                self.new_variables += 1
                self.hash_functions.append(temp)
        self.hash_functions.append(new_function)
    print('empirical density = ',
          total_vars_in_parity_constraints / (self.n * m))
    if self.verbose:
        print("Generated %d parity constraints" % m)
        if self.max_xor > 0:
            print("Maximum xor length is %d. Added %d new variables"
                  % (self.max_xor, self.new_variables))


def add_regular_constraints(self, m, f):
    """Add ``m`` parity constraints according to the combined regular ensemble.

    Each constraint i deterministically contains a contiguous block of
    variables (the blocks partition roughly n/m variables each) and every
    other variable independently with probability ``f - 1/m``.  Falls back to
    plain iid constraints when m*f <= 1.
    """
    if m * f <= 1:
        return self.add_parity_constraints(m, f)
    print("Using regular")
    # Block boundaries: k_range[i]..k_range[i+1] is constraint i's block.
    k_low = int(math.floor(float(self.n) / m))
    k_high = int(math.ceil(float(self.n) / m))
    k_range = [0]
    for i in range(m):
        if (self.n - k_range[i]) % k_low == 0:
            k_high = k_low
        k_range.append(k_range[i] + k_high)
    # Lower the iid probability to keep the expected density at f overall.
    f_updated = f - 1.0 / float(m)
    self.hash_functions = []
    self.new_variables = 0
    self.fail_apriori = False
    cur_index = self.n + 1
    for i in range(m):
        new_function = []
        for atom in range(1, self.n + 1):
            in_block = k_range[i] < atom <= k_range[i + 1]
            if random.random() < f_updated or in_block:
                new_function.append(atom)
        if len(new_function) == 0:
            if random.randint(0, 1) == 0:
                continue
            else:
                self.fail_apriori = True
                return
        if random.randint(0, 1) == 0:
            new_function[0] = -new_function[0]
        if self.max_xor > 0:
            while len(new_function) > self.max_xor:
                temp = new_function[0:self.max_xor - 1]
                new_function = [cur_index] + new_function[self.max_xor - 1:]
                temp.append(cur_index)
                cur_index += 1
                self.new_variables += 1
                self.hash_functions.append(temp)
        self.hash_functions.append(new_function)
    if self.verbose:
        print("Generated %d parity constraints" % m)
        if self.max_xor > 0:
            print("Maximum xor length is %d. Added %d new variables"
                  % (self.max_xor, self.new_variables))


def add_regular_constraints_constantF(self, m, f):
    """Add ``m`` regular-ensemble parity constraints WITHOUT decreasing f.

    Same block construction as :func:`add_regular_constraints`, but each
    non-block variable is included with probability ``f`` unchanged.
    """
    print("Using regular")
    k_low = int(math.floor(float(self.n) / m))
    k_high = int(math.ceil(float(self.n) / m))
    k_range = [0]
    for i in range(m):
        if (self.n - k_range[i]) % k_low == 0:
            k_high = k_low
        k_range.append(k_range[i] + k_high)
    f_updated = f
    self.hash_functions = []
    self.new_variables = 0
    self.fail_apriori = False
    cur_index = self.n + 1
    for i in range(m):
        new_function = []
        for atom in range(1, self.n + 1):
            in_block = k_range[i] < atom <= k_range[i + 1]
            if random.random() < f_updated or in_block:
                new_function.append(atom)
        if len(new_function) == 0:
            if random.randint(0, 1) == 0:
                continue
            else:
                self.fail_apriori = True
                return
        if random.randint(0, 1) == 0:
            new_function[0] = -new_function[0]
        if self.max_xor > 0:
            while len(new_function) > self.max_xor:
                temp = new_function[0:self.max_xor - 1]
                new_function = [cur_index] + new_function[self.max_xor - 1:]
                temp.append(cur_index)
                cur_index += 1
                self.new_variables += 1
                self.hash_functions.append(temp)
        self.hash_functions.append(new_function)
    if self.verbose:
        print("Generated %d parity constraints" % m)
        if self.max_xor > 0:
            print("Maximum xor length is %d. Added %d new variables"
                  % (self.max_xor, self.new_variables))
# NOTE(review): the next (unmodified) collapsed line of the file begins with
# residue of this final print statement from the corrupted original, followed
# by the (truncated) `add_regular_constraints_constantF_permuted` definition.
Added %d new variables" % (self.max_xor, self.new_variables)) def add_regular_constraints_constantF_permuted(self, m, f, f_block, permute=True, k=None, \ ADD_CONSTRAINT_ALL_ONES=False, change_var_names=True, variable_subset=None): """ Add m parity constraints, according to the new combined ensemble without decreasing f, adding block 1's with probability f_block, and permuting columns """ if ADD_CONSTRAINT_ALL_ONES: m_effective = m - 1 else: m_effective = m if k==None or k*m_effective > self.n: #use k = n/m_effective k_low = int(math.floor(float(self.n) / m_effective)) k_high = int(math.ceil(float(self.n) / m_effective)) else: k_low = k k_high = k number_k_high_blocks = self.n%m_effective k_range = [0] for i in range(number_k_high_blocks): k_range.append(k_range[i] + k_high) for i in range(number_k_high_blocks, m_effective): k_range.append(k_range[i] + k_low) if k==None or k*m_effective > self.n: #use k = n/m_effective assert(k_range[-1] == self.n) # print k_range block_diag_matrix = np.zeros((m, self.n)) #construct block diagonal 1's matrix for i in range(0, m): for atom in range(1, self.n+1): if ADD_CONSTRAINT_ALL_ONES: if i == 0: block_diag_matrix[i, atom-1] = 1 elif (atom > k_range[(i-1)] and atom <= k_range[(i-1)+1]): block_diag_matrix[i, atom-1] = 1 else: if (atom > k_range[i] and atom <= k_range[i+1]): block_diag_matrix[i, atom-1] = 1 # print block_diag_matrix #permute the columns of the block diagonal matrix if permute and (not change_var_names): permuted_block_diag_matrix = np.swapaxes(np.random.permutation(np.swapaxes(block_diag_matrix,0,1)),0,1) elif permute and change_var_names: #permute columns of the parity constraint matrix, but keep blocks of ones by renaming the orginal variables permuted_vars_matrix = np.swapaxes(np.random.permutation(np.swapaxes(block_diag_matrix,0,1)),0,1) new_var_name = 1 orig_var_names = set() #dictionary with key: original variable name (1 indexd), value: new variable name (1 indexd) new_var_names = {} for row in 
range(permuted_vars_matrix.shape[0]): for col in range(permuted_vars_matrix.shape[1]): if permuted_vars_matrix[row, col] == 1: new_var_names[col+1] = new_var_name orig_var_names.add(col+1) new_var_name+=1 for orig_var_name in range(1, self.n+1): if not orig_var_name in orig_var_names: new_var_names[orig_var_name] = new_var_name new_var_name+=1 orig_var_names.add(orig_var_name) assert(len(orig_var_names) == self.n), (len(orig_var_names), self.n) assert(new_var_name == self.n + 1), (new_var_name, self.n + 1) renamed_clauses = [] #perform duplication for cur_clause in self.clauses: new_clause = [] for literal in cur_clause.split(): var = np.abs(int(literal)) literal_sign = np.sign(int(literal)) if var > 0: if var in new_var_names: new_literal = literal_sign*(new_var_names[var]) else: new_literal = literal new_clause.append(new_literal) else: new_clause.append(0) renamed_clauses.append(' '.join(str(var) for var in new_clause)) self.clauses = renamed_clauses permuted_block_diag_matrix = block_diag_matrix #print new_var_names #print permuted_block_diag_matrix #prin else: permuted_block_diag_matrix = block_diag_matrix # print permuted_block_diag_matrix f_updated = f self.hash_functions = [] self.new_variables = 0 self.fail_apriori = False curIndex = self.n + 1 total_vars_in_parity_constraints = 0 for i in range(0, m): new_function = [] for atom in range(1, self.n + 1): #check block diagonal construction if ADD_CONSTRAINT_ALL_ONES: if i == 0: assert (block_diag_matrix[i, atom-1] == 1) elif (atom > k_range[(i-1)] and atom <= k_range[(i-1)+1]): assert (block_diag_matrix[i, atom-1] == 1) else: assert (block_diag_matrix[i, atom-1] == 0) else: if (atom > k_range[i] and atom <= k_range[i+1]): assert (block_diag_matrix[i, atom-1] == 1) else: assert (block_diag_matrix[i, atom-1] == 0) #is this element part of a permuted block? 
if (permuted_block_diag_matrix[i, atom-1] == 1): #if so, add variable with probabilty f_block if (random.random() < f_block and variable_subset == None) or\ (random.random() < f_block and atom in variable_subset): new_function.append(atom) total_vars_in_parity_constraints += 1 #if this element isn't part of a permuted block, add variable with probability f_updated elif (random.random() < f_updated and variable_subset == None) or\ (random.random() < f_updated and atom in variable_subset): new_function.append(atom) total_vars_in_parity_constraints += 1 if len(new_function) == 0: if random.randint(0, 1) == 0: continue
defined custom resource have. """ additionalPrinterColumns = _messages.MessageField('CustomResourceColumnDefinition', 1, repeated=True) deprecated = _messages.BooleanField(2) deprecationWarning = _messages.StringField(3) name = _messages.StringField(4) schema = _messages.MessageField('CustomResourceValidation', 5) served = _messages.BooleanField(6) storage = _messages.BooleanField(7) subresources = _messages.MessageField('CustomResourceSubresources', 8) class CustomResourceSubresourceScale(_messages.Message): r"""CustomResourceSubresourceScale defines how to serve the scale subresource for CustomResources. Fields: labelSelectorPath: LabelSelectorPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Selector. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. Must be set to work with HPA. If there is no value under the given path in the CustomResource, the status label selector value in the /scale subresource will default to the empty string. +optional specReplicasPath: SpecReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Spec.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .spec. If there is no value under the given path in the CustomResource, the /scale subresource will return an error on GET. statusReplicasPath: StatusReplicasPath defines the JSON path inside of a CustomResource that corresponds to Scale.Status.Replicas. Only JSON paths without the array notation are allowed. Must be a JSON Path under .status. If there is no value under the given path in the CustomResource, the status replica value in the /scale subresource will default to 0. 
""" labelSelectorPath = _messages.StringField(1) specReplicasPath = _messages.StringField(2) statusReplicasPath = _messages.StringField(3) class CustomResourceSubresourceStatus(_messages.Message): r"""CustomResourceSubresourceStatus defines how to serve the status subresource for CustomResources. Status is represented by the `.status` JSON path inside of a CustomResource. When set, * exposes a /status subresource for the custom resource * PUT requests to the /status subresource take a custom resource object, and ignore changes to anything except the status stanza * PUT/POST/PATCH requests to the custom resource ignore changes to the status stanza """ class CustomResourceSubresources(_messages.Message): r"""CustomResourceSubresources defines the status and scale subresources for CustomResources. Fields: scale: Scale denotes the scale subresource for CustomResources +optional status: Status denotes the status subresource for CustomResources +optional """ scale = _messages.MessageField('CustomResourceSubresourceScale', 1) status = _messages.MessageField('CustomResourceSubresourceStatus', 2) class CustomResourceValidation(_messages.Message): r"""CustomResourceValidation is a list of validation methods for CustomResources. Fields: openAPIV3Schema: OpenAPIV3Schema is the OpenAPI v3 schema to be validated against. """ openAPIV3Schema = _messages.MessageField('JSONSchemaProps', 1) class DeliverySpec(_messages.Message): r"""From https://github.com/knative/eventing/blob/master/pkg/apis/duck/v1/de livery_types.go Fields: backoffDelay: BackoffDelay is the delay before retrying. More information on Duration format: - https://www.iso.org/iso-8601-date-and-time- format.html - https://en.wikipedia.org/wiki/ISO_8601 For linear policy, backoff delay is the time interval between retries. For exponential policy , backoff delay is backoffDelay*2^. 
+optional BackoffDelay *string `json:"backoffDelay,omitempty" backoffPolicy: BackoffPolicy is the retry backoff policy (linear, exponential). deadLetterSink: DeadLetterSink is the sink receiving event that could not be sent to a destination. retry: Retry is the minimum number of retries the sender should attempt when sending an event before moving it to the dead letter sink. """ backoffDelay = _messages.StringField(1) backoffPolicy = _messages.StringField(2) deadLetterSink = _messages.MessageField('Destination', 3) retry = _messages.IntegerField(4, variant=_messages.Variant.INT32) class Destination(_messages.Message): r"""From https://github.com/knative/pkg/blob/master/apis/duck/v1/destination.go Destination represents a target of an invocation over HTTP. Fields: ref: Ref points to an Addressable. uri: URI can be an absolute URL(non-empty scheme and non-empty host) pointing to the target or a relative URI. Relative URIs will be resolved using the base URI retrieved from Ref. """ ref = _messages.MessageField('KReference', 1) uri = _messages.StringField(2) class Empty(_messages.Message): r"""A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. """ class ExternalDocumentation(_messages.Message): r"""ExternalDocumentation allows referencing an external resource for extended documentation. Fields: description: A string attribute. url: A string attribute. """ description = _messages.StringField(1) url = _messages.StringField(2) class JSON(_messages.Message): r"""JSON represents any valid JSON value. These types are supported: bool, int64, float64, string, []interface{}, map[string]interface{} and nil. Fields: raw: A byte attribute. 
""" raw = _messages.BytesField(1) class JSONSchemaProps(_messages.Message): r"""JSONSchemaProps is a JSON-Schema following Specification Draft 4 (http://json-schema.org/). Messages: DefinitionsValue: A DefinitionsValue object. DependenciesValue: A DependenciesValue object. PatternPropertiesValue: A PatternPropertiesValue object. PropertiesValue: A PropertiesValue object. Fields: additionalItems: A JSONSchemaPropsOrBool attribute. additionalProperties: A JSONSchemaPropsOrBool attribute. allOf: A JSONSchemaProps attribute. anyOf: A JSONSchemaProps attribute. default: A JSON attribute. definitions: A DefinitionsValue attribute. dependencies: A DependenciesValue attribute. description: A string attribute. enum: A string attribute. example: A JSON attribute. exclusiveMaximum: A boolean attribute. exclusiveMinimum: A boolean attribute. externalDocs: A ExternalDocumentation attribute. format: A string attribute. id: A string attribute. items: A JSONSchemaPropsOrArray attribute. maxItems: A string attribute. maxLength: A string attribute. maxProperties: A string attribute. maximum: A number attribute. minItems: A string attribute. minLength: A string attribute. minProperties: A string attribute. minimum: A number attribute. multipleOf: A number attribute. not_: A JSONSchemaProps attribute. oneOf: A JSONSchemaProps attribute. pattern: A string attribute. patternProperties: A PatternPropertiesValue attribute. properties: A PropertiesValue attribute. ref: A string attribute. required: A string attribute. schema: A string attribute. title: A string attribute. type: A string attribute. uniqueItems: A boolean attribute. """ @encoding.MapUnrecognizedFields('additionalProperties') class DefinitionsValue(_messages.Message): r"""A DefinitionsValue object. Messages: AdditionalProperty: An additional property for a DefinitionsValue object. 
Fields: additionalProperties: Additional properties of type DefinitionsValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a DefinitionsValue object. Fields: key: Name of the additional property. value: A JSONSchemaProps attribute. """ key = _messages.StringField(1) value = _messages.MessageField('JSONSchemaProps', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class DependenciesValue(_messages.Message): r"""A DependenciesValue object. Messages: AdditionalProperty: An additional property for a DependenciesValue object. Fields: additionalProperties: Additional properties of type DependenciesValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a DependenciesValue object. Fields: key: Name of the additional property. value: A JSONSchemaPropsOrStringArray attribute. """ key = _messages.StringField(1) value = _messages.MessageField('JSONSchemaPropsOrStringArray', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class PatternPropertiesValue(_messages.Message): r"""A PatternPropertiesValue object. Messages: AdditionalProperty: An additional property for a PatternPropertiesValue object. Fields: additionalProperties: Additional properties of type PatternPropertiesValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a PatternPropertiesValue object. Fields: key: Name of the additional property. value: A JSONSchemaProps attribute. """ key = _messages.StringField(1) value = _messages.MessageField('JSONSchemaProps', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) @encoding.MapUnrecognizedFields('additionalProperties') class PropertiesValue(_messages.Message): r"""A PropertiesValue object. 
Messages: AdditionalProperty: An additional property for a PropertiesValue object. Fields: additionalProperties: Additional properties of type PropertiesValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a PropertiesValue object. Fields: key: Name of the additional property. value: A JSONSchemaProps attribute. """ key = _messages.StringField(1) value = _messages.MessageField('JSONSchemaProps', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) additionalItems = _messages.MessageField('JSONSchemaPropsOrBool', 1) additionalProperties = _messages.MessageField('JSONSchemaPropsOrBool', 2) allOf = _messages.MessageField('JSONSchemaProps', 3, repeated=True) anyOf = _messages.MessageField('JSONSchemaProps', 4, repeated=True) default = _messages.MessageField('JSON', 5) definitions = _messages.MessageField('DefinitionsValue', 6) dependencies = _messages.MessageField('DependenciesValue', 7) description = _messages.StringField(8) enum = _messages.StringField(9, repeated=True) example = _messages.MessageField('JSON', 10) exclusiveMaximum = _messages.BooleanField(11) exclusiveMinimum = _messages.BooleanField(12) externalDocs = _messages.MessageField('ExternalDocumentation', 13) format = _messages.StringField(14) id = _messages.StringField(15) items = _messages.MessageField('JSONSchemaPropsOrArray', 16) maxItems = _messages.IntegerField(17) maxLength = _messages.IntegerField(18) maxProperties = _messages.IntegerField(19) maximum = _messages.FloatField(20) minItems = _messages.IntegerField(21) minLength = _messages.IntegerField(22) minProperties = _messages.IntegerField(23) minimum = _messages.FloatField(24) multipleOf = _messages.FloatField(25) not_ = _messages.MessageField('JSONSchemaProps', 26) oneOf = _messages.MessageField('JSONSchemaProps', 27, repeated=True) pattern = _messages.StringField(28) patternProperties = _messages.MessageField('PatternPropertiesValue', 29) properties = 
_messages.MessageField('PropertiesValue', 30) ref = _messages.StringField(31) required = _messages.StringField(32, repeated=True) schema = _messages.StringField(33) title = _messages.StringField(34) type = _messages.StringField(35) uniqueItems = _messages.BooleanField(36) class JSONSchemaPropsOrArray(_messages.Message): r"""JSONSchemaPropsOrArray represents a value that can either be a JSONSchemaProps or an array of JSONSchemaProps. Mainly here for serialization purposes. Fields: jsonSchemas: A JSONSchemaProps attribute. schema: A JSONSchemaProps attribute. """ jsonSchemas = _messages.MessageField('JSONSchemaProps', 1, repeated=True) schema = _messages.MessageField('JSONSchemaProps', 2) class JSONSchemaPropsOrBool(_messages.Message): r"""JSONSchemaPropsOrBool represents JSONSchemaProps or a boolean value. Defaults to true for the boolean property. Fields: allows: A boolean attribute. schema: A JSONSchemaProps attribute. """ allows = _messages.BooleanField(1) schema = _messages.MessageField('JSONSchemaProps', 2) class JSONSchemaPropsOrStringArray(_messages.Message): r"""JSONSchemaPropsOrStringArray represents a JSONSchemaProps or a string array. Fields: property: A string attribute. schema: A JSONSchemaProps attribute. """ property = _messages.StringField(1, repeated=True) schema = _messages.MessageField('JSONSchemaProps', 2) class KReference(_messages.Message): r"""from https://github.com/knative/pkg/blob/master/apis/duck/v1/knative_reference.go KReference contains enough information to refer to another object. It's a trimmed down version of corev1.ObjectReference. Fields: apiVersion:
frames in # the speech buffer. This value is used by the chosen 'playback' function() # to stop playing the audio. eos_index = SB_SAMPLE_SIZE # The 'end of speech' (eos) flag. Used inside the `_capture_function()` # this is set when it has detected the end of speech during the recording # phase. If no end of speech is detected it is set when the speech buffer # is full. # # When set the `_capture_function()` stops recording. eos = False # The 'estimate' ADC value that represents silence (zero). # The value is adjusted during the attenuation phase, # which runs (if not disabled) after each recording. adc_zero = SILENCE # The current speech-detection buffer _write offset_. A circular offset # used by `_capture_function()`. Updated from within `_capture_function()`. sdb_wr_offset = 0 # When we've detected speech we switch from writing to the speech detection # buffer to writing to the main speech buffer. The offset accommodates a copy # of the speech detection buffer, which is copied over the start of the speech # buffer prior to playback. # # The `sb_wr_offset` is updated from within `_capture_function()` # and reset to this value once recording has finished. sb_wr_offset = SDB_SAMPLE_SIZE # The 'read' offset into the speech buffer for samples being played back. # This is initialised to zero and is used by the `_playback_function()` # to read samples from the speech buffer and write them to the DAC, # until the end-of-speech index has been reached. sb_rd_offset = 0 # The initialisation state. # Set after `_init()` has completed successfully. initialised = False # --------------------------- # MicroPython/PyBoard objects # --------------------------- # The numerous PyBoard objects, timers, LEDs etc. Objects that need # configuration after construction are configured inside `_init()`. # The capture timer. This is used to invoke our `_capture_function()` # at the designated SAMPLE_FREQUENCY_HZ. 
# Configured in `_init()` and the function attached ans detached capture_timer = pyb.Timer(14) # The playback timer. This is used to invoke our chosen 'playback' function # at the required rate, which is PLAYBACK_FREQUENCY_HZ (or # 2 x CAPTURE_FREQUENCY_HZ if we're over-sampling the playback). # Configured in `_init()`. playback_timer = pyb.Timer(13) # LED objects red_led = pyb.LED(1) grn_led = pyb.LED(2) amb_led = pyb.LED(3) blu_led = pyb.LED(4) # ADC (Microphone) and DAC (loudspeaker) adc = pyb.ADC(pyb.Pin.board.X22) dac = pyb.DAC(1, bits=CAPTURE_BITS) # Switch object. During initialisation this will be used # to attach a handler function (`_user_switch_callback`) for the USER switch. sw = pyb.Switch() # Hardware timing pins. # This pin voltage is lowered on entry to the time-critical capture and # playback functions and raised on exit form the function. # Attach an oscilloscope to these pins to measure # the collection or playback callback duration. capture_timing_pin = pyb.Pin(pyb.Pin.board.Y1, pyb.Pin.OUT_PP) playback_timing_pin = pyb.Pin(pyb.Pin.board.Y2, pyb.Pin.OUT_PP) # ------------------------------ # Audio storage (sample buffers) # ------------------------------ # Buffers to store captured audio samples, depending on chosen sample # resolution (8 or 12 bits). # # The size of the arrays will be set by appending zeros during `_init()`. # We need one for the circular 'speech detection' buffer. # We need one to record the 'speech' to once speech has been detected. if CAPTURE_BITS == 8: sd_buf = array('B') s_buf = array('B') else: sd_buf = array('H') s_buf = array('H') # --------------------------------- # Silence attenuation configuration # --------------------------------- # Enabled? ATTENUATE_SILENCE = True # Speech threshold during attenuation - the absolute difference between the # silence estimate and a sample for it to be considered speech during # an attenuation frame. 
This is normally higher than the SPEECH_THRESHOLD # so we only attenuate if we're really sure it's not speech. if CAPTURE_BITS == 8: ATTENUATE_SPEECH_THRESHOLD = 50 # Trial & Error else: ATTENUATE_SPEECH_THRESHOLD = 800 # Trial & Error # The percentage of samples that need to be _speech_ in a speech-buffer # frame to prevent it from being attenuated. This is somewhat lower # than the corresponding speech detection threshold so only a few samples # need to represent speech to prevent the fame from being attenuated. # It's better to attenuate when we're _really_ sure it's silence because # attenuating to quickly or too close to speech can be disconcerting for # the listener. ATTENUATION_SAMPLES_PCENT = 1 # Frame period samples required to be speech # before the frame is considered part of speech. ATTENUATION_SPEECH_SAMPLE_THRESHOLD = FRAME_PERIOD_SAMPLES * \ ATTENUATION_SAMPLES_PCENT // 100 # An array to hold a list of the first sample index of silent frames. # Used during a 2nd-pass in attenuation to quickly attenuate silent # frames found in the 1st-pass. If attenuation is enabled this array is sized # by pre-populating zero values in `_init()`. silent_frames = array('I') # -------------------------------- # Configuration of diagnostic dump # -------------------------------- # Dump collections to a connected SD card. # Set to write collected data to an attached SD card. # Writing to the SD card will only take place if it looks like # there's an SD card present. # # Incidentally ... You will need to hard-reset the card # before you can see any written files. DUMP_TO_SD_CARD = False # The maximum number of capture files to maintain. # The files are used on a round-robin basis by writing # to capture file 1, then capture file 2, etc. DUMP_FILE_LIMIT = 50 # The next capture file number. # incremented in a circular fashion in `_dump_capture_info()`. 
# The next capture file number (1..DUMP_FILE_LIMIT, round-robin);
# incremented in `_dump_capture_info()`.
dump_file_num = 1


# -----------------------------------------------------------------------------
def _init():
    """Initialise the application data and hardware objects.

    If initialisation fails, i.e. it can't set the loudspeaker volume,
    it returns None. If initialisation fails the capture-playback loop
    should not run. If already initialised this function does nothing.

    Returns 'True' if successfully initialised.
    """
    global initialised
    # Do nothing if already initialised
    # NOTE(review): this path returns None even though the module IS
    # initialised, while the docstring promises 'True' on success — a caller
    # checking the return value would treat a repeated call as a failure.
    # Confirm whether callers rely on the return value here.
    if initialised:
        return
    print('Initialising...')
    # Sanity-check on capture resolution
    if CAPTURE_BITS not in [8, 12]:
        print('CAPTURE_BITS must be 8 or 12, not {}'.format(CAPTURE_BITS))
        return
    # The speech buffer must be strictly larger than the speech-detection
    # buffer: a copy of the detection buffer is placed at its start.
    if SB_SAMPLE_SIZE <= SDB_SAMPLE_SIZE:
        print('SB_SAMPLE_SIZE must be greater than SDB_SAMPLE_SIZE')
        return
    # Set loud-speaker volume.
    # This may fail if there are problems with the board.
    if not _set_volume(LS_VOLUME):
        print('set_volume({}) failed.'
              ' Is the Audio Skin attached?'.format(LS_VOLUME))
        return
    # Initial LED states.
    grn_led.on()   # Lit when listening (flashing when 'on hold')
    amb_led.off()  # Lit when writing to the speech buffer
    blu_led.off()  # Lit when playing back the speech buffer
    red_led.off()  # Lit when writing to SD card/flash
    # Initialise the hardware timing pins (set them to 'high').
    capture_timing_pin.high()
    playback_timing_pin.high()
    # Create each capture array
    # by appending the appropriate number of samples...
    for _ in range(SDB_SAMPLE_SIZE):
        sd_buf.append(0)
    for _ in range(SB_SAMPLE_SIZE):
        s_buf.append(0)
    # Create the attenuator's frame sample array
    # (if we're attenuating)...
    if ATTENUATE_SILENCE:
        for _ in range(SB_FRAME_COUNT):
            silent_frames.append(0)
    # Create a timer we attach our collect function when we `listen`.
    # The function will do nothing while 'capture' is False.
    capture_timer.init(freq=CAPTURE_FREQUENCY_HZ)
    # Same with the playback function...
    # If we're over-sampling the playback the playback frequency
    # is set to 2x the capture frequency and PLAYBACK_FREQUENCY_HZ
    # is not used.
    if USE_OVER_SAMPLE_PLAYBACK:
        playback_timer.init(freq=CAPTURE_FREQUENCY_HZ * 2)
    else:
        playback_timer.init(freq=PLAYBACK_FREQUENCY_HZ)
    # Attach a service function that will handle the USER switch being hit.
    # The supplied function simply toggles the `on_hold` flag.
    sw.callback(_user_switch_callback)
    initialised = True
    print('Initialised.')
    return True


# -----------------------------------------------------------------------------
def _user_switch_callback():
    """Called in response to the USER switch being depressed.

    Toggles the global `on_hold` flag. When 'on-hold' (not listening) the
    green LED flashes. When listening the green LED is continuously lit.
    """
    global on_hold
    # Just toggle the 'on hold' state
    if on_hold:
        on_hold = False
    else:
        on_hold = True


# -----------------------------------------------------------------------------
def _dump_capture_info():
    """Dumps capture data and timing statistics to a file.

    This only acts if dumping has been enabled and if an SD card is
    present. Writes the current offsets plus the full speech-detection
    buffer and the speech buffer (up to `eos_index`), one value per line,
    to a round-robin capture file on the SD card.
    """
    global dump_file_num
    # Do nothing if not enabled.
    if not DUMP_TO_SD_CARD:
        return
    # Do not capture if it looks like there's no SD card.
    # NOTE(review): the message says 'DUMP_TO_FILE' but the flag checked
    # above is DUMP_TO_SD_CARD — confirm which name is intended.
    if SD_ROOT not in sys.path:
        print('DUMP_TO_FILE is set but there is no SD card.')
        return
    # Indicate we're writing to the SD card
    # by lighting the red LED...
    red_led.on()
    # Construct the intended dump file name...
    dump_file = '{}/PyBdEcho.{}.txt'.format(SD_ROOT, dump_file_num)
    # What's the next file number? (1..N)
    dump_file_num += 1
    if dump_file_num > DUMP_FILE_LIMIT:
        dump_file_num = 1
    # Open, write, close...
    print('Dumping to {}...'.format(dump_file))
    fp = open(dump_file, 'w')
    fp.write("adc_zero {}\n".format(adc_zero))
    fp.write("sb_wr_offset {}\n".format(sb_wr_offset))
    fp.write("sb_rd_offset {}\n".format(sb_rd_offset))
    fp.write("sdb_wr_offset {}\n".format(sdb_wr_offset))
    fp.write("eos_index {}\n".format(eos_index))
    fp.write("sdb->\n")
    for i in range(SDB_SAMPLE_SIZE):
        value = sd_buf[i]
        fp.write("{}\n".format(value))
    fp.write("sb->\n")
    for i in range(eos_index):
        value = s_buf[i]
        fp.write("{}\n".format(value))
    fp.close()
    print('Dumped.')
    # Indicate end of file operations...
    red_led.off()


# -----------------------------------------------------------------------------
def _set_volume(volume):
    """Sets the loudspeaker volume. Range is 0 (off) to 127.
<filename>openamundsen/modules/snow/multilayermodel.py import numpy as np from numba import njit, prange from openamundsen import constants, constants as c, heatconduction from openamundsen.snowmodel import SnowModel from . import snow class MultilayerSnowModel(SnowModel): def __init__(self, model): self.model = model s = model.state.snow num_snow_layers = len(model.config.snow.min_thickness) s.add_variable('num_layers', '1', 'Number of snow layers', dtype=int, retain=True) s.add_variable('thickness', 'm', 'Snow thickness', dim3=num_snow_layers, retain=True) s.add_variable('density', 'kg m-3', 'Snow density', 'snow_density', dim3=num_snow_layers) s.add_variable('ice_content', 'kg m-2', 'Ice content of snow', dim3=num_snow_layers, retain=True) s.add_variable('liquid_water_content', 'kg m-2', 'Liquid water content of snow', 'liquid_water_content_of_snow_layer', dim3=num_snow_layers, retain=True) s.add_variable('temp', 'K', 'Snow temperature', dim3=num_snow_layers, retain=True) s.add_variable('therm_cond', 'W m-1 K-1', 'Thermal conductivity of snow', dim3=num_snow_layers, retain=True) s.add_variable('heat_cap', 'J K-1 m-2', 'Areal heat capacity of snow', dim3=num_snow_layers) def initialize(self): roi = self.model.grid.roi s = self.model.state.snow s.swe[roi] = 0 s.depth[roi] = 0 s.area_fraction[roi] = 0 s.num_layers[roi] = 0 s.sublimation[roi] = 0 s.therm_cond[:, roi] = self.model.config.snow.thermal_conductivity s.thickness[:, roi] = 0 s.ice_content[:, roi] = 0 s.liquid_water_content[:, roi] = 0 s.temp[:, roi] = constants.T0 def albedo_aging(self): snow.albedo(self.model) def compaction(self): snow.compaction(self.model) def accumulation(self): model = self.model s = model.state pos = s.meteo.snowfall > 0 self.add_snow( pos, s.meteo.snowfall[pos], density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]), ) def heat_conduction(self): model = self.model s = model.state _heat_conduction( model.grid.roi_idxs, s.snow.num_layers, s.snow.thickness, s.soil.thickness, 
model.timestep, s.snow.temp, s.snow.therm_cond, s.soil.therm_cond, s.surface.heat_flux, s.snow.heat_cap, ) def melt(self): model = self.model s = model.state _melt( model.grid.roi_idxs, model.timestep, s.snow.num_layers, s.snow.melt, s.snow.thickness, s.snow.temp, s.snow.ice_content, s.snow.liquid_water_content, s.snow.heat_cap, ) def sublimation(self): model = self.model s = model.state # First resublimation frost = -np.minimum(s.snow.sublimation, 0) pos = frost > 0 self.add_snow( pos, frost[pos], density=snow.fresh_snow_density(s.meteo.wet_bulb_temp[pos]), ) # Then sublimation _sublimation( model.grid.roi_idxs, model.timestep, s.snow.num_layers, s.snow.ice_content, s.snow.thickness, s.snow.sublimation, ) def runoff(self): model = self.model s = model.state _runoff( model.grid.roi_idxs, snow.max_liquid_water_content(model), s.meteo.rainfall, s.snow.num_layers, s.snow.thickness, s.snow.temp, s.snow.ice_content, s.snow.liquid_water_content, s.snow.runoff, s.snow.heat_cap, ) def update_layers(self): model = self.model s = model.state _update_layers( model.grid.roi_idxs, s.snow.num_layers, np.array(model.config.snow.min_thickness), s.snow.thickness, s.snow.ice_content, s.snow.liquid_water_content, s.snow.heat_cap, s.snow.temp, s.snow.density, s.snow.depth, ) s.snow.albedo[s.snow.num_layers == 0] = np.nan def update_properties(self): snow.snow_properties(self.model) def add_snow( self, pos, ice_content, liquid_water_content=0, density=None, albedo=None, ): """ Add snow to the top of the snowpack. 
""" model = self.model s = model.state ice_content = np.nan_to_num(ice_content, nan=0., copy=True) pos_init = (s.snow.num_layers[pos] == 0) & (ice_content > 0) pos_init_global = model.global_mask(pos_init, pos) # If albedo is None, set it to the maximum albedo for currently snow-free pixels and keep # the current albedo for the other pixels if albedo is None: albedo = s.snow.albedo[pos] albedo[pos_init] = model.config.snow.albedo.max s.snow.albedo[pos] = albedo # Initialize first snow layer where necessary s.snow.num_layers[pos_init_global] = 1 s.snow.temp[0, pos_init_global] = np.minimum(s.meteo.temp[pos_init_global], constants.T0) # Add snow to first layer s.snow.ice_content[0, pos] += ice_content s.snow.liquid_water_content[0, pos] += liquid_water_content s.snow.thickness[0, pos] += ice_content / density @njit(cache=True, parallel=True) def _melt( roi_idxs, timestep, num_layers, melt, thickness, temp, ice_content, liquid_water_content, heat_cap, ): """ Calculate snowmelt following [1]. Parameters ---------- roi_idxs : ndarray(int, ndim=2) (N, 2)-array specifying the (row, col) indices within the data arrays that should be considered. timestep : float Model timestep (s). num_layers : ndarray(float, ndim=2) Number of snow layers. melt : ndarray(float, ndim=2) Snowmelt (kg m-2). thickness : ndarray(float, ndim=3) Snow thickness (m). temp : ndarray(float, ndim=3) Snow temperature (K). ice_content : ndarray(float, ndim=3) Ice content of snow (kg m-2). liquid_water_content : ndarray(float, ndim=3) Liquid water content of snow (kg m-2). heat_cap : ndarray(float, ndim=3) Areal heat capacity of snow (J K-1 m-2). References ---------- .. [1] <NAME>. (2015). A factorial snowpack model (FSM 1.0). Geoscientific Model Development, 8(12), 3867–3876. 
https://doi.org/10.5194/gmd-8-3867-2015 """ num_pixels = len(roi_idxs) for idx_num in prange(num_pixels): i, j = roi_idxs[idx_num] ice_content_change = melt[i, j] for k in range(num_layers[i, j]): cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j]) if cold_content < 0: ice_content_change -= cold_content / c.LATENT_HEAT_OF_FUSION temp[k, i, j] = c.T0 if ice_content_change > 0: if ice_content_change > ice_content[k, i, j]: # layer melts completely ice_content_change -= ice_content[k, i, j] thickness[k, i, j] = 0. liquid_water_content[k, i, j] += ice_content[k, i, j] ice_content[k, i, j] = 0. else: # layer melts partially thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j]) ice_content[k, i, j] -= ice_content_change liquid_water_content[k, i, j] += ice_content_change ice_content_change = 0. @njit(cache=True, parallel=True) def _sublimation( roi_idxs, timestep, num_layers, ice_content, thickness, sublimation, ): """ Calculate snow sublimation following [1]. Parameters ---------- roi_idxs : ndarray(int, ndim=2) (N, 2)-array specifying the (row, col) indices within the data arrays that should be considered. timestep : float Model timestep (s). num_layers : ndarray(float, ndim=2) Number of snow layers. ice_content : ndarray(float, ndim=3) Ice content of snow (kg m-2). thickness : ndarray(float, ndim=3) Snow thickness (m). sublimation : ndarray(float, ndim=2) Snow sublimation (kg m-2). References ---------- .. [1] <NAME>. (2015). A factorial snowpack model (FSM 1.0). Geoscientific Model Development, 8(12), 3867–3876. https://doi.org/10.5194/gmd-8-3867-2015 """ num_pixels = len(roi_idxs) for idx_num in prange(num_pixels): i, j = roi_idxs[idx_num] ice_content_change = max(sublimation[i, j], 0.) if ice_content_change > 0: for k in range(num_layers[i, j]): if ice_content_change > ice_content[k, i, j]: # complete sublimation of layer ice_content_change -= ice_content[k, i, j] thickness[k, i, j] = 0. ice_content[k, i, j] = 0. 
else: # partial sublimation thickness[k, i, j] *= (1 - ice_content_change / ice_content[k, i, j]) ice_content[k, i, j] -= ice_content_change ice_content_change = 0. @njit(cache=True, parallel=True) def _runoff( roi_idxs, max_liquid_water_content, rainfall, num_layers, thickness, temp, ice_content, liquid_water_content, runoff, heat_cap, ): """ Calculate snowmelt runoff following [1]. Parameters ---------- roi_idxs : ndarray(int, ndim=2) (N, 2)-array specifying the (row, col) indices within the data arrays that should be considered. max_liquid_water_content : ndarray(float, ndim=3) Maximum liquid water content (kg m-2). rainfall : ndarray(float, ndim=2) Rainfall amount (kg m-2). num_layers : ndarray(float, ndim=2) Number of snow layers. thickness : ndarray(float, ndim=3) Snow thickness (m). temp : ndarray(float, ndim=3) Snow temperature (K). ice_content : ndarray(float, ndim=3) Ice content of snow (kg m-2). liquid_water_content : ndarray(float, ndim=3) Liquid water content of snow (kg m-2). runoff : ndarray(float, ndim=2) Snow runoff (kg m-2). heat_cap : ndarray(float, ndim=3) Areal heat capacity of snow (J K-1 m-2). References ---------- .. [1] <NAME>. (2015). A factorial snowpack model (FSM 1.0). Geoscientific Model Development, 8(12), 3867–3876. https://doi.org/10.5194/gmd-8-3867-2015 """ num_pixels = len(roi_idxs) for idx_num in prange(num_pixels): i, j = roi_idxs[idx_num] runoff[i, j] = rainfall[i, j] if np.isnan(runoff[i, j]): runoff[i, j] = 0. for k in range(num_layers[i, j]): liquid_water_content[k, i, j] += runoff[i, j] if liquid_water_content[k, i, j] > max_liquid_water_content[k, i, j]: runoff[i, j] = liquid_water_content[k, i, j] - max_liquid_water_content[k, i, j] liquid_water_content[k, i, j] = max_liquid_water_content[k, i, j] else: runoff[i, j] = 0. 
# Refreeze liquid water cold_content = heat_cap[k, i, j] * (c.T0 - temp[k, i, j]) if cold_content > 0: ice_content_change = min( liquid_water_content[k, i, j], cold_content / c.LATENT_HEAT_OF_FUSION, ) liquid_water_content[k, i, j] -= ice_content_change ice_content[k, i, j] += ice_content_change temp[k, i, j] += c.LATENT_HEAT_OF_FUSION * ice_content_change / heat_cap[k, i, j] @njit(parallel=True, cache=True) def _heat_conduction( roi_idxs, num_layers, snow_thickness, soil_thickness, timestep, temp, therm_cond_snow, therm_cond_soil, heat_flux, heat_cap, ): """ Update snow layer temperatures. Parameters ---------- roi_idxs : ndarray(int, ndim=2) (N, 2)-array specifying the (row, col) indices within the data arrays that should be considered. num_layers : ndarray(float, ndim=2) Number of snow layers. snow_thickness : ndarray(float, ndim=3) Snow thickness (m). soil_thickness : ndarray(float, ndim=3) Soil thickness (m). timestep : float Model timestep (s). temp : ndarray(float, ndim=3) Snow temperature (K). therm_cond_snow : ndarray(float, ndim=3) Snow thermal conductivity (W m-1 K-1). therm_cond_soil : ndarray(float, ndim=3) Soil thermal conductivity (W m-1 K-1). heat_flux : ndarray(float, ndim=2) Surface heat flux (W m-2). heat_cap : ndarray(float, ndim=3) Areal heat capacity of snow (J K-1 m-2). References ---------- .. [1] <NAME>. (2015). A factorial snowpack model (FSM 1.0). Geoscientific Model Development, 8(12), 3867–3876. 
https://doi.org/10.5194/gmd-8-3867-2015 """ num_pixels = len(roi_idxs) for idx_num in prange(num_pixels): i, j = roi_idxs[idx_num] ns = num_layers[i, j] if ns > 0: temp[:ns, i, j] += heatconduction.temp_change( snow_thickness[:ns, i, j], timestep, temp[:ns, i, j], therm_cond_snow[:ns, i, j], temp[-1, i, j], soil_thickness[0, i, j], therm_cond_soil[0, i, j], heat_flux[i, j], heat_cap[:ns, i, j], ) @njit(cache=True, parallel=True) def _update_layers( roi_idxs, num_layers, min_thickness, thickness, ice_content, liquid_water_content, heat_cap, temp, density, depth, ): """ Update snow layers. Parameters ---------- roi_idxs : ndarray(int, ndim=2) (N, 2)-array specifying the (row, col) indices within the data arrays that should be considered.
# NOTE(review): chunk boundary — the `def` keyword and the enclosing class of
# this constructor (apparently a break-join "Link" record: chrom1/start1,
# chrom2/start2, score) were cut off above this view.
__init__(self, link):
    self.chrom1 = link[0]
    self.start1 = int(link[1])
    self.chrom2 = link[2]
    self.start2 = int(link[3])
    self.score = int(link[4])


#------------------------------------------------------------------------------#
# plot bedgraph class
#------------------------------------------------------------------------------#
class Bgtrack:
    # Renders a copy-number bedgraph track on a matplotlib axis, optionally
    # re-mapping multiple genomic regions of interest onto one shared x-axis.

    #--------------------------------------------------------------------------#
    def __init__(self, ax, roi, bgs, char_len):
        self.ax = ax
        self.roi = roi
        self.bgs = bgs
        # y-limit: one unit above the largest copy-number value
        self.ymax = int(max(bgs.iloc[:,2])) + 1
        # init converted axis
        self.bgs_cvt = self.bgs
        self.roi_cvt = self.roi
        # get the cutpoints of split segments
        self.cutpoints = []
        # get the gap width between split segments
        self.gap = char_len * 2
        # convert bgs to converted axis
        if (len(roi[0]) > 1):
            self.bgs_cvt, self.roi_cvt, self.cutpoints = \
                self.convert_bgs(bgs, roi, self.gap)
        # plot features
        self.edgecolor = 'black'
        self.facecolor = 'gray'
        self.alpha = 0.8

    #--------------------------------------------------------------------------#
    def plot(self):
        """plot bedgraph track
        """
        ax = self.ax
        bgs = self.bgs_cvt
        ymax = self.ymax
        roi = self.roi_cvt
        cutpoints = self.cutpoints
        facecolor = self.facecolor
        alpha = self.alpha
        # set axis
        ax.set_xlim(xmin = roi[1], xmax = roi[2])
        ax.set_ylim(ymin = 0, ymax = ymax)
        ax.set_xticks([])
        ax.set_yticks([0, ymax])
        ax.set_ylabel('Copy #', rotation = 'horizontal', ha = 'right')
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['bottom'].set_visible(False)
        # dashed separators at segment boundaries
        [ax.axvline(x, linestyle = '--', alpha = 0.5, zorder = -1) for x in cutpoints]
        # plot track
        ax.fill_between(bgs.iloc[:,1], bgs.iloc[:,2],
                        linewidth = 0, color = facecolor, alpha = alpha)
        # ax.bar(bgs.iloc[:,1], bgs.iloc[:,2], linewidth = 5, width=0, ec="k")

    #--------------------------------------------------------------------------#
    @staticmethod
    def convert_bgs(bgs, rois, gap):
        """ convert bgs to new x-axis
        """
        # init
        bgs_cvt = []
        current_start = gap
        current_end = gap
        cutpoints = []
        # convert
        for roi in rois:
            current_end = current_start + roi[2] - roi[1]
            cutpoints.append(current_start)
            cutpoints.append(current_end)
            bgs_sub = bgs[(bgs['Chrom'] == roi[0])
                          & (bgs['Coord'] >= roi[1])
                          & (bgs['Coord'] <= roi[2])]
            # linearly map genomic coordinates into the concatenated axis
            cvt = np.interp(bgs_sub.Coord, [roi[1], roi[2]],
                            [current_start, current_end])
            bgs_cvt.append(bgs_sub.assign(Coord = cvt))
            # zero-valued sentinel rows so fill_between drops to the baseline
            # at both ends of each segment
            bgs_cvt.append(pd.DataFrame([[roi[0], current_end, 0]],
                                        columns = ['Chrom', 'Coord', 'CN']))
            bgs_cvt.append(pd.DataFrame([[roi[0], current_start, 0]],
                                        columns = ['Chrom', 'Coord', 'CN']))
            current_start = current_end + gap
        bgs_cvt = pd.concat(bgs_cvt).sort_values(['Chrom', 'Coord'])
        return bgs_cvt, [rois[0][0], 0, current_end + gap], cutpoints


#------------------------------------------------------------------------------#
# plot arc class
#------------------------------------------------------------------------------#
class Arctrack:
    # Draws break-join links as arcs whose height scales with link span and
    # whose color optionally encodes the link score.

    #--------------------------------------------------------------------------#
    def __init__(self, ax, roi, links, char_len, colormap = None):
        self.ax = ax
        self.roi = roi
        self.links = links
        self.roi_cvt = roi
        self.links_cvt = links
        self.char_len = char_len
        self.gap = char_len * 2
        self.cutpoints = []
        # convert links to new axis
        if (len(roi[0]) > 1):
            self.links_cvt, self.roi_cvt, self.cutpoints = \
                self.convert_links(links, roi, self.gap)
        # set color
        if (colormap is not None):
            self.sm = self.map_color([x.score for x in links], colormap)
        else:
            self.sm = None
        # plot features
        self.color = 'red'
        self.linewidth = 1
        self.fontsize = 12

    #--------------------------------------------------------------------------#
    def plot(self):
        """plot arc track
        """
        ax = self.ax
        links = self.links_cvt
        roi = self.roi_cvt
        color = self.color
        fontsize = self.fontsize
        linewidth = self.linewidth
        cutpoints = self.cutpoints
        char_len = self.char_len
        ax.set_xlim(xmin = roi[1], xmax = roi[2])
        ax.set_ylabel('Breakjoin', rotation = 'horizontal', ha = 'right')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_yticklabels([])
        ax.set(frame_on=False)
        [ax.axvline(x, linestyle = '--', alpha = 0.5, zorder = -1) for x in cutpoints]
        # divider = make_axes_locatable(ax)
        # cax = divider.append_axes('top', size='10%', pad='5%')
        # # draw straight line
        # ax.plot(0, 0, zorder = 0)
        # plt.colorbar(self.sm, cax = cax, orientation='horizontal')
        # cax.xaxis.set_ticks_position("top")

        # draw straight line
        ax.plot(0, 0, zorder = 0)

        # draw arcs
        heights = []
        for lk in links:
            center = (lk.start1 + lk.start2)/2
            width = (lk.start2 - lk.start1)
            if (self.sm is not None):
                color = self.sm.to_rgba(lk.score)
            # height = np.log2(abs(width))*10
            height = np.sqrt(abs(width) + 10000)
            heights.append(height / 2)
            # plot arc (upper half: theta 0..180 degrees)
            ax.add_patch(Arc((center, 1), width, height, 0, 0, 180,
                             color = color, linewidth = linewidth))
            # plot arc score
            ax.text(center, height / 2, lk.score,
                    horizontalalignment = 'center',
                    verticalalignment = 'bottom',
                    fontsize = fontsize)
        # set the y-axis
        ax.set_ylim(ymin = 0, ymax = max(heights) * 1.3)
        # cbar = plt.colorbar(self.sm, ax = ax, orientation='vertical',
        #     pad = 0, fraction = 0.1, shrink = 0.9, aspect = 10,
        #     ticks = [0, max([x.score for x in self.links])])
        # cbar.ax.tick_params(labelsize = self.fontsize * 0.7)

    #--------------------------------------------------------------------------#
    @staticmethod
    def map_color(scores, colormap):
        """map color to given score
        """
        if (scores == []):
            scores = [1]
        norm = colors.Normalize(0, max(scores))
        cmap = cm.get_cmap(colormap)
        return cm.ScalarMappable(norm, cmap)

    #--------------------------------------------------------------------------#
    @staticmethod
    def convert_links(links, rois, gap):
        """ convert links to new x-axis
        """
        # init
        links_cvt = deepcopy(links)
        current_start = gap
        current_end = gap
        cutpoints = []
        # convert
        for roi in rois:
            current_end = current_start + roi[2] - roi[1]
            cutpoints.append(current_start)
            cutpoints.append(current_end)
            for i, lk in enumerate(links_cvt):
                lk_old = links[i]
                # remap each endpoint that falls inside this region
                if (lk_old.chrom1 == roi[0] \
                        and lk_old.start1 >= roi[1] \
                        and lk_old.start1 <= roi[2]):
                    lk.start1 = np.interp(
                        lk_old.start1, [roi[1], roi[2]],
                        [current_start, current_end])
                if (lk_old.chrom2 == roi[0] \
                        and lk_old.start2 >= roi[1] \
                        and lk_old.start2 <= roi[2]):
                    lk.start2 = np.interp(
                        lk_old.start2, [roi[1], roi[2]],
                        [current_start, current_end])
            current_start = current_end + gap
        return links_cvt, [rois[0][0], 0, current_end + gap], cutpoints


#------------------------------------------------------------------------------#
# plot gene class
#------------------------------------------------------------------------------#
class Genetrack:
    # Draws gene models (BED12-style records) with exon blocks and an arrow
    # head on the first exon indicating strand direction.

    #--------------------------------------------------------------------------#
    def __init__(self, ax, roi, beds, char_len):
        self.ax = ax
        self.roi = roi
        self.beds = beds
        self.char_len = char_len
        self.roi_cvt = roi
        self.beds_cvt = beds
        self.cutpoints = []
        self.gap = char_len * 2
        # convert beds to new axis
        # NOTE(review): convert_beds is not visible in this chunk — presumably
        # defined further down; verify it exists.
        if (len(roi[0]) > 1):
            self.beds_cvt, self.roi_cvt, self.cutpoints = \
                self.convert_beds(beds, roi, self.gap)
        else:
            self.roi_cvt = self.roi = [roi]
        # plot features
        self.height = 0.7
        self.edgecolor = 'black'
        self.facecolor = 'lightgrey'
        self.linewidth = 0.5
        self.fontsize = 12
        self.max_row = 30
        self.arrow_length = self.char_len

    #--------------------------------------------------------------------------#
    def plot(self):
        """plot gene track
        """
        ax = self.ax
        beds = self.beds_cvt
        roi_cvt = self.roi_cvt
        rois = self.roi
        cutpoints = self.cutpoints
        gap = self.gap
        # set axis
        ax.set_xlim(xmin = roi_cvt[1], xmax = roi_cvt[2])
        ax.set_ylabel('Gene', rotation = 'horizontal', ha = 'right')
        # ax.set_xlabel(roi_cvt[0])
        ax.set_yticks([])
        ax.set_yticklabels([])
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.spines['left'].set_visible(False)
        [ax.axvline(x, linestyle = '--', alpha = 0.5, zorder = -1) for x in cutpoints]
        # set x-axis ticks to actual genomic region
        cvt_start = gap
        cvt_end = gap
        x_actual = []
        x_convert = []
        for rg in rois:
            cvt_end = cvt_start + rg[2] - rg[1] - 1
            x_convert.append(cvt_start)
            x_convert.append((cvt_start + cvt_end) / 2)
            x_convert.append(cvt_end)
            # labels: start coord, chromosome name (centered), end coord
            x_actual.append(f'{rg[1]:,}')
            x_actual.append('\n\n' + rg[0])
            x_actual.append('\n' + f'{rg[2]:,}')
            cvt_start = cvt_end + 1 + gap
            # ax.text((rg[1] + rg[2]) / 2, 1, rg[0])
        ax.set_xticks(x_convert)
        ax.set_xticklabels(x_actual)
        # get y position of each gene
        # NOTE(review): get_gene_ypos is not visible in this chunk — presumably
        # defined further down; verify it exists.
        ys = self.get_gene_ypos()
        # plot track
        for idx, bed in enumerate(beds):
            # print(bed.start, bed.end, bed.name, ys[idx])
            self.draw_gene(bed, ys[idx])

    #--------------------------------------------------------------------------#
    def draw_gene(self, bed, ypos):
        """draw gene mode using one row of bed12
        """
        ax = self.ax
        height = self.height
        edgecolor = self.edgecolor
        facecolor = self.facecolor
        linewidth = self.linewidth
        fontsize = self.fontsize
        # draw straight line
        ax.plot([bed.start, bed.end],
                [ypos + height / 2, ypos + height / 2],
                color = edgecolor, linewidth = linewidth, zorder = 0)
        # get the first exon (transcription-wise: last block for '-' strand)
        if (bed.strand == '-'):
            block_starts = bed.block_starts[::-1].copy()
            block_sizes = bed.block_sizes[::-1].copy()
            label_pos = bed.start - self.arrow_length * 1.3
        else:
            block_starts = bed.block_starts.copy()
            block_sizes = bed.block_sizes.copy()
            label_pos = bed.start - self.arrow_length * 0.3
        exon1_start = bed.start + block_starts.pop()
        exon1_end = exon1_start + block_sizes.pop()
        # get arrow xy
        arrow_xy = self.get_arrow_xy(
            exon1_start, exon1_end, bed.strand, ypos)
        # draw arrow head
        ax.add_patch(Polygon(arrow_xy, edgecolor = edgecolor,
                             facecolor = facecolor, linewidth = linewidth))
        # draw other parts of gene
        for idx, rg in enumerate(block_starts):
            start_pos = bed.start + rg
            end_pos = start_pos + block_sizes[idx]
            xy = [(start_pos, ypos), (start_pos, ypos + height),
                  (end_pos, ypos + height), (end_pos, ypos)]
            ax.add_patch(Polygon(xy, edgecolor = edgecolor,
                                 facecolor = facecolor, linewidth = linewidth))
        # # plot gene symbol
        # print(label_pos, bed.start, bed.end, self.arrow_length, ypos)
        ax.text(label_pos, ypos + height / 2, bed.name,
                horizontalalignment = 'right',
                verticalalignment = 'center',
                fontsize = fontsize)

    #--------------------------------------------------------------------------#
    def get_arrow_xy(self, start, end, strand, ypos):
        """get arrow xy locations to indicate gene direction
        Parameters
        ----------
        ypos : y-axis position of the arrow
        length : length of the arrow head, relative to total genomic region to plot
        height : height of the arrow
        """
        height = self.height
        length = self.arrow_length
        # calculate y
        y0 = ypos
        y1 = ypos + height
        # calculate x
        if (strand == '+'):
            x0 = start
            x1 = end
            x2 = end + length
        elif (strand == '-'):
            x0 = end
            x1 = start
            x2 = start - length
        # get the xy
        # NOTE(review): truncated at the chunk boundary — the assignment below
        # continues outside this view.
        xy =
# NOTE(review): chunk boundary — the lines down to configure_mcaroi are the
# tail of the McaRoi device constructor, whose `def` line is outside this view.
# the prefix looks like "MCA1ROI:1:"
# and we want the 1 at the end
mcaroi_prefix_match = self.mcaroi_prefix_re.search(prefix)
if mcaroi_prefix_match is None:
    raise ValueError(
        f"mcaroi prefix '{prefix}' does not match the expected pattern `{self.mcaroi_prefix_re.pattern}`"
    )
self.mcaroi_number = int(mcaroi_prefix_match.group("mcaroi_number"))

def configure_mcaroi(self, *, min_x, size_x, roi_name=None, use=True):
    """
    Configure the details of an MCAROI.

    Parameters
    ----------
    min_x: int
        the starting bin? for the roi
    size_x: int
        the width in bins? for the roi
    roi_name: str, optional
    use: bool, defaults to True
    """
    logger.debug(
        "configuring Xspress3 MCAROI '%s': name '%s' min_x '%d' size_x '%d' use '%s'",
        self.prefix,
        roi_name,
        min_x,
        size_x,
        use,
    )
    self.min_x.put(int(min_x))
    self.size_x.put(int(size_x))
    if roi_name:
        self.roi_name.put(roi_name)
    self.use.put(use)

# remove in favor of configure_mcaroi
def configure_roi(self, ev_min, ev_size):
    """Configure the MCAROI with min and size eV

    Writes the PVs only when at least one of min/size/use would actually
    change, to avoid unnecessary EPICS traffic.

    Parameters
    ----------
    ev_min : int
        minimum electron volts for ROI
    ev_size : int
        ROI size (width) in electron volts
    """
    ev_min = int(ev_min)
    ev_size = int(ev_size)
    # assume if this ROI is being configured
    # that it should be read, meaning the
    # "use" PV must be set to 1
    use_roi = 1
    configuration_changed = any(
        [
            self.min_x.get() != ev_min,
            self.size_x.get() != ev_size,
            self.use.get() != use_roi,
        ]
    )
    if configuration_changed:
        logger.debug(
            "Setting up Xspress3 ROI: name=%s ev_min=%s ev_size=%s "
            "use=%s prefix=%s channel=%s",
            self.name,
            ev_min,
            ev_size,
            use_roi,
            self.prefix,
            # self.parent is the ?? class
            # self.parent.parent is the ?? class
            # TODO: I don't like the assumption that self has a parent
            self.parent.parent.channel_num,
        )
        self.min_x.put(ev_min)
        self.size_x.put(ev_size)
        self.use.put(use_roi)
    else:
        # nothing has changed
        pass

def clear(self):
    """Clear and disable this ROI"""
    # it is enough to just disable the ROI
    # self.min_x.put(0)
    # self.size_x.put(0)
    self.use.put(0)


class Sca(ADBase):
    # Scalar (SCA) read-back values for one channel.
    # includes Dead Time correction, for example
    # sca numbers go from 0 to 10
    clock_ticks = Cpt(EpicsSignalRO, "0:Value_RBV")
    reset_ticks = Cpt(EpicsSignalRO, "1:Value_RBV")
    reset_counts = Cpt(EpicsSignalRO, "2:Value_RBV")
    all_event = Cpt(EpicsSignalRO, "3:Value_RBV")
    all_good = Cpt(EpicsSignalRO, "4:Value_RBV")
    window_1 = Cpt(EpicsSignalRO, "5:Value_RBV")
    window_2 = Cpt(EpicsSignalRO, "6:Value_RBV")
    pileup = Cpt(EpicsSignalRO, "7:Value_RBV")
    event_width = Cpt(EpicsSignalRO, "8:Value_RBV")
    dt_factor = Cpt(EpicsSignalRO, "9:Value_RBV")
    dt_percent = Cpt(EpicsSignalRO, "10:Value_RBV")


def _validate_mcaroi_number(mcaroi_number):
    """
    Raise ValueError if the MCAROI number is
      1. not an integer
      2. outside the allowed interval [1,48]

    Parameters
    ----------
    mcaroi_number: could be anything
        MCAROI number candidate
    """
    if not isinstance(mcaroi_number, int):
        raise ValueError(f"MCAROI number '{mcaroi_number}' is not an integer")
    elif not 1 <= mcaroi_number <= 48:
        raise ValueError(
            f"MCAROI number '{mcaroi_number}' is outside the allowed interval [1,48]"
        )
    else:
        # everything is awesome
        pass


def build_channel_class(channel_number, mcaroi_numbers, channel_parent_classes=None):
    """Build an Xspress3 channel class with the specified channel number and MCAROI numbers.

    MCAROI numbers need not be consecutive.

    The complication of using dynamically generated classes is the price
    for the relative ease of including the channel number in MCAROI PVs
    and the ability to specify the number of MCAROIs that will be used
    rather than defaulting to the maximum of 48 per channel.

    Parameters
    ----------
    channel_number: int
        the channel number, 1-16
    mcaroi_numbers: Sequence of int
        sequence of MCAROI numbers, not necessarily consecutive,
        allowed values are 1-48
    channel_parent_classes: list-like, optional
        sequence of all parent classes for the generated channel class,
        by default the only parent is ophyd.areadetector.ADBase

    Returns
    -------
    a dynamically generated class similar to this:
        class GeneratedXspress3Channel(ADBase):
            channel_num = 2
            sca = Cpt(Sca, ...)
            mca = Cpt(Mca, ...)
            mca_sum = Cpt(McaSum, ...)
            mcarois = DDC(...4 McaRois...)

            def get_mcaroi(self, *, number):
                ...
            def iterate_mcarois(self):
                ...
            def clear_all_rois(self):
                ...
    """
    if channel_parent_classes is None:
        channel_parent_classes = tuple([ADBase])

    _validate_channel_number(channel_number=channel_number)

    # create a tuple in case the mcaroi_numbers parameter can be iterated only once
    mcaroi_numbers = tuple([mcaroi_number for mcaroi_number in mcaroi_numbers])
    for mcaroi_number in mcaroi_numbers:
        _validate_mcaroi_number(mcaroi_number=mcaroi_number)

    mcaroi_name_re = re.compile(r"mcaroi\d{2}")

    # the next six functions will become methods of the generated channel class
    def __repr__(self):
        return f"{self.__class__.__name__}(channel_number={self.channel_number}, mcaroi_numbers={self.mcaroi_numbers})"

    def get_mcaroi_count(self):
        return len(mcaroi_numbers)

    def get_mcaroi(self, *, mcaroi_number):
        _validate_mcaroi_number(mcaroi_number=mcaroi_number)
        try:
            return getattr(self, f"mcaroi{mcaroi_number:02d}")
        except AttributeError as ae:
            raise ValueError(
                f"no MCAROI on channel {self.channel_number} "
                f"with prefix '{self.prefix}' has number {mcaroi_number}"
            ) from ae

    def iterate_mcaroi_attr_names(self):
        # attribute names look like "mcaroi01", "mcaroi02", ...
        for attr_name in self.__dir__():
            if mcaroi_name_re.match(attr_name):
                yield attr_name

    def iterate_mcarois(self):
        """
        Iterate over McaRoi children of the Xspress3Channel.mcarois attribute.

        Yields
        ------
        McaRoi instance
        """
        for mcaroi_name, mcaroi in self._signals.items():
            if mcaroi_name_re.match(mcaroi_name):
                yield mcaroi

    def clear_all_rois(self):
        """Clear all MCAROIs"""
        for mcaroi in self.iterate_mcarois():
            mcaroi.clear()

    channel_fields_and_methods = {
        "__repr__": __repr__,
        "channel_number": channel_number,
        "mcaroi_numbers": tuple(sorted(mcaroi_numbers)),
        "sca": Cpt(Sca, f"C{channel_number}SCA:"),
        "mca": Cpt(Mca, f"MCA{channel_number}:"),
        "mca_sum": Cpt(McaSum, f"MCASUM{channel_number}:"),
        "get_mcaroi_count": get_mcaroi_count,
        "get_mcaroi": get_mcaroi,
        "iterate_mcaroi_attr_names": iterate_mcaroi_attr_names,
        "iterate_mcarois": iterate_mcarois,
        "clear_all_rois": clear_all_rois,
    }
    channel_fields_and_methods.update(
        {
            f"mcaroi{mcaroi_i:02d}": Cpt(
                McaRoi,
                # MCAROI PV suffixes look like "MCA1ROI:2:"
                f"MCA{channel_number}ROI:{mcaroi_i:d}:",
            )
            for mcaroi_i in mcaroi_numbers
        }
    )

    return type(
        "GeneratedXspress3Channel", channel_parent_classes, channel_fields_and_methods
    )


def _validate_channel_number(channel_number):
    """
    Raise ValueError if the channel number is
      1. not an integer
      2. outside the allowed interval [1,16]

    Parameters
    ----------
    channel_number: could be anything, but should be int
        channel number candidate
    """
    if not isinstance(channel_number, int):
        raise ValueError(f"channel number '{channel_number}' is not an integer")
    elif not 1 <= channel_number <= 16:
        raise ValueError(
            f"channel number '{channel_number}' is outside the allowed interval [1,16]"
        )
    else:
        # everything is great
        pass


def build_detector_class(
    channel_numbers,
    mcaroi_numbers,
    detector_parent_classes=None,
    extra_class_members=None,
):
    # Backward-compatible alias: delegates to build_xspress3_class.
    return build_xspress3_class(
        channel_numbers=channel_numbers,
        mcaroi_numbers=mcaroi_numbers,
        xspress3_parent_classes=detector_parent_classes,
        extra_class_members=extra_class_members,
    )


def build_xspress3_class(
    channel_numbers,
    mcaroi_numbers,
    xspress3_parent_classes=None,
    extra_class_members=None,
):
    """Build an Xspress3 detector class with the specified channel and roi numbers.

    The complication of using dynamically generated detector classes is
    the price for being able to easily specify the exact number of channels
    and MCAROIs per channel in use on the detector.

    Detector classes generated by build_detector_class include these "soft"
    PVs which are not part of the Xspress3 IOC but are used by Xspress3FileStore:
        external_trig
        total_points
        spectra_per_point
        make_directories
        rewindable

    Parameters
    ----------
    channel_numbers: Sequence of int
        sequence of channel numbers, 1-16, for the detector; for example [1, 2, 3, 8]
    mcaroi_numbers: Sequence of int
        sequence of MCAROI numbers, 1-48, for each channel; for example [1, 2, 3, 10]
    xspress3_parent_classes: list-like, optional
        sequence of all parent classes for the generated detector class,
        if specified include *all* necessary parent classes; if not specified
        the default parent is ophyd.areadetector.Xspress3Detector
    extra_class_members: Dict[String, Any]
        a dictionary of extra class members to be passed to the builtin
        type(...) function; see the builtin type function for allowed
        key-value pairs

    Returns
    -------
    a dynamically generated class similar to the following:
        class GeneratedXspress3Detector(Xspress3Detector, SomeMixinClass, ...):
            external_trig = Cpt(Signal, value=False)
            total_points = Cpt(Signal, value=-1)
            spectra_per_point = Cpt(Signal, value=1)
            make_directories = Cpt(Signal, value=False)
            rewindable = Cpt(Signal, value=False)
            channels = DDC(...4 Xspress3Channels with 3 ROIs each...)

            def get_channel_count(self):
                ....
            def get_channel(self, channel_number):
                ....
            def iterate_channels(self):
                ...
    """
    if xspress3_parent_classes is None:
        xspress3_parent_classes = tuple([Xspress3Detector])
    if extra_class_members is None:
        extra_class_members = dict()

    # in case channel_numbers can be iterated only once, create a tuple
    channel_numbers = tuple([channel_number for channel_number in channel_numbers])
    # in case mcaroi_numbers can be iterated only once, create a tuple
    mcaroi_numbers = tuple([mcaroi_number for mcaroi_number in mcaroi_numbers])

    channel_attr_name_re = re.compile(r"channel\d{2}")

    # the following four functions will become methods of the generated xspress3 class
    def __repr__(self):
        """Return a string representation of this xspress3 class.

        Returns
        -------
        str : text representation of the dynamically generated xspress3 class
        """
        return f"{self.__class__.__name__}(channels=({','.join([str(channel) for channel in self.iterate_channels()])}))"

    def get_channel_count(self):
        """Return the number of channels on this xspress3 class.

        Returns
        -------
        int : count of channels on this xspress3 class
        """
        return len(channel_numbers)

    def get_channel(self, *, channel_number):
        """Return the channel object corresponding to the specified channel number.

        Parameters
        ----------
        channel_number
            integer channel number

        Returns
        -------
        channel : GeneratedXspress3Channel

        Raises
        ------
        ValueError
            when there is no channel with the specified channel number
        """
        _validate_channel_number(channel_number=channel_number)
        try:
            return getattr(self, f"channel{channel_number:02d}")
        except AttributeError as ae:
            raise ValueError(
                f"no channel on detector with prefix '{self.prefix}' "
                f"has number {channel_number}"
            ) from ae

    def iterate_channels(self):
        """Yield the channel objects of this xspress3 class in the order they were specified.

        Yields
        ------
        channel : GeneratedXspress3Channel
        """
        for channel_attr_name in self.__dir__():
            if channel_attr_name_re.match(channel_attr_name):
                yield getattr(self, channel_attr_name)

    xspress3_fields_and_methods = dict(
        **{
            "channel_numbers": tuple(sorted(channel_numbers)),
            "external_trig": Cpt(Signal, value=False, doc="Use external triggering"),
            "total_points": Cpt(
                Signal,
                value=-1,
                doc="The total number of points to acquire overall",
            ),
            # NOTE(review): truncated at the chunk boundary — the remainder of
            # this dict (and of build_xspress3_class) continues outside this view.
            "spectra_per_point": Cpt(
                Signal,
<filename>src/sorting_relevant_items_first_task.py import operator import numpy as np from shared import config from shared.indexes import Index from shared import functions class ReadDataFile: def __init__(self, _type, first_n_items=20, echo=True): self._dict = dict() f = open(config.task_dataset_path + _type + '.txt', 'r') lines = f.read().split('\n') self._description = lines[0].strip() for line in lines[1:]: line = line.strip() if len(line) == 0: continue _key, value_lst = (line.split(':', 1)) _key = _key.strip() self._dict[_key] = list() for value in value_lst.strip().split(';')[:first_n_items]: if len(value): self._dict[_key].append(value) self._dict[_key] = set(self._dict[_key]) if echo: print(_key, len(self._dict[_key])) def get_dict(self): return self._dict def get_description(self): return self._description class AutomateQuery: def __init__(self): pass @staticmethod def prepare_queries(data_dict, description): _query_lst = list() item_list = list() for _category_label, selected_items in data_dict.items(): # print(selected_items) # print('->', _category_label) correct_answer_set = selected_items item_list.extend(list(selected_items)) _query = CategoryQuery(_category_label, correct_answer_set) _query.set_description(description + '/' + _category_label) _query_lst.append(_query) for _query in _query_lst: _query.add_item_list(item_list) return _query_lst class CategoryQuery: def __init__(self, category_name, correct_answer_set): self.query = category_name self.correct_answer_set = {_ans.lower() for _ans in correct_answer_set} self.item_list = None self.discovered_answer = None self.description = None self.answer_dict = None self.measure_p_at_k = None self.measure_ap = None self.incorrect_answers = None def add_item_list(self, item_list): self.item_list = list(set(item_list)) def get_possible_answer_set(self): return self.item_list def prepare_null_vectors(self, gram_to_vector): gram_to_vector[self.query] = None for item in self.item_list: gram_to_vector[item] 
= None return None def set_description(self, description): self.description = description def get_description(self): if self.description is None: return 'Sort item list for Category Name:' + self.query else: return self.description + ' - Sort relevant item list:' def get_all_correct_answer(self): return self.correct_answer_set def get_query(self): return self.query def set_discovered_answer(self, answer_dict, echo=True): self.answer_dict = answer_dict sorted_answer_lst = sorted(answer_dict.items(), key=operator.itemgetter(1), reverse=True) self.incorrect_answers = [] # print(sorted_answer_lst) k = len(self.get_all_correct_answer()) # print('total correct answers', k) self.measure_p_at_k = 0 self.measure_ap = 0 num_correct_answers = 0 last_correct_index = 0 for i, (query_answer_tuple, score) in enumerate(sorted_answer_lst): query, answer = query_answer_tuple # print(i, k, i < k) # print(answer.lower(), self.get_all_correct_answer()) if answer.lower() in self.get_all_correct_answer(): num_correct_answers += 1 if i < k: self.measure_p_at_k += 1 self.measure_ap += num_correct_answers / (i + 1) last_correct_index = i + 1 elif k > num_correct_answers: self.incorrect_answers.append((answer, i + 1)) self.measure_p_at_k /= k # averaged over k self.measure_ap /= k # divided by number of correct results answers, scores = list(zip(*sorted_answer_lst[:last_correct_index])) self.discovered_answer = list(zip(*[list(zip(*answers))[1], scores])) if echo: print('p@k', self.measure_p_at_k, 'ap', self.measure_ap, sorted_answer_lst) def get_measures(self): return {'p@k': self.measure_p_at_k, 'ap': self.measure_ap} def get_answered_status(self): return 1.0 == self.measure_p_at_k def get_discovered_answer(self): return self.discovered_answer def get_incorrect_answers(self): return self.incorrect_answers class RankingTask: def __init__(self): pass @staticmethod def explain_result(v1, e1, query, _index, dimensions, max_n=-1, top_n_answers=3): incorrect_answers = 
query.get_incorrect_answers() answers = query.get_discovered_answer() print('\n', query.get_description(), 'Known Answer:', [ans.title() for ans in query.get_all_correct_answer()]) print(' > sorted order:', answers) print(' > incorrect answers:', incorrect_answers) meta_vector = np.array(v1) * 1.0 weight = 1.0 for e, score in answers: v = _index.get_eve_vector(e) meta_vector += np.array(v) * score weight += score meta_vector /= weight # weighted average for e2, rank in incorrect_answers[:top_n_answers]: e2 = e2 v2 = _index.get_eve_vector(e2) vector_product = np.array(meta_vector) * np.array(v2) dominant_features = list() for score, dim in sorted(list(zip(*[vector_product, dimensions])), reverse=True): if score == 0: break dominant_features.append((score, dim)) print(' --> dominant features (' + e1 + ', ' + e2 + ')', dominant_features[:max_n]) print('> top answers') for e, score in answers[:top_n_answers]: e2 = e v2 = _index.get_eve_vector(e2) vector_product = np.array(meta_vector) * np.array(v2) dominant_features = list() for score, dim in sorted(list(zip(*[vector_product, dimensions])), reverse=True): if score == 0: break dominant_features.append((score, dim)) print(' --> dominant features (' + e1 + ', ' + e2 + ')', dominant_features[:max_n]) def run_tests_eve(self, _query_lst, _index, echo=True, explain_wrong=True, explain_always=False, max_n_features=40, top_n_answers=3): results_lst = [] print('eve tests') results_lst.append( self.__run_test_eve__(_query_lst, _index, echo=echo, explain_wrong=explain_wrong, explain_always=explain_always, max_n_features=max_n_features, top_n_answers=top_n_answers)) return results_lst @staticmethod def __run_test_eve__(_query_lst, _index, echo=True, explain_wrong=True, explain_always=False, cache=True, max_n_features=40, top_n_answers=3): similarity_cached_dict = dict() _measures = {'p@k': 0, 'ap': 0} dimensions = _index.get_eve_dimensions() for question_no, (query) in enumerate(_query_lst): if echo: print('Question:' + 
str(question_no + 1) + '/' + str(len(_query_lst)), query.get_description(), 'Known Answer:', query.get_all_correct_answer()) scores_dict = dict() e1 = query.get_query() v1 = _index.get_eve_vector(e1) for e2 in query.get_possible_answer_set(): v2 = _index.get_eve_vector(e2) if cache and (e1, e2,) in similarity_cached_dict: score = similarity_cached_dict[(e1, e2,)] else: score = functions.calculate_cosine_similarity(v1, v2) similarity_cached_dict[(e1, e2,)] = score similarity_cached_dict[(e2, e1,)] = score scores_dict[e1, e2] = score query.set_discovered_answer(scores_dict, echo=echo) for k, v in query.get_measures().items(): _measures[k] += v if echo: print(' >', query.get_measures()) if (explain_wrong and not query.get_answered_status()) or explain_always: RankingTask.explain_result(v1, e1, query, _index, dimensions, max_n=max_n_features, top_n_answers=top_n_answers) if echo: print('Measure: ' + ','.join([k + ': ' + str(v/len(_query_lst)) for k, v in _measures.items()]), 'Total Queries:', len(_query_lst)) print('') return _measures, len(_query_lst) def run_tests_word2vec(self, _query_lst, sg=None, echo=True, cache=True): similarity_cached_dict = dict() _measures = {'p@k': 0, 'ap': 0} results_lst = list() print('word2vec tests, sg =', sg) for question_no, (query) in enumerate(_query_lst): if echo: print('Question:' + str(question_no + 1) + '/' + str(len(_query_lst)), query.get_description(), 'Known Answer:', query.get_all_correct_answer()) scores_dict = dict() e1 = query.get_query() v1 = _index.get_word2vec_vector(e1, sg) for e2 in query.get_possible_answer_set(): if cache and (e1, e2,) in similarity_cached_dict: sim_score = similarity_cached_dict[(e1, e2,)] else: v2 = _index.get_word2vec_vector(e2, sg) sim_score = functions.calculate_cosine_similarity(v1, v2) similarity_cached_dict[(e1, e2,)] = sim_score similarity_cached_dict[(e2, e1,)] = sim_score scores_dict[e1, e2] = sim_score query.set_discovered_answer(scores_dict, echo=echo) for k, v in 
query.get_measures().items(): _measures[k] += v if echo: print(' >', query.get_measures()) if echo: print('Measure: ' + ','.join([k + ': ' + str(v / len(_query_lst)) for k, v in _measures.items()]), 'Total Queries:', len(_query_lst)) print('') results_lst.append((_measures, len(_query_lst))) return results_lst def run_tests_fasttext(self, _query_lst, sg=None, echo=True, cache=True): similarity_cached_dict = dict() _measures = {'p@k': 0, 'ap': 0} results_lst = list() print('fasttext tests, sg =', sg) for question_no, (query) in enumerate(_query_lst): if echo: print('Question:' + str(question_no + 1) + '/' + str(len(_query_lst)), query.get_description(), 'Known Answer:', query.get_all_correct_answer()) scores_dict = dict() e1 = query.get_query() v1 = _index.get_fasttext_vector(e1, sg) for e2 in query.get_possible_answer_set(): if cache and (e1, e2,) in similarity_cached_dict: sim_score = similarity_cached_dict[(e1, e2,)] else: v2 = _index.get_fasttext_vector(e2, sg) sim_score = functions.calculate_cosine_similarity(v1, v2) similarity_cached_dict[(e1, e2,)] = sim_score similarity_cached_dict[(e2, e1,)] = sim_score scores_dict[e1, e2] = sim_score query.set_discovered_answer(scores_dict, echo=echo) for k, v in query.get_measures().items(): _measures[k] += v if echo: print(' >', query.get_measures()) if echo: print('Measure: ' + ','.join([k + ': ' + str(v / len(_query_lst)) for k, v in _measures.items()]), 'Total Queries:', len(_query_lst)) print('') results_lst.append((_measures, len(_query_lst))) return results_lst def run_tests_glove(self, _query_lst, echo=True, cache=True): similarity_cached_dict = dict() _measures = {'p@k': 0, 'ap': 0} results_lst = list() print('glove tests') for question_no, (query) in enumerate(_query_lst): if echo: print('Question:' + str(question_no + 1) + '/' + str(len(_query_lst)), query.get_description(), 'Known Answer:', query.get_all_correct_answer()) scores_dict = dict() e1 = query.get_query() v1 = _index.get_glove_vector(e1) for e2 in 
query.get_possible_answer_set(): if cache and (e1, e2,) in similarity_cached_dict: sim_score = similarity_cached_dict[(e1, e2,)] else: v2 = _index.get_glove_vector(e2) sim_score = functions.calculate_cosine_similarity(v1, v2) similarity_cached_dict[(e1, e2,)] = sim_score similarity_cached_dict[(e2, e1,)] = sim_score scores_dict[e1, e2] = sim_score query.set_discovered_answer(scores_dict, echo=echo) for k, v in query.get_measures().items(): _measures[k] += v if echo: print(' >', query.get_measures()) if echo: print('Measure: ' + ','.join([k + ': ' + str(v / len(_query_lst)) for k, v in _measures.items()]), 'Total Queries:', len(_query_lst)) print('') results_lst.append((_measures, len(_query_lst))) return results_lst def test_explain(): _index = Index(config) _types_lst = ['nobel_laureates', 'music_genres'] _query_lst = list() _type_query_lst = list() allowed_queries = {'Nobel laureates/List of Nobel laureates in Chemistry - Sort relevant item list:', 'Music genres/Classical music - Sort relevant item list:'} for __type in _types_lst: data_file = ReadDataFile(__type, first_n_items=20, echo=False) automatic_queries = \ AutomateQuery().prepare_queries(data_file.get_dict(), data_file.get_description()) _query_lst.extend(automatic_queries) _type_query_lst.append(automatic_queries) _task = RankingTask() for _i, _query_lst in enumerate(_type_query_lst): _index.load_all_indexes(_types_lst[_i]) _allowed_query_lst = list() for _q in _query_lst: if _q.get_description() in allowed_queries: _allowed_query_lst.append(_q) result_lst = _task.run_tests_eve(_allowed_query_lst, _index, echo=False, explain_wrong=True, explain_always=True, max_n_features=6, top_n_answers=1) for result in result_lst: _measures, _total = result print(_types_lst[_i].title(), ' -> Measure: ' + ', '.join([k + ': ' + str(v / _total) for k, v in _measures.items()]), 'Total Queries:', _total) if __name__ == '__main__': types_lst = ['cuisine', 'nobel_laureates', 'music_genres', 'movie_genres', 
'european_cities', 'animal_classes', 'country_continent'] # types_lst = ['cuisine'] query_lst = list() _type_query_lst = list() for __type in types_lst: data_file = ReadDataFile(__type, first_n_items=50) automatic_queries = \ AutomateQuery().prepare_queries(data_file.get_dict(), data_file.get_description()) query_lst.extend(automatic_queries) _type_query_lst.append(automatic_queries) print('total queries', len(query_lst)) _index = Index(config) task = RankingTask() for i, query_lst in enumerate(_type_query_lst): _index.load_all_indexes(types_lst[i]) result_lst = task.run_tests_eve(query_lst, _index, echo=False, explain_wrong=False, explain_always=False) for result in result_lst: measures, total = result print(types_lst[i].title(), ' -> Measure: ' + ', '.join([k + ': ' + str(v / total) for k,