input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
<gh_stars>1-10
import os
import sys
import numpy as np
# require pythonnet, pip install pythonnet
import clr
from System import String
# sys.path.append("DLLs")
clr.AddReference("ThermoFisher.CommonCore.Data")
clr.AddReference("ThermoFisher.CommonCore.RawFileReader")
import ThermoFisher
from ThermoFisher.CommonCore.Data.Interfaces import IScanEventBase, IScanEvent
'''
rawFile = ThermoFisher.CommonCore.RawFileReader.RawFileReaderAdapter.FileFactory(raw_filename)
var scanStatistics = rawFile.GetScanStatsForScanNumber(1);
var seg = rawFile.GetSegmentedScanFromScanNumber(1, scanStatistics);
var scanEvent = rawFile.GetScanEventForScanNumber(1);
var trailerData = rawFile.GetTrailerExtraInformation(1);
'''
def DotNetArrayToNPArray(arr, dtype):
    """Convert a .NET array (or any iterable) into a 1-D numpy array.

    :param arr: iterable of values, typically a System.Array from pythonnet
    :param dtype: numpy dtype for the resulting array
    :return: numpy.ndarray holding the elements of ``arr``
    """
    # .NET arrays support Python iteration but not the buffer protocol, so
    # the elements are materialised in Python first and handed to numpy.
    return np.array([item for item in arr], dtype=dtype)
'''
APIs are similar to pymsfilereader (https://github.com/frallain/pymsfilereader), but some APIs have not been implemented yet.
'''
class RawFileReader(object):
# static class members
sampleType = {0: 'Unknown',
1: 'Blank',
2: 'QC',
3: 'Standard Clear (None)',
4: 'Standard Update (None)',
5: 'Standard Bracket (Open)',
6: 'Standard Bracket Start (multiple brackets)',
7: 'Standard Bracket End (multiple brackets)'}
controllerType = {-1: 'No device',
0: 'MS',
1: 'Analog',
2: 'A/D card',
3: 'PDA',
4: 'UV',
'No device': -1,
'MS': 0,
'Analog': 1,
'A/D card': 2,
'PDA': 3,
'UV': 4}
massAnalyzerType = {'ITMS': 0,
'TQMS': 1,
'SQMS': 2,
'TOFMS': 3,
'FTMS': 4,
'Sector': 5,
0: 'ITMS',
1: 'TQMS',
2: 'SQMS',
3: 'TOFMS',
4: 'FTMS',
5: 'Sector'}
activationType = {'CID': 0,
'MPD': 1,
'ECD': 2,
'PQD': 3,
'ETD': 4,
'HCD': 5,
'Any activation type': 6,
'SA': 7,
'PTR': 8,
'NETD': 9,
'NPTR': 10,
'UVPD': 11,
'ETHCD': 12, # not Thermo's build-in activation types
'ETCID': 13, # not Thermo's build-in activation types
0: 'CID',
1: 'MPD',
2: 'ECD',
3: 'PQD',
4: 'ETD',
5: 'HCD',
6: 'Any activation type',
7: 'SA',
8: 'PTR',
9: 'NETD',
10: 'NPTR',
11: 'UVPD',
12: 'ETHCD', # not Thermo's build-in activation types
13: 'ETCID', # not Thermo's build-in activation types
}
detectorType = {'Valid': 0,
'Any': 1,
'NotValid': 2,
0: 'Valid',
1: 'Any',
2: 'NotValid',
}
scanDataType = {'Centroid': 0,
'Profile': 1,
'Any': 2,
0: 'Centroid',
1: 'Profile',
2: 'Any',
}
scanType = {'Full': 0,
'Zoom': 1,
'SIM': 2,
'SRM': 3,
'CRM': 4,
'Any': 5,
'Q1MS': 6,
'Q3MS': 7,
0: 'Full',
1: 'SIM',
2: 'Zoom',
3: 'SRM',
4: 'CRM',
5: 'Any',
6: 'Q1MS',
7: 'Q3MS',
}
    def __init__(self, filename, **kwargs):
        """Open ``filename`` through Thermo's RawFileReader adapter, select the
        first MS device, and cache frequently used run-header values.

        :param filename: path to a Thermo .raw file (made absolute/normalised)
        :raises IOError: if the file cannot be opened
        """
        self.filename = os.path.abspath(filename)
        self.filename = os.path.normpath(self.filename)
        self.source = ThermoFisher.CommonCore.RawFileReader.RawFileReaderAdapter.FileFactory(self.filename)
        if not self.source.IsOpen:
            raise IOError(
                "RAWfile '{0}' could not be opened, is the file accessible ?".format(
                    self.filename))
        # Select the first MS device; all data-access calls below target it.
        self.source.SelectInstrument(ThermoFisher.CommonCore.Data.Business.Device.MS, 1)
        # NOTE(review): the Get* helpers below are presumably defined later in
        # this file (not visible in this chunk) -- confirm before refactoring.
        self.StartTime = self.GetStartTime()
        self.EndTime = self.GetEndTime()
        self.FirstSpectrumNumber = self.GetFirstSpectrumNumber()
        self.LastSpectrumNumber = self.GetLastSpectrumNumber()
        self.LowMass = self.GetLowMass()
        self.HighMass = self.GetHighMass()
        self.MassResolution = self.GetMassResolution()
        self.NumSpectra = self.GetNumSpectra()
    def Close(self):
        """Closes a raw file and frees the associated memory."""
        # Dispose() releases the underlying .NET file handle.
        self.source.Dispose()

    def GetFileName(self):
        """Returns the fully qualified path name of an open raw file."""
        return self.source.FileName

    def GetCreatorID(self):
        """Returns the creator ID. The creator ID is the
        logon name of the user when the raw file was acquired."""
        return self.source.CreatorId

    def GetCreationDate(self):
        """Returns the file creation date in DATE format."""
        # https://msdn.microsoft.com/en-us/library/82ab7w69.aspx
        # The DATE type is implemented using an 8-byte floating-point number
        return self.source.CreationDate.ToOADate()

    def IsError(self):
        """Returns the error state flag of the raw file. A return value of TRUE indicates that an error has
        occurred. For information about the error, call the GetErrorCode or GetErrorMessage
        functions."""
        return self.source.IsError
    def IsThereMSData(self):
        """This function checks to see if there is MS data in the raw file. A return value of TRUE means
        that the raw file contains MS data. You must open the raw file before performing this check."""
        return self.source.HasMsData

    def InAcquisition(self):
        """Returns the acquisition state flag of the raw file. A return value of TRUE indicates that the
        raw file is being acquired or that all open handles to the file during acquisition have not been
        closed."""
        return self.source.InAcquisition
def RefreshViewOfFile(self):
"""Refreshes the view of a file currently being acquired. This function provides a more efficient
mechanism for gaining access to new data in a raw file during acquisition without closing and
reopening the raw file. This function has no effect with files that are not being acquired."""
return self.source.RefreshViewOfFile
    def GetExpectedRunTime(self):
        """Gets the expected acquisition run time for the current controller. The actual acquisition may
        be longer or shorter than this value. This value is intended to allow displays to show the
        expected run time on chromatograms. To obtain an accurate run time value during or after
        acquisition, use the GetEndTime function."""
        return self.source.ExpectedRunTime
    def GetNumTrailerExtra(self):
        """Gets the trailer extra entries recorded for the current controller. Trailer extra entries are only
        supported for MS device controllers and are used to store instrument specific information for
        each scan if used."""
        return self.source.RunHeaderEx.TrailerExtraCount

    def GetMaxIntegratedIntensity(self):
        """Gets the highest integrated intensity of all the scans for the current controller. This value is
        only relevant to MS device controllers."""
        return self.source.RunHeaderEx.MaxIntegratedIntensity

    def GetMaxIntensity(self):
        """Gets the highest base peak of all the scans for the current controller. This value is only relevant
        to MS device controllers."""
        return self.source.RunHeaderEx.MaxIntensity

    def GetComment1(self):
        """Returns the first comment for the current controller. This value is typically only set for raw
        files converted from other file formats."""
        return self.source.RunHeaderEx.Comment1

    def GetComment2(self):
        """Returns the second comment for the current controller. This value is typically only set for raw
        files converted from other file formats."""
        return self.source.RunHeaderEx.Comment2

    def GetFilters(self):
        """Returns the list of unique scan filters for the raw file. This function is only supported for MS
        device controllers."""
        # Convert the .NET enumerable into a plain Python list.
        return list(self.source.GetFilters())
    # INSTRUMENT BEGIN
    def GetInstName(self):
        """Returns the instrument name, if available, for the current controller."""
        # Joins all instrument names from the instrument method into one
        # " -> "-separated System.String.
        return String.Join(" -> ", self.source.GetAllInstrumentNamesFromInstrumentMethod())
    # INSTRUMENT END
    def GetScanEventStringForScanNum(self, scanNumber):
        """This function returns scan event information as a string for the specified scan number."""
        return self.source.GetScanEventStringForScanNumber(scanNumber)

    def GetNumberOfMassRangesFromScanNum(self, scanNumber):
        """This function gets the number of MassRange data items in the scan."""
        # The scan event object is cast to IScanEventBase to reach the
        # mass-range accessors.
        return IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).MassRangeCount

    def GetMassRangeFromScanNum(self, scanNumber, massRangeIndex):
        """This function retrieves information about the mass range data of a scan (high and low
        masses). You can find the count of mass ranges for the scan by calling
        GetNumberOfMassRangesFromScanNum()."""
        range = IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).GetMassRange(massRangeIndex)
        return range.Low, range.High

    def GetNumberOfSourceFragmentsFromScanNum(self, scanNumber):
        """This function gets the number of source fragments (or compensation voltages) in the scan."""
        return IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).SourceFragmentationInfoCount

    def GetSourceFragmentValueFromScanNum(self, scanNumber, sourceFragmentIndex):
        """This function retrieves information about one of the source fragment values of a scan. It is
        also the same value as the compensation voltage. You can find the count of source fragments
        for the scan by calling GetNumberOfSourceFragmentsFromScanNum ()."""
        return IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).GetSourceFragmentationInfo(sourceFragmentIndex)
    def GetIsolationWidthForScanNum(self, scanNumber, MSOrder = 0):
        """This function returns the isolation width for the scan specified by scanNumber and the
        transition specified by MSOrder (0 for MS1?) from the scan event structure in the raw file."""
        return IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).GetIsolationWidth(MSOrder)

    def GetCollisionEnergyForScanNum(self, scanNumber, MSOrder = 0):
        """This function returns the collision energy for the scan specified by scanNumber and the
        transition specified by MSOrder (0 for MS1?) from the scan event structure in the raw file. """
        return IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).GetEnergy(MSOrder)
    def GetActivationTypeForScanNum(self, scanNumber, MSOrder = 0):
        """This function returns the activation type for the scan specified by scanNumber and the
        transition specified by MSOrder from the scan event structure in the RAW file.
        The value returned in the pnActivationType variable is one of the following:
        CID 0
        MPD 1
        ECD 2
        PQD 3
        ETD 4
        HCD 5
        Any activation type 6
        SA 7
        PTR 8
        NETD 9
        NPTR 10
        UVPD 11"""
        # Translate the numeric .NET enum value to its name via the
        # class-level activationType lookup table.
        return RawFileReader.activationType[IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).GetActivation(MSOrder)]
def GetMassAnalyzerTypeForScanNum(self, scanNumber):
"""This function returns the mass analyzer type for the scan specified by scanNumber from the
scan event structure in the RAW file. The value of scanNumber must be within the range of
scans or readings for the current controller. The range of scans or readings for the current
controller may be obtained by calling GetFirstSpectrumNumber and
GetLastSpectrumNumber.
return RawFileReader.massAnalyzerType[IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).MassAnalyzer]"""
    def GetDetectorTypeForScanNum(self, scanNumber):
        """This function returns the detector type for the scan specified by scanNumber from the scan
        event structure in the RAW file."""
        # Numeric detector code is mapped to its name via detectorType.
        return RawFileReader.detectorType[IScanEventBase(self.source.GetScanEventForScanNumber(scanNumber)).Detector]

    def GetNumberOfMassCalibratorsFromScanNum(self, scanNumber):
        """This function gets the number of mass calibrators (each of which is a double) in the scan."""
        # Note: uses IScanEvent (not IScanEventBase) -- calibrator data lives
        # on the extended interface.
        return IScanEvent(self.source.GetScanEventForScanNumber(scanNumber)).MassCalibratorCount

    def GetMassCalibrationValueFromScanNum(self, scanNumber, massCalibrationIndex):
        """This function retrieves information about one of the mass calibration data values of a scan.
        You can find the count of mass calibrations for the scan by calling
        GetNumberOfMassCalibratorsFromScanNum()."""
        return IScanEvent(self.source.GetScanEventForScanNumber(scanNumber)).GetMassCalibrator(massCalibrationIndex)
def GetMassResolution(self):
| |
<gh_stars>1-10
"""
File that involves dataloaders for the Visual Genome dataset.
"""
import json
import os
import h5py
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
from dataloaders.blob import Blob
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import VG_IMAGES, IM_DATA_FN, VG_SGG_FN, VG_SGG_DICT_FN, BOX_SCALE, IM_SCALE, PROPOSAL_FN
from dataloaders.image_transforms import SquarePad, Grayscale, Brightness, Sharpness, Contrast, \
RandomOrder, Hue, random_crop
from collections import defaultdict
from pycocotools.coco import COCO
class VG(Dataset):
    """Torch dataset for the Visual Genome scene-graph annotations."""

    def __init__(self, mode, roidb_file=VG_SGG_FN, dict_file=VG_SGG_DICT_FN,
                 image_file=IM_DATA_FN, filter_empty_rels=True, num_im=-1, num_val_im=5000,
                 filter_duplicate_rels=True, filter_non_overlap=True,
                 use_proposals=False):
        """
        Torch dataset for VisualGenome
        :param mode: Must be train, test, or val
        :param roidb_file: HDF5 containing the GT boxes, classes, and relationships
        :param dict_file: JSON Contains mapping of classes/relationships to words
        :param image_file: JSON file with image metadata (loaded with json.load
            by load_image_filenames; each element carries an "image_id")
        :param filter_empty_rels: True if we filter out images without relationships between
                                  boxes. One might want to set this to false if training a detector.
        :param filter_duplicate_rels: Whenever we see a duplicate relationship we'll sample instead
        :param num_im: Number of images in the entire dataset. -1 for all images.
        :param num_val_im: Number of images in the validation set (must be less than num_im
               unless num_im is -1.)
        :param filter_non_overlap: If training, filter out images whose boxes don't overlap
        :param use_proposals: If True, load precomputed RPN proposals from PROPOSAL_FN
        """
        if mode not in ('test', 'train', 'val'):
            raise ValueError("Mode must be in test, train, or val. Supplied {}".format(mode))
        self.mode = mode
        # Initialize
        self.roidb_file = roidb_file
        self.dict_file = dict_file
        self.image_file = image_file
        self.filter_non_overlap = filter_non_overlap
        # Duplicate-relation filtering only applies during training.
        self.filter_duplicate_rels = filter_duplicate_rels and self.mode == 'train'
        self.split_mask, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
            self.roidb_file, self.mode, num_im, num_val_im=num_val_im,
            filter_empty_rels=filter_empty_rels,
            filter_non_overlap=self.filter_non_overlap and self.is_train,
        )
        # Keep only filenames of images that survived the split mask.
        # NOTE(review): assumes load_image_filenames returns one entry per
        # roidb image, in the same order as split_mask -- verify against the
        # preprocessing pipeline.
        self.filenames = load_image_filenames(image_file)
        self.filenames = [self.filenames[i] for i in np.where(self.split_mask)[0]]
        assert len(self.filenames) == len(self.gt_classes)
        self.ind_to_classes, self.ind_to_predicates = load_info(dict_file)
        if use_proposals:
            print("Loading proposals", flush=True)
            p_h5 = h5py.File(PROPOSAL_FN, 'r')
            rpn_rois = p_h5['rpn_rois']
            rpn_scores = p_h5['rpn_scores']
            rpn_im_to_roi_idx = np.array(p_h5['im_to_roi_idx'][self.split_mask])
            rpn_num_rois = np.array(p_h5['num_rois'][self.split_mask])
            self.rpn_rois = []
            for i in range(len(self.filenames)):
                # Per-image proposals: column 0 is the score, columns 1-4 the box.
                rpn_i = np.column_stack((
                    rpn_scores[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
                    rpn_rois[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
                ))
                self.rpn_rois.append(rpn_i)
        else:
            self.rpn_rois = None
        # You could add data augmentation here. But we didn't.
        # tform = []
        # if self.is_train:
        #     tform.append(RandomOrder([
        #         Grayscale(),
        #         Brightness(),
        #         Contrast(),
        #         Sharpness(),
        #         Hue(),
        #     ]))
        tform = [
            SquarePad(),
            Resize(IM_SCALE),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
        self.transform_pipeline = Compose(tform)

    @property
    def coco(self):
        """
        :return: a Coco-like object that we can use to evaluate detection!
        """
        anns = []
        for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
            for cls, box in zip(cls_array.tolist(), box_array.tolist()):
                # COCO bbox format is [x, y, w, h]; +1 because boxes are
                # inclusive pixel coordinates.
                anns.append({
                    'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
                    'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
                    'category_id': cls,
                    'id': len(anns),
                    'image_id': i,
                    'iscrowd': 0,
                })
        fauxcoco = COCO()
        fauxcoco.dataset = {
            'info': {'description': 'ayy lmao'},
            'images': [{'id': i} for i in range(self.__len__())],
            'categories': [{'supercategory': 'person',
                            'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
            'annotations': anns,
        }
        fauxcoco.createIndex()
        return fauxcoco

    @property
    def is_train(self):
        # True for the 'train' split only.
        return self.mode.startswith('train')

    @classmethod
    def splits(cls, *args, **kwargs):
        """ Helper method to generate splits of the dataset"""
        train = cls('train', *args, **kwargs)
        val = cls('val', *args, **kwargs)
        test = cls('test', *args, **kwargs)
        return train, val, test

    def __getitem__(self, index):
        """Return one sample dict (image tensor, boxes, classes, relations)."""
        image_unpadded = Image.open(self.filenames[index]).convert('RGB')
        # Optionally flip the image if we're doing training
        flipped = self.is_train and np.random.random() > 0.5
        gt_boxes = self.gt_boxes[index].copy()
        # Boxes are already at BOX_SCALE
        if self.is_train:
            # crop boxes that are too large. This seems to be only a problem for image heights, but whatevs
            gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]].clip(
                None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[1])
            gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]].clip(
                None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[0])
            # # crop the image for data augmentation
            # image_unpadded, gt_boxes = random_crop(image_unpadded, gt_boxes, BOX_SCALE, round_boxes=True)
        w, h = image_unpadded.size
        box_scale_factor = BOX_SCALE / max(w, h)
        if flipped:
            scaled_w = int(box_scale_factor * float(w))
            # print("Scaled w is {}".format(scaled_w))
            image_unpadded = image_unpadded.transpose(Image.FLIP_LEFT_RIGHT)
            # Mirror x-coordinates; x1/x2 swap to stay ordered.
            gt_boxes[:, [0, 2]] = scaled_w - gt_boxes[:, [2, 0]]
        img_scale_factor = IM_SCALE / max(w, h)
        if h > w:
            im_size = (IM_SCALE, int(w * img_scale_factor), img_scale_factor)
        elif h < w:
            im_size = (int(h * img_scale_factor), IM_SCALE, img_scale_factor)
        else:
            im_size = (IM_SCALE, IM_SCALE, img_scale_factor)
        gt_rels = self.relationships[index].copy()
        if self.filter_duplicate_rels:
            # Filter out dupes!
            assert self.mode == 'train'
            old_size = gt_rels.shape[0]
            all_rel_sets = defaultdict(list)
            for (o0, o1, r) in gt_rels:
                all_rel_sets[(o0, o1)].append(r)
            # For each (subject, object) pair keep ONE randomly chosen predicate.
            gt_rels = [(k[0], k[1], np.random.choice(v)) for k, v in all_rel_sets.items()]
            gt_rels = np.array(gt_rels)
        entry = {
            'img': self.transform_pipeline(image_unpadded),
            'img_size': im_size,
            'gt_boxes': gt_boxes,
            'gt_classes': self.gt_classes[index].copy(),
            'gt_relations': gt_rels,
            'scale': IM_SCALE / BOX_SCALE,  # Multiply the boxes by this.
            'index': index,
            'flipped': flipped,
            'fn': self.filenames[index],
        }
        if self.rpn_rois is not None:
            entry['proposals'] = self.rpn_rois[index]
        assertion_checks(entry)
        return entry

    def __len__(self):
        return len(self.filenames)

    @property
    def num_predicates(self):
        return len(self.ind_to_predicates)

    @property
    def num_classes(self):
        return len(self.ind_to_classes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MISC. HELPER FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def assertion_checks(entry):
    """Validate a dataset ``entry`` dict produced by ``VG.__getitem__``.

    Checks that the image tensor is 3-channel CHW, that box and class counts
    agree, and that box coordinates are sane (x2 >= x1, everything >= -1).

    :param entry: dict with at least 'img' (tensor), 'gt_boxes' (ndarray
        [N, 4]) and 'gt_classes' (ndarray [N]) keys
    :raises ValueError: if any invariant is violated
    """
    im_size = tuple(entry['img'].size())
    if len(im_size) != 3:
        raise ValueError("Img must be dim-3")
    c, h, w = entry['img'].size()
    if c != 3:
        raise ValueError("Must have 3 color channels")
    num_gt = entry['gt_boxes'].shape[0]
    if entry['gt_classes'].shape[0] != num_gt:
        raise ValueError("GT classes and GT boxes must have same number of examples")
    # BUG FIX: these used to be bare ``assert`` statements, which are compiled
    # away under ``python -O``; raise explicitly so validation always runs.
    if not (entry['gt_boxes'][:, 2] >= entry['gt_boxes'][:, 0]).all():
        raise ValueError("GT boxes must satisfy x2 >= x1")
    if not (entry['gt_boxes'] >= -1).all():
        raise ValueError("GT box coordinates must be >= -1")
def load_image_filenames(image_file, image_dir=VG_IMAGES):
    """
    Loads the image filenames from visual genome from the JSON file that contains them.
    This matches the preprocessing in scene-graph-TF-release/data_tools/vg_to_imdb.py.
    :param image_file: JSON file. Elements contain the param "image_id".
    :param image_dir: directory where the VisualGenome images are located
    :return: List of filenames corresponding to the good images
    """
    with open(image_file, 'r') as f:
        metadata = json.load(f)
    print ('image dir ', image_dir)
    # These four images are known to be corrupted and are always skipped.
    corrupted_ims = {'1592.jpg', '1722.jpg', '4616.jpg', '4617.jpg'}
    fns = []
    for entry in metadata:
        basename = '{}.jpg'.format(entry['image_id'])
        if basename in corrupted_ims:
            continue
        candidate = os.path.join(image_dir, basename)
        # Only keep files that actually exist on disk.
        if os.path.exists(candidate):
            fns.append(candidate)
    print ('number of images ', len(fns))
    return fns
def load_graphs(graphs_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
filter_non_overlap=False):
"""
Load the file containing the GT boxes and relations, as well as the dataset split
:param graphs_file: HDF5
:param mode: (train, val, or test)
:param num_im: Number of images we want
:param num_val_im: Number of validation images
:param filter_empty_rels: (will be filtered otherwise.)
:param filter_non_overlap: If training, filter images that dont overlap.
:return: image_index: numpy array corresponding to the index of images we're using
boxes: List where each element is a [num_gt, 4] array of ground
truth boxes (x1, y1, x2, y2)
gt_classes: List where each element is a [num_gt] array of classes
relationships: List where each element is a [num_r, 3] array of
(box_ind_1, box_ind_2, predicate) relationships
"""
if mode not in ('train', 'val', 'test'):
raise ValueError('{} invalid'.format(mode))
roi_h5 = h5py.File(graphs_file, 'r')
data_split = roi_h5['split'][:]
split = 2 if mode == 'test' else 0
split_mask = data_split == split
# Filter out images without bounding boxes
split_mask &= roi_h5['img_to_first_box'][:] >= 0
if filter_empty_rels:
split_mask &= roi_h5['img_to_first_rel'][:] >= 0
image_index = np.where(split_mask)[0]
if num_im > -1:
image_index = image_index[:num_im]
if num_val_im > 0:
if mode == 'val':
image_index = image_index[:num_val_im]
elif mode == 'train':
image_index = image_index[num_val_im:]
split_mask = np.zeros_like(data_split).astype(bool)
split_mask[image_index] = True
# Get box information
all_labels = roi_h5['labels'][:, 0]
all_boxes = roi_h5['boxes_{}'.format(BOX_SCALE)][:] # will index later
assert np.all(all_boxes[:, :2] >= 0) # sanity check
assert np.all(all_boxes[:, 2:] > 0) # no empty box
# convert from xc, yc, w, h to x1, y1, x2, y2
all_boxes[:, :2] = all_boxes[:, :2] - all_boxes[:, 2:] / 2
all_boxes[:, 2:] = all_boxes[:, :2] + all_boxes[:, 2:]
im_to_first_box = roi_h5['img_to_first_box'][split_mask]
im_to_last_box = roi_h5['img_to_last_box'][split_mask]
im_to_first_rel = roi_h5['img_to_first_rel'][split_mask]
im_to_last_rel = roi_h5['img_to_last_rel'][split_mask]
# load relation labels
_relations = roi_h5['relationships'][:]
_relation_predicates = roi_h5['predicates'][:, 0]
assert (im_to_first_rel.shape[0] == im_to_last_rel.shape[0])
assert (_relations.shape[0] == _relation_predicates.shape[0]) # sanity check
# Get everything by image.
boxes = []
gt_classes = []
relationships = []
for i in range(len(image_index)):
boxes_i = all_boxes[im_to_first_box[i]:im_to_last_box[i] + 1, :]
gt_classes_i = all_labels[im_to_first_box[i]:im_to_last_box[i] + 1]
if im_to_first_rel[i] >= 0:
predicates = _relation_predicates[im_to_first_rel[i]:im_to_last_rel[i] + 1]
obj_idx = _relations[im_to_first_rel[i]:im_to_last_rel[i] + 1] - im_to_first_box[i]
assert np.all(obj_idx >= 0)
assert np.all(obj_idx < | |
import re
from typing import Dict, List, Union, Tuple, Callable, Any
import ipywidgets as ipw
import traitlets
from optimade.models.utils import CHEMICAL_SYMBOLS
from aiidalab_optimade.exceptions import ParserError
from aiidalab_optimade.logger import LOGGER
__all__ = ("FilterTabs",)
class FilterTabs(ipw.Tab):
    """Separate filter inputs into tabs"""

    def __init__(self, **kwargs):
        # (title, widget) pairs; the "Advanced" tab is not yet implemented.
        sections: Tuple[Tuple[str, FilterTabSection]] = (
            ("Basic", FilterInputs()),
            # ("Advanced", ipw.HTML("This input tab has not yet been implemented.")),
            ("Raw", FilterRaw()),
        )
        super().__init__(
            children=tuple(_[1] for _ in sections),
            layout={"width": "auto", "height": "auto"},
        )
        for index, title in enumerate([_[0] for _ in sections]):
            self.set_title(index, title)

    def freeze(self):
        """Disable widget"""
        # Plain HTML children (placeholder tabs) have no freeze/unfreeze/reset.
        for widget in self.children:
            if not isinstance(widget, ipw.HTML):
                widget.freeze()

    def unfreeze(self):
        """Activate widget (in its current state)"""
        for widget in self.children:
            if not isinstance(widget, ipw.HTML):
                widget.unfreeze()

    def reset(self):
        """Reset widget"""
        for widget in self.children:
            if not isinstance(widget, ipw.HTML):
                widget.reset()

    def collect_value(self) -> str:
        """Collect inputs to a single OPTIMADE filter query string"""
        # Only the currently selected tab contributes to the query.
        active_widget = self.children[self.selected_index]
        if not isinstance(active_widget, ipw.HTML):
            return active_widget.collect_value()
        return ""

    def on_submit(self, callback, remove=False):
        """(Un)Register a callback to handle text submission"""
        # NOTE(review): unlike freeze/unfreeze/reset, this loop does not skip
        # ipw.HTML children -- it would raise if an HTML tab were re-enabled.
        for section_widget in self.children:
            section_widget.on_submit(callback=callback, remove=remove)

    def update_range_filters(self, data: Dict[str, dict]):
        """Update filter widgets with a range (e.g., IntRangeSlider) according to `data`"""
        for section_widget in self.children:
            section_widget.range_nx = data
class FilterTabSection(ipw.VBox):
    """Base class for a filter tab section"""

    # Shared trait: mapping of field name -> range config, set by FilterTabs.
    range_nx = traitlets.Dict(allow_none=True)

    @traitlets.observe("range_nx")
    def update_ranged_inputs(self, change: dict):
        """Update ranged inputs' min/max values"""
        # Intentional no-op hook; subclasses override to react to new ranges.

    def collect_value(self) -> str:
        """Collect inputs to a single OPTIMADE filter query string"""
        # Intentional no-op hook; subclasses override (implicitly returns None).

    def on_submit(self, callback, remove=False):
        """(Un)Register a callback to handle user input submission"""
        # Intentional no-op hook; subclasses override.
class FilterRaw(FilterTabSection):
    """Filter inputs for raw input"""

    def __init__(self, **kwargs):
        # A single free-form input for a complete OPTIMADE 'filter' string.
        raw_field = FilterInput(
            description="Filter",
            hint="Raw 'filter' query string ...",
            description_width="50px",
        )
        self.inputs = [raw_field]
        super().__init__(children=self.inputs, layout={"width": "auto"}, **kwargs)

    def reset(self):
        """Reset widget"""
        for field in self.inputs:
            field.reset()

    def freeze(self):
        """Disable widget"""
        for field in self.inputs:
            field.freeze()

    def unfreeze(self):
        """Activate widget (in its current state)"""
        for field in self.inputs:
            field.unfreeze()

    def collect_value(self) -> str:
        """Collect inputs to a single OPTIMADE filter query string"""
        # The raw string is passed through verbatim, minus surrounding whitespace.
        return self.inputs[0].get_user_input.strip()

    def on_submit(self, callback, remove=False):
        """(Un)Register a callback to handle user input submission"""
        for field in self.inputs:
            field.on_submit(callback=callback, remove=remove)
class FilterInput(ipw.HBox):
    """Combination of HTML and input widget for filter inputs

    :param description: label shown to the left of the input widget
    :param input_widget: widget factory to use instead of ipw.Text
    :param hint: placeholder text (only applied to string-like widgets)
    :param description_width: CSS width for the label column
    :param kwargs: Keyword arguments passed on to `input_widget`
    """

    def __init__(
        self,
        description: str,
        input_widget: Callable = None,
        hint: str = None,
        description_width: str = None,
        **kwargs,
    ):
        _description_width = (
            description_width if description_width is not None else "170px"
        )
        description = ipw.HTML(description, layout={"width": _description_width})
        _layout = {"width": "100%"}
        # Default to a plain text field when no widget factory is supplied.
        self.input_widget = (
            input_widget(layout=_layout, **kwargs)
            if input_widget is not None
            else ipw.Text(layout=_layout)
        )
        # Placeholders only exist on string-based widgets (Text, Textarea, ...).
        if hint and isinstance(self.input_widget, ipw.widgets.widget_string._String):
            self.input_widget.placeholder = hint
        super().__init__(
            children=[description, self.input_widget], layout=ipw.Layout(width="auto")
        )

    @property
    def get_user_input(self):
        """The Widget.value"""
        return self.input_widget.value

    def reset(self):
        """Reset widget"""
        with self.hold_trait_notifications():
            self.input_widget.value = ""
            self.input_widget.disabled = False

    def freeze(self):
        """Disable widget"""
        self.input_widget.disabled = True

    def unfreeze(self):
        """Activate widget (in its current state)"""
        self.input_widget.disabled = False

    def on_submit(self, callback, remove=False):
        """(Un)Register a callback to handle text submission"""
        # NOTE(review): relies on the private ``_submission_callbacks``
        # attribute of ipw.Text -- may break across ipywidgets versions.
        if isinstance(self.input_widget, ipw.Text):
            self.input_widget._submission_callbacks.register_callback(  # pylint: disable=protected-access
                callback, remove=remove
            )
class FilterInputParser:
    """Parse user input for filters.

    ``parse`` dispatches on the field name: when a method of the same name
    exists it handles the value, otherwise the value passes through
    ``__default__`` unchanged (apart from whitespace stripping for strings).
    """

    def __default__(self, value: Any) -> Any:  # pylint: disable=no-self-use
        """Default parsing fallback function"""
        return value

    def parse(self, key: str, value: Any) -> Any:
        """Reroute to self.<key>(value)"""
        if isinstance(value, str):
            # Remove any superfluous whitespace at the beginning and end of string values
            value = value.strip()
        func = getattr(self, key, None)
        if func is None:
            return self.__default__(value)
        return func(value)

    @staticmethod
    def chemical_formula_descriptive(value: str) -> str:
        """Chemical formula descriptive is a free form input"""
        # Strip user-supplied quotes, then quote the whole value ourselves.
        value = value.replace('"', "")
        return f'"{value}"' if value else ""

    @staticmethod
    def dimension_types(value: str) -> str:
        """Map to correct dimension_types value"""
        mapping = {
            "0": "1",  # [0,0,0]
            "1": "ALL 0,1",  # [0,0,1] not working at the moment
            "2": "ALL 0,1",  # [1,0,1] not working at the moment
            "3": "0",  # [1,1,1]
        }
        return mapping.get(value, value)

    @staticmethod
    def lattice_vectors(value: str) -> str:
        """Wrap in query list of values"""
        if value.find("(") != -1 and value.find(")") != -1:
            pass
            # wrappers = ("(", ")")
        elif value.find("[") != -1 and value.find("]") != -1:
            pass
            # wrappers = ("[", "]")
        else:
            raise ParserError(
                "Wrong input. Should be e.g. (4.1, 0, 0) (0, 4.1, 0) (0, 0, 4.1)",
                # BUG FIX: field name was misspelled 'lattica_vectors'.
                "lattice_vectors",
                value,
            )
        raise ParserError("Not yet implemented.", "lattice_vectors", value)
        # for vector in re.finditer(f"{wrappers[0]}.*{wrappers[1]}", value):
        #     vector.

    @staticmethod
    def operator_and_integer(field: str, value: str) -> str:
        """Handle operator for values with integers and a possible operator prefixed"""
        LOGGER.debug(
            "Parsing input with operator_and_integer. <field: %r>, <value: %r>",
            field,
            value,
        )
        match_operator = re.findall(r"[<>]?=?", value)
        match_no_operator = re.findall(r"^\s*[0-9]+", value)
        LOGGER.debug(
            "Finding all operators (or none):\nmatch_operator: %r\nmatch_no_operator: %r",
            match_operator,
            match_no_operator,
        )
        if match_operator and any(match_operator):
            match_operator = [_ for _ in match_operator if _]
            if len(match_operator) != 1:
                raise ParserError(
                    "Multiple values given with operators.",
                    field,
                    value,
                    extras=("match_operator", match_operator),
                )
            number = re.findall(r"[0-9]+", value)[0]
            # BUG FIX: str.replace(r"\s*", "") was a no-op (str.replace does
            # not take regexes); use re.sub to strip whitespace as intended.
            operator = re.sub(r"\s+", "", match_operator[0])
            return f"{operator}{number}"
        if match_no_operator and any(match_no_operator):
            match_no_operator = [_ for _ in match_no_operator if _]
            if len(match_no_operator) != 1:
                raise ParserError(
                    "Multiple values given, must be an integer, "
                    "either with or without an operator prefixed.",
                    field,
                    value,
                    extras=("match_no_operator", match_no_operator),
                )
            # BUG FIX: same no-op replace; the regex above can capture leading
            # whitespace, which must be removed before building the query.
            result = re.sub(r"\s+", "", match_no_operator[0])
            return f"={result}"
        raise ParserError(
            "Not proper input. Should be, e.g., '>=3' or '5'",
            field,
            value,
            extras=[
                ("match_operator", match_operator),
                ("match_no_operator", match_no_operator),
            ],
        )

    @staticmethod
    def ranged_int(field: str, value: Tuple[int, int]) -> str:
        """Turn IntRangeSlider widget value into OPTIMADE filter string"""
        LOGGER.debug("ranged_int: Received value %r for field %r", value, field)
        low, high = value
        if low == high:
            # Exactly N of property
            res = f"={low}"
        else:
            # Range of property
            res = [f">={low}", f"<={high}"]
        LOGGER.debug("ranged_int: Concluded the response is %r", res)
        return res

    def nsites(self, value: Tuple[int, int]) -> Union[List[str], str]:
        """Operator with integer values"""
        return self.ranged_int("nsites", value)

    def nelements(self, value: Tuple[int, int]) -> Union[List[str], str]:
        """Operator with integer values"""
        return self.ranged_int("nelements", value)

    @staticmethod
    def elements(value: str) -> str:
        """Check against optimade-python-tools list of elements"""
        results = []
        symbols = re.findall(r",?\s*[\"']?([A-Za-z]+)[\"']?,?\s*", value)
        for symbol in symbols:
            if symbol == "":
                continue
            # BUG FIX: str.replace(r"\W", "") was a no-op; use re.sub to drop
            # non-word characters as intended (regex above already restricts
            # matches to letters, so this is belt-and-braces).
            escaped_symbol = re.sub(r"\W", "", symbol.strip())
            escaped_symbol = escaped_symbol.capitalize()
            if escaped_symbol not in CHEMICAL_SYMBOLS:
                raise ParserError(
                    f"{escaped_symbol} is not a valid element.", "elements", value
                )
            results.append(escaped_symbol)
        return ",".join([f'"{symbol}"' for symbol in results])
class FilterInputs(FilterTabSection):
"""Filter inputs in a single widget"""
provider_section = traitlets.List()
FILTER_SECTIONS = [
(
"Chemistry",
[
(
"chemical_formula_descriptive",
{"description": "Chemical Formula", "hint": "e.g., (H2O)2 Na"},
),
("elements", {"description": "Elements", "hint": "H, O, Cl, ..."}),
(
"nelements",
{
"description": "Number of Elements",
"input_widget": ipw.IntRangeSlider,
"min": 0,
"max": len(CHEMICAL_SYMBOLS),
"value": (0, len(CHEMICAL_SYMBOLS)),
},
),
],
),
(
"Cell",
[
(
"dimension_types",
{
"description": "Dimensions",
"hint": "0: Molecule, 3: Bulk, (Not supported: 1: Wire, 2: Planar)",
},
),
(
"nsites",
{
"description": "Number of Sites",
"input_widget": ipw.IntRangeSlider,
"min": 0,
"max": 10000,
"value": (0, 10000),
},
),
],
),
(
"Provider specific",
[
(
"id",
{"description": "Provider ID", "hint": "NB! Will take precedence"},
)
],
),
]
FIELD_MAP = {"dimension_types": "NOT dimension_types"}
OPERATOR_MAP = {
"chemical_formula_descriptive": " CONTAINS ",
"elements": " HAS ALL ",
"nelements": "",
"dimension_types": " HAS ",
"lattice_vectors": " HAS ANY ",
"nsites": "",
"id": "=",
}
def __init__(self, **kwargs):
self.query_fields: Dict[str, FilterInput] = {}
self._layout = ipw.Layout(width="auto")
sections = [
self.new_section(title, inputs) for title, inputs in self.FILTER_SECTIONS
]
# Remove initial line-break
sections[0].children[0].value = sections[0].children[0].value[len("<br>") :]
super().__init__(children=sections, layout=self._layout, **kwargs)
def reset(self):
"""Reset widget"""
for user_input in self.query_fields.values():
user_input.reset()
def freeze(self):
"""Disable widget"""
for user_input in self.query_fields.values():
user_input.freeze()
def unfreeze(self):
"""Activate widget (in its current state)"""
for user_input in self.query_fields.values():
user_input.unfreeze()
@traitlets.observe("range_nx")
def update_ranged_inputs(self, change: dict):
"""Update ranged inputs' min/max values"""
ranges = change["new"]
if not ranges or ranges is None:
return
for field, config in ranges.items():
if field not in self.query_fields:
raise ParserError(
field=field,
value="N/A",
extras=[
("config", config),
("self.query_fields.keys", self.query_fields.keys()),
],
msg="Provided field is unknown. Can not update range for unknown field.",
)
widget = self.query_fields[field].input_widget
cached_value: Tuple[int, int] = widget.value
for attr in ("min", "max"):
if attr in config:
try:
new_value = int(config[attr])
except (TypeError, ValueError) as exc:
raise ParserError(
field=field,
value=cached_value,
extras=[("attr", attr), ("config[attr]", config[attr])],
msg=f"Could not cast config[attr] to int. Exception: {exc!s}",
| |
import string
class Equacalc:
def __init__(self):
var = 0
Raddvar = ''
Rmulvar = ''
Rvarlist = []
Laddvar = ''
Lmulvar = ''
Lvarlist = []
tempvar = 0
templisty = []
newequa1 = 0
negmulflag = 0
negmulflag2 = 0
Laddsubliketerms = 0
Raddsubliketerms = 0
Lmuldivliketerms = 0
Rmuldivliketerms = 0
Lmuldivlist = []
Rmuldivlist = []
Lmulvarlist = []
Rmulvarlist = []
alphabet = list(string.ascii_lowercase)
strint = ''
self.onlyOnce = 1
Rblanky = []
Lblanky = []
Lbint = 0
Lbvar = 0
Llisty = []
Rlisty = []
variable = 'n'
equation = str(raw_input('Type Equation: '))
print equation
for i in equation:
if i in alphabet:
variable = i
equa = ''
for i in equation:
equa += i
if i == '=':
break
equa2 = ''
flag = False
for i in equation:
if flag:
equa2 += i
if i == '=':
flag = True
equa2 += '='
for j in range(len(equa)):
if self.onlyOnce == 1:
self.onlyOnce = 0
if equa[0] == '0' or equa[0] == '1' or equa[0] == '2' or equa[0] == '3' or equa[0] == '4' or equa[0] == '5' or equa[0] == '6' or equa[0] == '7' or equa[0] == '8' or equa[0] == '9' or equa[0] == '-':
increment = 0
while equa[increment] != '+':
increment += 1
if equa[increment] == '-' or equa[increment] == '=' or equa[increment] == '*':
break
temp = ''
temp += equa[j:increment]
if equa[increment] == '*' and not temp.startswith('*'):
temp = '*' + temp
Lblanky.append(temp)
elif equa[j] == '+' or equa[j] == '-' or equa[j] == '*':
increment = j
while equa[increment] != '=':
if negmulflag == 1:
negmulflag = 0
break
increment += 1
if equa[increment] == '+' or equa[increment] == '-' or equa[increment] == '*':
if equa[j] == '*' and (equa[j+1] == '-' or equa[j+1] == '+'):
while equa[increment] != '=':
negmulflag = 1
increment += 1
if equa[increment] == '+' or equa[increment] == '-' or equa[increment] == '*':
print 'hi'
break
break
temp = ''
temp += equa[j:increment]
if equa[increment] == '*' and not temp.startswith('*'):
temp = '*' + temp
Lmuldivlist.append(temp)
elif temp != '':
Lblanky.append(temp)
self.onlyOnce = 1
for i in range(len(equa2)):
if self.onlyOnce == 1:
self.onlyOnce = 0
if equa2[0] == '0' or equa2[0] == '1' or equa2[0] == '2' or equa2[0] == '3' or equa2[0] == '4' or equa2[0] == '5' or equa2[0] == '6' or equa2[0] == '7' or equa2[0] == '8' or equa2[0] == '9' or equa2[0] == '-':
increment = 0
while equa2[increment] != '+':
increment += 1
if equa2[increment] == '-' or equa2[increment] == '=' or equa2[increment] == '*':
break
temp = ''
temp += equa2[i:increment]
if equa2[increment] == '*' and not temp.startswith('*'):
temp = '*' + temp
Rblanky.append(temp)
elif equa2[i] == '+' or equa2[i] == '-' or equa2[i] == '*':
increment = i
while equa2[increment] != '=':
if negmulflag2 == 1:
negmulflag2 = 0
break
increment += 1
if equa2[increment] == '+' or equa2[increment] == '-' or equa2[increment] == '*':
if equa2[i] == '*' and (equa2[i+1] == '-' or equa2[i+1] == '+'):
while equa2[increment] != '=':
negmulflag2 = 1
increment += 1
if equa2[increment] == '+' or equa2[increment] == '-' or equa2[increment] == '*':
print 'hi'
break
elif equa2[i] == '+' and (equa2[i+1] == '-' or equa2[i+1] == '+'):
while equa2[increment] != '=':
negmulflag2 = 1
increment += 1
if equa2[increment] == '+' or equa2[increment] == '-' or equa2[increment] == '*':
print 'hi'
break
break
temp = ''
temp += equa2[i:increment]
if equa2[increment] == '*' and not temp.startswith('*'):
temp = '*' + temp
Rmuldivlist.append(temp)
elif temp != '':
Rblanky.append(temp)
for i in Lblanky:
j = i
intflag = True
strint = ''
if j[0] == '+' and j[-1] not in alphabet:
strint = j
Llisty.append(int(strint))
intflag = False
if j[0] == '*' and j[-1] not in alphabet:
strint = list(j)
strint.remove(strint[0])
strint = ''.join(strint)
Lmuldivlist.append(int(strint))
intflag = False
if intflag:
if j[0] == '-' and j[-1] not in alphabet:
strint = j
Llisty.append(int(strint))
elif j[-1] not in alphabet:
if j[0] != '+' or j[0] != '-' or j[0] != '*':
Llisty.append(int(j))
elif j[-1] in alphabet:
tempjlist = list(j)
tempjlist.remove(tempjlist[-1])
strtempj = ''.join(tempjlist)
if strtempj == '-':
strtempj = '-1'
elif strtempj == '+':
strtempj = '+1'
if strtempj.startswith('*'):
tempwar2 = list(strtempj)
tempwar2.remove(tempwar2[0])
strtempj = int(''.join(tempwar2))
Lmulvarlist.append(strtempj)
else:
inttempj = int(strtempj)
Lvarlist.append(inttempj)
for i in Rblanky:
j = i
intflag = True
strint = ''
if j[0] == '+' and j[-1] not in alphabet:
if j[1] == '-':
j = j[1:]
Rlisty.append(int(j))
else:
strint = j
Rlisty.append(int(strint))
intflag = True
elif j[0] == '*' and j[-1] not in alphabet:
strint = list(j)
strint.remove(strint[0])
strint = ''.join(strint)
Rmuldivlist.append(int(strint))
intflag = False
elif intflag:
if j[0] == '-' and j[-1] not in alphabet:
strint = j
Rlisty.append(int(strint))
elif j[-1] not in alphabet:
if j[0] != '+' or j[0] != '-' or j[0] != '*':
Rlisty.append(int(j))
elif j[-1] in alphabet:
tempjlist = list(j)
tempjlist.remove(tempjlist[-1])
strtempj = ''.join(tempjlist)
if strtempj == '-':
strtempj = '-1'
elif strtempj == '+':
strtempj = '+1'
if strtempj.startswith('*'):
tempwar2 = list(strtempj)
tempwar2.remove(tempwar2[0])
strtempj = int(''.join(tempwar2))
Rmulvarlist.append(strtempj)
else:
inttempj = int(strtempj)
Rvarlist.append(inttempj)
tempvar = 0
for i in Llisty:
tempvar += i
Laddsubliketerms = str(tempvar)
tempvar = 1
if Lmuldivlist != []:
for i in Lmuldivlist:
tempvar *= i
Lmuldivliketerms = str(tempvar)
tempvar = 0
for i in Lvarlist:
tempvar += i
Laddvar = str(tempvar)
Laddvar += variable
tempvar = 1
if Lmulvarlist != []:
for i in Lmulvarlist:
tempvar *= i
Lmulvar = str(tempvar)
Lmulvar += variable
tempvar = 1
if Rmulvarlist != []:
for i in Rmulvarlist:
tempvar *= i
Rmulvar = str(tempvar)
Rmulvar += variable
tempvar = 0
for i in Rlisty:
tempvar += i
Raddsubliketerms = str(tempvar)
tempvar = 1
if Rmuldivlist != []:
for i in Rmuldivlist:
tempvar *= i
Rmuldivliketerms = str(tempvar)
tempvar = 0
for i in Rvarlist:
tempvar += i
Raddvar = str(tempvar)
Raddvar += variable
Raddtemp = int(Raddsubliketerms)
Rmultemp = int(Rmuldivliketerms)
Laddtemp = int(Laddsubliketerms)
#if Lmuldivliketerms[0] == '*' and Lmuldivliketerms[1] == '-':
# Lmuldivliketerms = Lmuldivliketerms[1:]
Lmultemp = int(Lmuldivliketerms)
Raddsubliketerms = str(Raddtemp + Rmultemp)
Laddsubliketerms = str(Laddtemp + Lmultemp)
if Raddsubliketerms == '0':
Raddsubliketerms = ''
elif Raddsubliketerms != '0':
Raddsubliketerms = '+' + Raddsubliketerms
if Raddtemp == 0:
Raddsubliketerms = Raddsubliketerms[1:]
Raddsubliketerms = '*' + Raddsubliketerms
if Laddsubliketerms == '0':
Laddsubliketerms = ''
elif Laddsubliketerms != '0':
Laddsubliketerms = '+' + Laddsubliketerms
if Laddtemp == 0:
Laddsubliketerms = Laddsubliketerms[1:]
Laddsubliketerms = '*' + Laddsubliketerms
temp0var = '0' + variable
if Laddvar == temp0var:
Laddvar = ''
elif Laddvar != temp0var:
Laddvar = '+' + Laddvar
if Raddvar == temp0var:
Raddvar = ''
elif Raddvar != temp0var:
Raddvar = '+' + Raddvar
if Lmulvar == variable:
Lmulvar = ''
elif Lmulvar != variable:
Lmulvar = '+' + Lmulvar
if Rmulvar == variable:
Rmulvar = ''
elif Rmulvar != variable:
Rmulvar = '+' + Rmulvar
if Lmulvar == '' and Rmulvar != '':
newequa1 = str(Laddvar + Lmulvar + Laddsubliketerms + '=' + Rmulvar + '^' + str(len(Rmulvarlist)) + Raddvar + Raddsubliketerms)
elif Lmulvar != '' and Rmulvar == '':
newequa1 = str(Laddvar + Lmulvar + '^' + str(len(Lmulvarlist)) + Laddsubliketerms + '=' + Rmulvar + Raddvar + Raddsubliketerms)
elif Lmulvar == '' and Rmulvar == '':
newequa1 = str(Laddvar + Lmulvar + Laddsubliketerms + '=' + Rmulvar + Raddvar + Raddsubliketerms)
elif Lmulvar != '' and Rmulvar != '':
newequa1 = str(Laddvar + Lmulvar + '^' + str(len(Lmulvarlist)) + Laddsubliketerms + '=' + Rmulvar + '^' + str(len(Rmulvarlist)) + Raddvar + Raddsubliketerms)
if newequa1.endswith('='):
newequa1 += '0'
print newequa1
newequa2 = ''
newequa3 = ''
for i in newequa1:
newequa2 += i
if i == '=':
break
flag = False
for i in newequa1:
if | |
math.cos(1.73021765373 + 104344.49901929598 * self.t)
Z0 += 0.00000000010 * math.cos(2.77364193598 + 77829.99768684579 * self.t)
Z0 += 0.00000000010 * math.cos(2.83314383566 + 104202.04936916218 * self.t)
Z0 += 0.00000000010 * math.cos(4.05963464737 + 26089.38761428249 * self.t)
Z0 += 0.00000000012 * math.cos(5.40316858194 + 110634.68841628819 * self.t)
Z0 += 0.00000000010 * math.cos(6.17247219966 + 103299.34418310839 * self.t)
Z0 += 0.00000000010 * math.cos(0.02153234343 + 137678.19129947099 * self.t)
Z0 += 0.00000000012 * math.cos(2.25188175428 + 52065.59996192899 * self.t)
Z0 += 0.00000000010 * math.cos(3.11650831039 + 26086.4186688659 * self.t)
Z0 += 0.00000000010 * math.cos(3.27287049435 + 103285.11708910679 * self.t)
Z0 += 0.00000000010 * math.cos(1.38532261128 + 25446.4895798352 * self.t)
Z0 += 0.00000000012 * math.cos(1.52019007531 + 54509.0026760204 * self.t)
Z0 += 0.00000000011 * math.cos(6.21574870463 + 61279.713277266 * self.t)
Z0 += 0.00000000009 * math.cos(0.49710408415 + 2221.856634597 * self.t)
Z0 += 0.00000000009 * math.cos(2.88821691711 + 90695.75207512038 * self.t)
Z0 += 0.00000000011 * math.cos(2.16522629249 + 26624.70765366959 * self.t)
Z0 += 0.00000000009 * math.cos(4.80391342687 + 27311.72098235281 * self.t)
Z0 += 0.00000000009 * math.cos(4.23373474411 + 52250.5878817157 * self.t)
Z0 += 0.00000000009 * math.cos(5.74437259170 + 26729.31670331319 * self.t)
Z0 += 0.00000000010 * math.cos(3.47517485960 + 140652.80125408198 * self.t)
Z0 += 0.00000000010 * math.cos(2.80299508279 + 55516.4187098482 * self.t)
Z0 += 0.00000000013 * math.cos(4.07250716348 + 34282.1784747828 * self.t)
Z0 += 0.00000000009 * math.cos(0.66918270135 + 52698.38370124219 * self.t)
Z0 += 0.00000000010 * math.cos(5.43273059228 + 126996.94076290558 * self.t)
Z0 += 0.00000000009 * math.cos(1.51413989531 + 97670.38771289718 * self.t)
Z0 += 0.00000000010 * math.cos(1.07944281160 + 742.9900605326 * self.t)
Z0 += 0.00000000009 * math.cos(3.33909678408 + 1911.1994832172 * self.t)
Z0 += 0.00000000009 * math.cos(6.28167845492 + 147423.51185532758 * self.t)
Z0 += 0.00000000010 * math.cos(0.71259250113 + 137210.22630911658 * self.t)
Z0 += 0.00000000009 * math.cos(3.57005441415 + 50800.03248330259 * self.t)
Z0 += 0.00000000009 * math.cos(5.50160718751 + 2199.087343287 * self.t)
Z0 += 0.00000000009 * math.cos(2.08845571909 + 51859.41441349179 * self.t)
Z0 += 0.00000000009 * math.cos(4.25308230419 + 26222.0121911592 * self.t)
Z0 += 0.00000000009 * math.cos(2.50157520492 + 24864.08530079559 * self.t)
Z0 += 0.00000000008 * math.cos(2.47794653703 + 25440.89230825939 * self.t)
Z0 += 0.00000000009 * math.cos(2.85482376043 + 25953.79409198919 * self.t)
Z0 += 0.00000000008 * math.cos(2.29776576843 + 23866.04650697719 * self.t)
Z0 += 0.00000000009 * math.cos(0.78685007135 + 104778.21075717278 * self.t)
Z0 += 0.00000000008 * math.cos(4.62944551319 + 52492.19815280499 * self.t)
Z0 += 0.00000000009 * math.cos(5.48506768547 + 77844.22478084739 * self.t)
Z0 += 0.00000000010 * math.cos(1.12934836943 + 78731.674415077 * self.t)
Z0 += 0.00000000008 * math.cos(2.59549267900 + 26941.0995233262 * self.t)
Z0 += 0.00000000008 * math.cos(4.95780242181 + 78257.08086582259 * self.t)
Z0 += 0.00000000008 * math.cos(2.55696609752 + 17893.6278083656 * self.t)
Z0 += 0.00000000008 * math.cos(4.52002369897 + 104358.72611329758 * self.t)
Z0 += 0.00000000008 * math.cos(3.82529998469 + 76571.54375522019 * self.t)
Z0 += 0.00000000010 * math.cos(2.73498400513 + 323.5054166574 * self.t)
Z0 += 0.00000000009 * math.cos(1.57363188943 + 24998.19435038059 * self.t)
Z0 += 0.00000000008 * math.cos(0.65301230309 + 8194.2753332086 * self.t)
Z0 += 0.00000000008 * math.cos(4.30691637723 + 77410.51304297059 * self.t)
Z0 += 0.00000000008 * math.cos(5.61648727808 + 77795.74443436819 * self.t)
Z0 += 0.00000000009 * math.cos(2.56370604921 + 50696.93970908399 * self.t)
Z0 += 0.00000000008 * math.cos(5.53494196837 + 2008.557539159 * self.t)
Z0 += 0.00000000009 * math.cos(0.81310827208 + 105940.68546158058 * self.t)
Z0 += 0.00000000008 * math.cos(3.03053180726 + 27780.06881107659 * self.t)
Z0 += 0.00000000008 * math.cos(1.09072579794 + 949.1756089698 * self.t)
Z0 += 0.00000000008 * math.cos(5.21537078773 + 176953.98994186718 * self.t)
Z0 += 0.00000000008 * math.cos(2.88040588282 + 36109.7404216736 * self.t)
Z0 += 0.00000000008 * math.cos(3.90879186991 + 54087.0057663656 * self.t)
Z0 += 0.00000000007 * math.cos(2.09304055777 + 25344.9130810416 * self.t)
Z0 += 0.00000000007 * math.cos(3.77982679537 + 19.66976089979 * self.t)
Z0 += 0.00000000008 * math.cos(2.01169539344 + 31415.379249957 * self.t)
Z0 += 0.00000000008 * math.cos(6.01136820214 + 207643.84320240439 * self.t)
Z0 += 0.00000000008 * math.cos(6.24227744862 + 38813.3565763492 * self.t)
Z0 += 0.00000000008 * math.cos(1.33108336808 + 55503.94193942859 * self.t)
Z0 += 0.00000000007 * math.cos(3.30804741677 + 23919.1426592916 * self.t)
Z0 += 0.00000000008 * math.cos(3.48746520229 + 53242.3017603384 * self.t)
Z0 += 0.00000000008 * math.cos(5.56477337187 + 27177.6119327678 * self.t)
Z0 += 0.00000000009 * math.cos(5.00034866965 + 102132.85546210999 * self.t)
Z0 += 0.00000000007 * math.cos(0.40751314845 + 2111.6503133776 * self.t)
Z0 += 0.00000000008 * math.cos(1.66471235682 + 52290.24557183361 * self.t)
Z0 += 0.00000000007 * math.cos(0.77215567463 + 26073.67604757259 * self.t)
Z0 += 0.00000000007 * math.cos(5.32306426879 + 52595.29092702359 * self.t)
Z0 += 0.00000000007 * math.cos(2.57023734016 + 24448.8336243862 * self.t)
Z0 += 0.00000000007 * math.cos(1.40330361432 + 150244.34299945379 * self.t)
Z0 += 0.00000000008 * math.cos(3.96541390761 + 52325.36948028299 * self.t)
Z0 += 0.00000000008 * math.cos(0.64215744063 + 53228.07466633679 * self.t)
Z0 += 0.00000000007 * math.cos(5.40994893955 + 25508.2155545754 * self.t)
Z0 += 0.00000000009 * math.cos(0.63097343517 + 170049.17029103659 * self.t)
Z0 += 0.00000000006 * math.cos(2.73735624509 + 153.7788104848 * self.t)
Z0 += 0.00000000006 * math.cos(1.57808561874 + 49842.60989027639 * self.t)
Z0 += 0.00000000006 * math.cos(5.85639973088 + 182085.63102592478 * self.t)
Z0 += 0.00000000007 * math.cos(1.22602076365 + 107794.18751126219 * self.t)
Z0 += 0.00000000007 * math.cos(2.99828544115 + 22625.658435709 * self.t)
Z0 += 0.00000000007 * math.cos(3.38113527243 + 12546.481939083 * self.t)
Z0 += 0.00000000006 * math.cos(5.98824736570 + 76667.52298243798 * self.t)
Z0 += 0.00000000007 * math.cos(2.75252330601 + 172402.03644480839 * self.t)
Z0 += 0.00000000007 * math.cos(0.93184314111 + 3328.13565628019 * self.t)
Z0 += 0.00000000006 * math.cos(2.72361970528 + 26709.6469424134 * self.t)
Z0 += 0.00000000008 * math.cos(1.81192469081 + 78270.33798362259 * self.t)
Z0 += 0.00000000006 * math.cos(2.31595886989 + 157057.10981453978 * self.t)
Z0 += 0.00000000006 * math.cos(1.73557063122 + 26667.590728573 * self.t)
Z0 += 0.00000000006 * math.cos(4.37863242786 + 25466.159340735 * self.t)
Z0 += 0.00000000006 * math.cos(3.19403160700 + 104275.34649502118 * self.t)
Z0 += 0.00000000006 * math.cos(1.09823646265 + 25867.49049913539 * self.t)
Z0 += 0.00000000007 * math.cos(0.11236053840 + 25764.39772491679 * self.t)
Z0 += 0.00000000007 * math.cos(5.69537766284 + 66653.15746634839 * self.t)
Z0 += 0.00000000007 * math.cos(1.98238737869 + 846.0828347512 * self.t)
Z0 += 0.00000000006 * math.cos(0.65894058731 + 116783.65521669458 * self.t)
Z0 += 0.00000000005 * math.cos(4.66761334131 + 2125.8774073792 * self.t)
Z0 += 0.00000000006 * math.cos(4.52221761723 + 27726.9726587622 * self.t)
Z0 += 0.00000000006 * math.cos(0.09840227420 + 167850.08294774959 * self.t)
Z0 += 0.00000000006 * math.cos(3.36608415071 + 129799.61842155698 * self.t)
Z0 += 0.00000000007 * math.cos(5.27549434329 + 52061.36699446317 * self.t)
Z0 += 0.00000000007 * math.cos(1.32305784951 + 209812.60368468695 * self.t)
Z0 += 0.00000000006 * math.cos(3.60884349448 + 78903.60671103658 * self.t)
Z0 += 0.00000000005 * math.cos(3.76183094594 + 28256.66362385679 * self.t)
Z0 += 0.00000000006 * math.cos(5.26397966812 + 75930.51303185058 * self.t)
Z0 += 0.00000000006 * math.cos(3.23097479261 + 27669.86248985719 * self.t)
Z0 += 0.00000000005 * math.cos(5.68060753615 + 1265.5674786264 * self.t)
Z0 += 0.00000000005 * math.cos(3.87273283224 + 214364.55718174578 * self.t)
Z0 += 0.00000000005 * math.cos(3.93047654027 + 365230.64398203876 * self.t)
Z0 += 0.00000000006 * math.cos(2.60562458198 + 44295.7171298094 * self.t)
Z0 += 0.00000000005 * math.cos(1.56438384826 + 60370.08161635699 * self.t)
Z0 += 0.00000000005 * math.cos(0.91437456221 + 27684.0895838588 * self.t)
Z0 += 0.00000000005 * math.cos(2.78304413268 + 25169.9728555924 * self.t)
Z0 += 0.00000000006 * math.cos(5.76131236705 + 58857.03113654799 * self.t)
Z0 += 0.00000000005 * math.cos(2.67085954875 + 38.1330356378 * self.t)
Z0 += 0.00000000006 * math.cos(0.50453577398 + 155571.81910783658 * self.t)
Z0 += 0.00000000005 * math.cos(3.86250449828 + 52381.99183158559 * self.t)
Z0 += 0.00000000005 * math.cos(4.32361771821 + 27005.83342755599 * self.t)
Z0 += 0.00000000005 * math.cos(1.63783306460 + 34082.4315835984 * self.t)
Z0 += 0.00000000006 * math.cos(4.13235037436 + 29550.14784743939 * self.t)
Z0 += 0.00000000005 * math.cos(0.09069856194 + 13541.42120249119 * self.t)
Z0 += 0.00000000005 * math.cos(5.50663993071 + 26402.0893214438 * self.t)
Z0 += 0.00000000004 * math.cos(5.22554265930 + 28736.3579670472 * self.t)
Z0 += 0.00000000005 * math.cos(5.88997457155 + 78188.92782615528 * self.t)
Z0 += 0.00000000005 * math.cos(1.29427545799 + 77630.92568540938 * self.t)
Z0 += 0.00000000006 * math.cos(3.65818107099 + 76681.75007643958 * self.t)
Z0 += 0.00000000005 * math.cos(1.59378640261 + 25773.71696170459 * self.t)
Z0 += 0.00000000005 * math.cos(5.53985466815 + 24203.0019781568 * self.t)
Z0 += 0.00000000005 * math.cos(1.10121885185 + 130459.18546877075 * self.t)
Z0 += 0.00000000005 * math.cos(3.55866759420 + 52252.07235442399 * self.t)
Z0 += 0.00000000004 * math.cos(5.49843651098 + 52712.61079524379 * self.t)
Z0 += 0.00000000005 * math.cos(3.39959787962 + 104505.39137678158 * self.t)
Z0 += 0.00000000005 * math.cos(1.94175393423 + 1272.6810256272 * self.t)
Z0 += 0.00000000004 * math.cos(0.58115883263 + 26118.2300025786 * self.t)
Z0 += 0.00000000004 * math.cos(0.54682947961 + 157586.80077963437 * self.t)
Z0 += 0.00000000004 * math.cos(2.04212740892 + 148.0787244263 * self.t)
Z0 += 0.00000000005 * math.cos(1.64853656178 + 26198.1094627936 * self.t)
Z0 += 0.00000000005 * math.cos(0.18946812001 + 1.4844727083 * self.t)
Z0 += 0.00000000004 * math.cos(5.04774492766 + 112231.70171963578 * self.t)
Z0 += 0.00000000004 * math.cos(3.24889530044 + | |
# Lint as: python2, python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class definitions for dsub jobs.
The dsub object model is based around jobs and tasks.
A dsub job specified exclusively with command-line arguments contains one
implicit task (task-id: None).
A dsub job launched with the --tasks flag will contain one or more
explicitly identified tasks (task-id: <n>).
dsub jobs are made up of:
- metadata: job-id, user-id, create-time, etc.
- params: labels, envs, inputs, outputs
- resources: logging-uri, min-cpu, min-ram, etc.
tasks are made up of
- metadata: task-id, task-attempt (only when retries!=0)
- params: labels, envs, inputs, outputs
- resources: logging-uri, min-cpu, min-ram, etc.
The top-level object is the JobDescriptor, which contains:
job_metadata: A dict of metadata values.
job_params: A dict of parameter values.
job_resources: A Resources object.
An array of TaskDescriptors.
(That the job_metadata and job_params are not well-defined objects is
historical, rather than intentional.)
Each TaskDescriptor contains:
task_metadata: A dict of metadata values.
task_params: A dict of parameter values.
task_resources: A Resources object.
The object model here is presently more complete than what the user-interface
allows. For example, min-cpu, min-ram, and other resources are not supported
in the --tasks file, but the object model allows for callers using the Python
API to set each of the resource fields at the task level.
"""
from __future__ import print_function
import collections
import datetime
import re
import string
from . import dsub_util
from dateutil.tz import tzlocal
import pytz
import yaml
# Resource defaults applied when the caller does not specify them.
DEFAULT_MIN_CORES = 1
DEFAULT_MIN_RAM = 3.75  # presumably GB (matches n1-standard-1) -- confirm
DEFAULT_MACHINE_TYPE = 'n1-standard-1'
DEFAULT_DISK_SIZE = 200  # presumably GB -- confirm
DEFAULT_BOOT_DISK_SIZE = 10  # presumably GB -- confirm
DEFAULT_MOUNTED_DISK_SIZE = 10  # presumably GB -- confirm
DEFAULT_PREEMPTIBLE = False
DEFAULT_DISK_TYPE = 'pd-standard'
DEFAULT_LOCATION = 'us-central1'
# Users may specify their own labels, however dsub also uses an implicit set of
# labels (in the google provider). Reserve these labels such that users do
# not attempt to set them.
RESERVED_LABELS = frozenset([
    'job-name', 'job-id', 'user-id', 'task-id', 'dsub-version', 'task-attempt'
])
# File-provider identifiers: where an input/output/logging file lives.
P_LOCAL = 'local'
P_GCS = 'google-cloud-storage'
FILE_PROVIDERS = frozenset([P_LOCAL, P_GCS])
class Script(object):
  """A user script together with its file name.

  The Pipeline's API only supports bash commands as the docker command, so
  arbitrary scripts (Python, Ruby, etc.) are shipped as the contents of a
  simple environment-variable input parameter; the docker command then
  writes that variable to a file and executes it.

  Attributes:
    name: (str) File name of this script.
    value: (str) Content of this script.
  """

  def __init__(self, name, value):
    # Both pieces are stored verbatim; providers serialize them later.
    self.name = name
    self.value = value
def validate_param_name(name, param_type):
  """Validate that the name follows posix conventions for env variables.

  Args:
    name: the candidate variable name.
    param_type: human-readable description used in the error message.

  Raises:
    ValueError: if the name is not a valid POSIX variable name.
  """
  # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_235
  #
  # 3.235 Name
  # In the shell command language, a word consisting solely of underscores,
  # digits, and alphabetics from the portable character set.
  #
  # \Z (rather than $) anchors at the absolute end of the string; with $,
  # a name with a trailing newline such as "FOO\n" was incorrectly accepted
  # because $ also matches just before a final newline.
  if not re.match(r'^[a-zA-Z_][a-zA-Z0-9_]*\Z', name):
    raise ValueError('Invalid %s: %s' % (param_type, name))
def validate_bucket_name(bucket):
  """Validate that the name is a valid GCS bucket.

  Args:
    bucket: full bucket path; must look like "gs://<bucket-name>".

  Raises:
    ValueError: if the path lacks the "gs://" scheme, or the bucket name
      is not 3-63 word/underscore/dot/dash characters starting and ending
      with a word character.
  """
  if not bucket.startswith('gs://'):
    raise ValueError(
        'Invalid bucket path "%s". Must start with "gs://".' % bucket)
  bucket_name = bucket[len('gs://'):]
  # \Z (rather than $) anchors at the absolute end of the string; with $,
  # a bucket name with a trailing newline was incorrectly accepted.
  if not re.search(r'^\w[\w_\.-]{1,61}\w\Z', bucket_name):
    raise ValueError('Invalid bucket name: %s' % bucket)
class UriParts(str):
  """A string subclass for URIs that keeps path and basename separate.

  The value of a UriParts instance is the complete URI (path + basename);
  the two components are kept as attributes for cases where splitting the
  URI after the fact would be ambiguous.

  Attributes:
    path: the entire leading part of the URI (scheme, host, and path) --
      the hierarchical location of a resource. Always ends in a forward
      slash. Local file URIs are represented as relative URIs (path only).
    basename: the final token following the last forward slash; generally a
      specific resource or a pattern matching resources. Empty when the URI
      consists only of a path.

  Examples:
    | uri                         | uri.path               | uri.basename |
    +-----------------------------+------------------------+--------------+
    | gs://bucket/folder/file.txt | 'gs://bucket/folder/'  | 'file.txt'   |
    | http://example.com/1.htm    | 'http://example.com/'  | '1.htm'      |
    | /tmp/tempdir1/              | '/tmp/tempdir1/'       | ''           |
    | /tmp/ab.txt                 | '/tmp/'                | 'ab.txt'     |
  """

  def __new__(cls, path, basename):
    # A missing basename means the URI is just the path.
    if basename is None:
      basename = ''
    uri = str.__new__(cls, path + basename)
    uri.path = path
    uri.basename = basename
    return uri
class EnvParam(collections.namedtuple('EnvParam', ['name', 'value'])):
  """Name/value input parameter to a pipeline.

  Attributes:
    name (str): the input parameter and environment variable name.
    value (str): the variable value (optional).
  """
  __slots__ = ()

  def __new__(cls, name, value=None):
    # The name doubles as an environment variable inside the container, so
    # it must be a valid POSIX variable name.
    validate_param_name(name, 'Environment variable')
    return super(EnvParam, cls).__new__(cls, name, value)
class LoggingParam(
    collections.namedtuple('LoggingParam', ['uri', 'file_provider'])):
  """File parameter describing where logs are written.

  Attributes:
    uri (UriParts): a URI or local file path for the log destination.
    file_provider (enum): service or infrastructure hosting the file.
  """
def convert_to_label_chars(s):
  """Turn the specified name and value into a valid Google label.

  Keeps the result user-friendly rather than merely functional (so no
  base-64 style encoding): uppercase letters are lowercased and every other
  character outside [a-z0-9-] becomes a dash.

  NOTE: Underscores have been allowed in labels since March 2019, but they
  are still converted to dashes here so that labels on new jobs match
  labels on old jobs. With this conversion, $USER "jane_doe" keeps the
  historical "jane-doe" user-id label; dropping it would make looking up
  old jobs more complicated.

  Args:
    s: the raw string to convert.

  Returns:
    A string containing only lowercase letters, digits, and dashes.
  """
  accepted_characters = string.ascii_lowercase + string.digits + '-'
  converted = []
  for char in s:
    if char in accepted_characters:
      converted.append(char)
    elif char in string.ascii_uppercase:
      converted.append(char.lower())
    else:
      converted.append('-')
  return ''.join(converted)
class LabelParam(collections.namedtuple('LabelParam', ['name', 'value'])):
  """Name/value label parameter to a pipeline.

  Subclasses of LabelParam may flip the _allow_reserved_keys attribute in order
  to allow reserved label values to be used. The check against reserved keys
  ensures that providers can rely on the label system to track dsub-related
  values without allowing users to accidentally overwrite the labels.

  Attributes:
    name (str): the label name.
    value (str): the label value (optional).
  """
  _allow_reserved_keys = False
  __slots__ = ()

  def __new__(cls, name, value=None):
    cls._validate_label(name, value)
    return super(LabelParam, cls).__new__(cls, name, value)

  @classmethod
  def _validate_label(cls, name, value):
    """Raise ValueError if the label is invalid."""
    # Rules for labels are described in:
    # https://cloud.google.com/compute/docs/labeling-resources#restrictions
    # * Keys and values cannot be longer than 63 characters each.
    # * Keys and values can only contain lowercase letters, numeric characters,
    #   underscores, and dashes.
    # * International characters are allowed.
    # * Label keys must start with a lowercase letter and international
    #   characters are allowed.
    # * Label keys cannot be empty.
    cls._check_label_name(name)
    cls._check_label_value(value)

    # Ensure that reserved labels are not being used.
    if not cls._allow_reserved_keys and name in RESERVED_LABELS:
      raise ValueError('Label flag (%s=...) must not use reserved keys: %r' %
                       (name, list(RESERVED_LABELS)))

  @staticmethod
  def _check_label_name(name):
    """Raise ValueError unless name is a valid label key."""
    if len(name) < 1 or len(name) > 63:
      raise ValueError('Label name must be 1-63 characters long: "%s"' % name)
    # \Z (rather than $) anchors at the absolute end of the string; with $,
    # a name with a trailing newline (e.g. "abc\n") was incorrectly accepted
    # because $ also matches just before a final newline.
    if not re.match(r'^[a-z]([-_a-z0-9]*)?\Z', name):
      raise ValueError(
          'Invalid name for label: "%s". Must start with a lowercase letter '
          'and contain only lowercase letters, numeric characters, '
          'underscores, and dashes.' % name)

  @staticmethod
  def _check_label_value(value):
    """Raise ValueError unless value is empty/None or a valid label value."""
    if not value:
      return

    if len(value) > 63:
      raise ValueError(
          'Label values must not be longer than 63 characters: "%s"' % value)
    # \Z rather than $: reject values with a trailing newline (see above).
    if not re.match(r'^([-_a-z0-9]*)?\Z', value):
      raise ValueError(
          'Invalid value for label: "%s". Must contain only lowercase letters, '
          'numeric characters, underscores, and dashes.' % value)
class FileParam(
collections.namedtuple('FileParam', [
'name',
'value',
'docker_path',
'uri',
'recursive',
'file_provider',
'disk_size',
'disk_type',
])):
"""File parameter to be automatically localized or de-localized.
Input files are automatically localized to the pipeline VM's local disk.
Output files are automatically de-localized to a remote URI from the
pipeline VM's local disk.
Attributes:
name (str): the parameter and environment variable name.
value | |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class controller(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-openflow-operational - based on the path /openflow-state/controller. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__name','__mode','__type','__connection_type','__ip_addr','__port','__vrf_name','__status','__role',)
_yang_name = 'controller'
_rest_name = 'controller'
_pybind_generated_by = 'container'
  def __init__(self, *args, **kwargs):
    """Initialise the auto-generated container.

    path_helper / extmethods are taken from kwargs when supplied, otherwise
    inherited from the parent object when one exists.  An optional single
    positional argument is treated as an object carrying the same YANG
    elements and is copied element-by-element.
    """
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      # Fall back to the parent's helper (or False if it has none).
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Each leaf is wrapped in YANGDynClass so changes are tracked and XPath
    # registration happens; all leaves here are operational (is_config=False).
    self.__connection_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-type-tcp': {'value': 0}, u'dcm-connection-type-ssl': {'value': 1}},), is_leaf=True, yang_name="connection-type", rest_name="connection-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-type', is_config=False)
    self.__status = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-ctrlr-status-tcp-connecting': {'value': 3}, u'dcm-ctrlr-status-max': {'value': 8}, u'dcm-ctrlr-status-openf-handshake': {'value': 5}, u'dcm-ctrlr-status-init': {'value': 0}, u'dcm-ctrlr-status-tcp-established': {'value': 4}, u'dcm-ctrlr-status-close': {'value': 2}, u'dcm-ctrlr-status-openf-established': {'value': 6}, u'dcm-ctrlr-status-tcp-listening': {'value': 7}, u'dcm-ctrlr-status-unknown': {'value': 1}},), is_leaf=True, yang_name="status", rest_name="status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-status', is_config=False)
    self.__ip_addr = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ip-addr", rest_name="ip-addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='inet:ipv4-address', is_config=False)
    self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
    self.__vrf_name = YANGDynClass(base=unicode, is_leaf=True, yang_name="vrf-name", rest_name="vrf-name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
    self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-mode-passive': {'value': 1}, u'dcm-connection-mode-active': {'value': 0}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-mode', is_config=False)
    self.__role = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-ctrlr-role-invalid': {'value': 0}, u'dcm-ctrlr-role-equal': {'value': 1}, u'dcm-ctrlr-role-slave': {'value': 3}, u'dcm-ctrlr-role-master': {'value': 2}},), is_leaf=True, yang_name="role", rest_name="role", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-role', is_config=False)
    self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ofv100': {'value': 0}, u'ofv130': {'value': 1}},), is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-type', is_config=False)
    self.__port = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="port", rest_name="port", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='uint32', is_config=False)
    # Optionally copy elements from a single supplied object that exposes
    # the same pyangbind elements; only changed elements are copied.
    load = kwargs.pop("load", None)
    if args:
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        # Skip elements that were never modified on the source object.
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'openflow-state', u'controller']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'openflow-state', u'controller']
def _get_name(self):
"""
Getter method for name, mapped from YANG variable /openflow_state/controller/name (string)
YANG Description: Controller name
"""
return self.__name
  def _set_name(self, v, load=False):
    """
    Setter method for name, mapped from YANG variable /openflow_state/controller/name (string)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_name is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_name() directly.
    YANG Description: Controller name
    """
    # name is a list key: it may only be assigned during a load, never
    # directly on an entry that already lives inside an instantiated list.
    parent = getattr(self, "_parent", None)
    if parent is not None and load is False:
      raise AttributeError("Cannot set keys directly when" +
                             " within an instantiated list")
    # Values carrying a _utype hook convert themselves to the native type.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap in YANGDynClass; failure means v is not string-compatible.
      t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """name must be of a type compatible with string""",
          'defined-type': "string",
          'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)""",
        })
    self.__name = t
    # Notify any registered change hook after a successful assignment.
    if hasattr(self, '_set'):
      self._set()
  def _unset_name(self):
    """Reset the name leaf to a fresh, unmodified YANGDynClass wrapper."""
    self.__name = YANGDynClass(base=unicode, is_leaf=True, yang_name="name", rest_name="name", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='string', is_config=False)
def _get_mode(self):
"""
Getter method for mode, mapped from YANG variable /openflow_state/controller/mode (ctrlr-connection-mode)
YANG Description: Controller connection mode
"""
return self.__mode
  def _set_mode(self, v, load=False):
    """
    Setter method for mode, mapped from YANG variable /openflow_state/controller/mode (ctrlr-connection-mode)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mode is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mode() directly.
    YANG Description: Controller connection mode
    """
    # Values carrying a _utype hook convert themselves to the native type.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap in YANGDynClass; failure means v is not one of the
      # permitted ctrlr-connection-mode enumeration keys.
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-mode-passive': {'value': 1}, u'dcm-connection-mode-active': {'value': 0}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-mode', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """mode must be of a type compatible with ctrlr-connection-mode""",
          'defined-type': "brocade-openflow-operational:ctrlr-connection-mode",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-mode-passive': {'value': 1}, u'dcm-connection-mode-active': {'value': 0}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-mode', is_config=False)""",
        })
    self.__mode = t
    # Notify any registered change hook after a successful assignment.
    if hasattr(self, '_set'):
      self._set()
  def _unset_mode(self):
    """Reset the mode leaf to a fresh, unmodified YANGDynClass wrapper."""
    self.__mode = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-mode-passive': {'value': 1}, u'dcm-connection-mode-active': {'value': 0}},), is_leaf=True, yang_name="mode", rest_name="mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-mode', is_config=False)
def _get_type(self):
"""
Getter method for type, mapped from YANG variable /openflow_state/controller/type (ctrlr-type)
YANG Description: type
"""
return self.__type
  def _set_type(self, v, load=False):
    """
    Setter method for type, mapped from YANG variable /openflow_state/controller/type (ctrlr-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_type() directly.
    YANG Description: type
    """
    # Values carrying a _utype hook convert themselves to the native type.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap in YANGDynClass; failure means v is not one of the
      # permitted ctrlr-type enumeration keys (ofv100 / ofv130).
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ofv100': {'value': 0}, u'ofv130': {'value': 1}},), is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """type must be of a type compatible with ctrlr-type""",
          'defined-type': "brocade-openflow-operational:ctrlr-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ofv100': {'value': 0}, u'ofv130': {'value': 1}},), is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-type', is_config=False)""",
        })
    self.__type = t
    # Notify any registered change hook after a successful assignment.
    if hasattr(self, '_set'):
      self._set()
  def _unset_type(self):
    """Reset the type leaf to a fresh, unmodified YANGDynClass wrapper."""
    self.__type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ofv100': {'value': 0}, u'ofv130': {'value': 1}},), is_leaf=True, yang_name="type", rest_name="type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-type', is_config=False)
def _get_connection_type(self):
"""
Getter method for connection_type, mapped from YANG variable /openflow_state/controller/connection_type (ctrlr-connection-type)
YANG Description: Controller connection type
"""
return self.__connection_type
  def _set_connection_type(self, v, load=False):
    """
    Setter method for connection_type, mapped from YANG variable /openflow_state/controller/connection_type (ctrlr-connection-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_connection_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_connection_type() directly.
    YANG Description: Controller connection type
    """
    # Values carrying a _utype hook convert themselves to the native type.
    if hasattr(v, "_utype"):
      v = v._utype(v)
    try:
      # Re-wrap in YANGDynClass; failure means v is not one of the
      # permitted ctrlr-connection-type enumeration keys (tcp / ssl).
      t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-type-tcp': {'value': 0}, u'dcm-connection-type-ssl': {'value': 1}},), is_leaf=True, yang_name="connection-type", rest_name="connection-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-type', is_config=False)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """connection_type must be of a type compatible with ctrlr-connection-type""",
          'defined-type': "brocade-openflow-operational:ctrlr-connection-type",
          'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-type-tcp': {'value': 0}, u'dcm-connection-type-ssl': {'value': 1}},), is_leaf=True, yang_name="connection-type", rest_name="connection-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-type', is_config=False)""",
        })
    self.__connection_type = t
    # Notify any registered change hook after a successful assignment.
    if hasattr(self, '_set'):
      self._set()
  def _unset_connection_type(self):
    """Reset the connection-type leaf to a fresh, unmodified wrapper."""
    self.__connection_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'dcm-connection-type-tcp': {'value': 0}, u'dcm-connection-type-ssl': {'value': 1}},), is_leaf=True, yang_name="connection-type", rest_name="connection-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='ctrlr-connection-type', is_config=False)
def _get_ip_addr(self):
"""
Getter method for ip_addr, mapped from YANG variable /openflow_state/controller/ip_addr (inet:ipv4-address)
YANG Description: IP address
"""
return self.__ip_addr
def _set_ip_addr(self, v, load=False):
"""
Setter method for ip_addr, mapped from YANG variable /openflow_state/controller/ip_addr (inet:ipv4-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_ip_addr is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ip_addr() directly.
YANG Description: IP address
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ip-addr", rest_name="ip-addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='inet:ipv4-address', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ip_addr must be of a type compatible with inet:ipv4-address""",
'defined-type': "inet:ipv4-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="ip-addr", rest_name="ip-addr", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='inet:ipv4-address', is_config=False)""",
})
| |
<reponame>swederik/structurefunction<gh_stars>1-10
import os
import os.path as op
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.interfaces.fsl as fsl
import nipype.interfaces.freesurfer as fs
import nipype.interfaces.mrtrix as mrtrix
import nipype.interfaces.cmtk as cmtk
from nipype.workflows.misc.utils import select_aparc
fsl.FSLCommand.set_default_output_type('NIFTI')
from coma.interfaces import RegionalValues, nonlinfit_fn, CMR_glucose
def summarize_precoth(dwi_network_file, fdg_stats_file, subject_id):
    """Write one CSV row combining FDG-PET stats and DWI connectivity.

    Parameters
    ----------
    dwi_network_file : str
        Path to a networkx gpickle holding the 6-node DWI network.
    fdg_stats_file : str
        Path to a MATLAB .mat file containing func_mean / func_max /
        func_min / func_stdev and number_of_voxels arrays, ordered
        LTh, RTh, LCo, RCo, LPre, RPre.
    subject_id : str
        Used both as the row key and the output file prefix.

    Returns
    -------
    str
        Absolute path of the written ``<subject_id>_precoth.csv`` file.
    """
    import os.path as op
    import scipy.io as sio
    import networkx as nx

    fdg = sio.loadmat(fdg_stats_file)
    dwi_ntwk = nx.read_gpickle(dwi_network_file)

    # Region order shared by the .mat arrays and the network nodes:
    # Thal L-1 R-2, Cortex 3 and 4, Prec L-5 R-6.
    regions = ["LTh", "RTh", "LCo", "RCo", "LPre", "RPre"]
    n_regions = len(regions)

    fdg_titles = []
    fdg_values = []
    for key, suffix in [("func_mean", "avg"), ("func_max", "max"),
                        ("func_min", "min"), ("func_stdev", "std")]:
        fdg_titles.extend("%s_CMR_%s" % (r, suffix) for r in regions)
        fdg_values.extend(fdg[key][i][0] for i in range(n_regions))

    dwi = nx.to_numpy_matrix(dwi_ntwk, weight="weight")
    # Upper triangle of the 6x6 connectivity matrix, one column per pair.
    conn_titles = []
    conn_values = []
    for i in range(n_regions):
        for j in range(i + 1, n_regions):
            conn_titles.append("%s_%s" % (regions[i], regions[j]))
            conn_values.append(dwi[i, j])

    volume_titles = ["Vox%s" % r for r in regions]
    volumes = fdg["number_of_voxels"][:, 0].tolist()

    all_titles = ["subjid"] + fdg_titles + conn_titles + volume_titles
    all_data = [float(x) for x in fdg_values + conn_values + volumes]

    out_file = op.abspath(subject_id + "_precoth.csv")
    # Context manager guarantees the file is closed even if a write fails.
    with open(out_file, "w") as f:
        f.write(",".join(all_titles) + "\n")
        f.write(subject_id + "," +
                ",".join(format(x, "10.5f") for x in all_data) + "\n")
    return out_file
def extract_PreCoTh(in_file, out_filename):
    """Collapse an aparc+aseg label volume into a 6-region mask.

    Output labels: 1 = left thalamus, 2 = right thalamus, 3 = left cortex,
    4 = right cortex, 5 = left precuneus, 6 = right precuneus.  Voxels not
    covered by the mapping are zero.

    Parameters
    ----------
    in_file : str
        Path to the input segmentation image (FreeSurfer aparc+aseg codes).
    out_filename : str
        Name for the output image; resolved to an absolute path in cwd.

    Returns
    -------
    str
        Absolute path of the written NIfTI file.
    """
    import nibabel as nb
    import numpy as np
    import os.path as op
    in_image = nb.load(in_file)
    in_header = in_image.get_header()
    in_data = in_image.get_data()
    # [new_label, aparc_aseg_code] pairs.
    # 2xxx codes are right cortical parcels (-> 4), 1xxx are left (-> 3);
    # 1025/2025 are the precuneus parcels, 10/49 the thalami.
    MAPPING = [
        [4, 2012], [4, 2019], [4, 2032], [4, 2014], [4, 2020], [4, 2018],
        [4, 2027], [4, 2028], [4, 2003], [4, 2024], [4, 2017], [4, 2026],
        [4, 2002], [4, 2023], [4, 2010], [4, 2022], [4, 2031], [4, 2029],
        [4, 2008], [4, 2005], [4, 2021], [4, 2011],
        [4, 2013], [4, 2007], [4, 2016], [4, 2006], [4, 2033], [4, 2009],
        [4, 2015], [4, 2001], [4, 2030], [4, 2034], [4, 2035],
        [3, 1012], [3, 1019], [3, 1032], [3, 1014], [3, 1020], [3, 1018],
        [3, 1027], [3, 1028], [3, 1003], [3, 1024], [3, 1017], [3, 1026],
        [3, 1002], [3, 1023], [3, 1010], [3, 1022], [3, 1031],
        [3, 1029], [3, 1008], [3, 1005], [3, 1021], [3, 1011], [3, 1013],
        [3, 1007], [3, 1016], [3, 1006], [3, 1033],
        [3, 1009], [3, 1015], [3, 1001], [3, 1030], [3, 1034], [3, 1035],
        [5, 1025], [6, 2025], [1, 10], [2, 49]]
    niiGM = np.zeros(in_data.shape, dtype=np.uint)
    for new_label, aseg_code in MAPPING:
        niiGM[in_data == aseg_code] = new_label
    out_file = op.abspath(out_filename)
    # nibabel changed the Nifti1Image constructor keywords across versions:
    # newer releases use `dataobj`, older ones accepted `data`.
    try:
        out_image = nb.Nifti1Image(
            data=niiGM, header=in_header, affine=in_image.get_affine())
    except TypeError:
        out_image = nb.Nifti1Image(
            dataobj=niiGM, header=in_header, affine=in_image.get_affine())
    nb.save(out_image, out_file)
    return out_file
def create_precoth_pipeline(name="precoth", tractography_type='probabilistic', reg_pet_T1=True):
inputnode = pe.Node(
interface=util.IdentityInterface(fields=["subjects_dir",
"subject_id",
"dwi",
"bvecs",
"bvals",
"fdgpet",
"dose",
"weight",
"delay",
"glycemie",
"scan_time"]),
name="inputnode")
nonlinfit_interface = util.Function(input_names=["dwi", "bvecs", "bvals", "base_name"],
output_names=["tensor", "FA", "MD", "evecs", "evals", "rgb_fa", "norm", "mode", "binary_mask", "b0_masked"], function=nonlinfit_fn)
nonlinfit_node = pe.Node(interface=nonlinfit_interface, name="nonlinfit_node")
coregister = pe.Node(interface=fsl.FLIRT(dof=12), name = 'coregister')
coregister.inputs.cost = ('normmi')
invertxfm = pe.Node(interface=fsl.ConvertXFM(), name = 'invertxfm')
invertxfm.inputs.invert_xfm = True
WM_to_FA = pe.Node(interface=fsl.ApplyXfm(), name = 'WM_to_FA')
WM_to_FA.inputs.interp = 'nearestneighbour'
TermMask_to_FA = WM_to_FA.clone("TermMask_to_FA")
mni_for_reg = op.join(os.environ["FSL_DIR"],"data","standard","MNI152_T1_1mm.nii.gz")
reorientBrain = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reorientBrain')
reorientBrain.inputs.reference = mni_for_reg
reorientROIs = pe.Node(interface=fsl.ApplyXfm(), name = 'reorientROIs')
reorientROIs.inputs.interp = "nearestneighbour"
reorientROIs.inputs.reference = mni_for_reg
reorientRibbon = reorientROIs.clone("reorientRibbon")
reorientRibbon.inputs.interp = "nearestneighbour"
reorientT1 = reorientROIs.clone("reorientT1")
reorientT1.inputs.interp = "trilinear"
fsl2mrtrix = pe.Node(interface=mrtrix.FSL2MRTrix(), name='fsl2mrtrix')
fsl2mrtrix.inputs.invert_y = True
erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),
name='erode_mask_firstpass')
erode_mask_firstpass.inputs.out_filename = "b0_mask_median3D_erode.nii.gz"
erode_mask_secondpass = pe.Node(interface=mrtrix.Erode(),
name='erode_mask_secondpass')
erode_mask_secondpass.inputs.out_filename = "b0_mask_median3D_erode_secondpass.nii.gz"
threshold_FA = pe.Node(interface=fsl.ImageMaths(), name='threshold_FA')
threshold_FA.inputs.op_string = "-thr 0.8 -uthr 0.99"
threshold_mode = pe.Node(interface=fsl.ImageMaths(), name='threshold_mode')
threshold_mode.inputs.op_string = "-thr 0.1 -uthr 0.99"
make_termination_mask = pe.Node(interface=fsl.ImageMaths(), name='make_termination_mask')
make_termination_mask.inputs.op_string = "-bin"
get_wm_mask = pe.Node(interface=fsl.ImageMaths(), name='get_wm_mask')
get_wm_mask.inputs.op_string = "-thr 0.1"
MRmultiply = pe.Node(interface=mrtrix.MRMultiply(), name='MRmultiply')
MRmultiply.inputs.out_filename = "Eroded_FA.nii.gz"
MRmult_merge = pe.Node(interface=util.Merge(2), name='MRmultiply_merge')
median3d = pe.Node(interface=mrtrix.MedianFilter3D(), name='median3D')
fdgpet_regions = pe.Node(interface=RegionalValues(), name='fdgpet_regions')
compute_cmr_glc_interface = util.Function(input_names=["in_file", "dose", "weight", "delay",
"glycemie", "scan_time"], output_names=["out_file"], function=CMR_glucose)
compute_cmr_glc = pe.Node(interface=compute_cmr_glc_interface, name='compute_cmr_glc')
csdeconv = pe.Node(interface=mrtrix.ConstrainedSphericalDeconvolution(),
name='csdeconv')
estimateresponse = pe.Node(interface=mrtrix.EstimateResponseForSH(),
name='estimateresponse')
if tractography_type == 'probabilistic':
CSDstreamtrack = pe.Node(
interface=mrtrix.ProbabilisticSphericallyDeconvolutedStreamlineTrack(
),
name='CSDstreamtrack')
else:
CSDstreamtrack = pe.Node(
interface=mrtrix.SphericallyDeconvolutedStreamlineTrack(),
name='CSDstreamtrack')
#CSDstreamtrack.inputs.desired_number_of_tracks = 10000
CSDstreamtrack.inputs.minimum_tract_length = 50
tck2trk = pe.Node(interface=mrtrix.MRTrix2TrackVis(), name='tck2trk')
extract_PreCoTh_interface = util.Function(input_names=["in_file", "out_filename"],
output_names=["out_file"],
function=extract_PreCoTh)
thalamus2precuneus2cortex_ROIs = pe.Node(
interface=extract_PreCoTh_interface, name='thalamus2precuneus2cortex_ROIs')
wm_mask_interface = util.Function(input_names=["in_file", "out_filename"],
output_names=["out_file"],
function=wm_labels_only)
make_wm_mask = pe.Node(
interface=wm_mask_interface, name='make_wm_mask')
write_precoth_data_interface = util.Function(input_names=["dwi_network_file", "fdg_stats_file", "subject_id"],
output_names=["out_file"],
function=summarize_precoth)
write_csv_data = pe.Node(
interface=write_precoth_data_interface, name='write_csv_data')
thalamus2precuneus2cortex = pe.Node(
interface=cmtk.CreateMatrix(), name="thalamus2precuneus2cortex")
thalamus2precuneus2cortex.inputs.count_region_intersections = True
FreeSurferSource = pe.Node(
interface=nio.FreeSurferSource(), name='fssource')
mri_convert_Brain = pe.Node(
interface=fs.MRIConvert(), name='mri_convert_Brain')
mri_convert_Brain.inputs.out_type = 'niigz'
mri_convert_Brain.inputs.no_change = True
if reg_pet_T1:
reg_pet_T1 = pe.Node(interface=fsl.FLIRT(dof=6), name = 'reg_pet_T1')
reg_pet_T1.inputs.cost = ('corratio')
reslice_fdgpet = mri_convert_Brain.clone("reslice_fdgpet")
reslice_fdgpet.inputs.no_change = True
mri_convert_Ribbon = mri_convert_Brain.clone("mri_convert_Ribbon")
mri_convert_ROIs = mri_convert_Brain.clone("mri_convert_ROIs")
mri_convert_T1 = mri_convert_Brain.clone("mri_convert_T1")
workflow = pe.Workflow(name=name)
workflow.base_output_dir = name
workflow.connect(
[(inputnode, FreeSurferSource, [("subjects_dir", "subjects_dir")])])
workflow.connect(
[(inputnode, FreeSurferSource, [("subject_id", "subject_id")])])
workflow.connect(
[(FreeSurferSource, mri_convert_T1, [('T1', 'in_file')])])
workflow.connect(
[(mri_convert_T1, reorientT1, [('out_file', 'in_file')])])
workflow.connect(
[(FreeSurferSource, mri_convert_Brain, [('brain', 'in_file')])])
workflow.connect(
[(mri_convert_Brain, reorientBrain, [('out_file', 'in_file')])])
workflow.connect(
[(reorientBrain, reorientROIs, [('out_matrix_file', 'in_matrix_file')])])
workflow.connect(
[(reorientBrain, reorientRibbon, [('out_matrix_file', 'in_matrix_file')])])
workflow.connect(
[(reorientBrain, reorientT1, [('out_matrix_file', 'in_matrix_file')])])
workflow.connect(
[(FreeSurferSource, mri_convert_ROIs, [(('aparc_aseg', select_aparc), 'in_file')])])
workflow.connect(
[(mri_convert_ROIs, reorientROIs, [('out_file', 'in_file')])])
workflow.connect(
[(reorientROIs, make_wm_mask, [('out_file', 'in_file')])])
workflow.connect(
[(FreeSurferSource, mri_convert_Ribbon, [(('ribbon', select_ribbon), 'in_file')])])
workflow.connect(
[(mri_convert_Ribbon, reorientRibbon, [('out_file', 'in_file')])])
workflow.connect(
[(reorientRibbon, make_termination_mask, [('out_file', 'in_file')])])
workflow.connect([(inputnode, fsl2mrtrix, [("bvecs", "bvec_file"),
("bvals", "bval_file")])])
workflow.connect(inputnode, 'dwi', nonlinfit_node, 'dwi')
workflow.connect(inputnode, 'subject_id', nonlinfit_node, 'base_name')
workflow.connect(inputnode, 'bvecs', nonlinfit_node, 'bvecs')
workflow.connect(inputnode, 'bvals', nonlinfit_node, 'bvals')
workflow.connect([(inputnode, compute_cmr_glc, [("dose", "dose")])])
workflow.connect([(inputnode, compute_cmr_glc, [("weight", "weight")])])
workflow.connect([(inputnode, compute_cmr_glc, [("delay", "delay")])])
workflow.connect([(inputnode, compute_cmr_glc, [("glycemie", "glycemie")])])
workflow.connect([(inputnode, compute_cmr_glc, [("scan_time", "scan_time")])])
if reg_pet_T1:
workflow.connect([(inputnode, reg_pet_T1, [("fdgpet", "in_file")])])
workflow.connect(
[(reorientBrain, reg_pet_T1, [("out_file", "reference")])])
workflow.connect(
[(reg_pet_T1, reslice_fdgpet, [("out_file", "in_file")])])
workflow.connect(
[(reorientROIs, reslice_fdgpet, [("out_file", "reslice_like")])])
workflow.connect(
[(reslice_fdgpet, compute_cmr_glc, [("out_file", "in_file")])])
else:
workflow.connect([(inputnode, reslice_fdgpet, [("fdgpet", "in_file")])])
workflow.connect(
[(reorientROIs, reslice_fdgpet, [("out_file", "reslice_like")])])
workflow.connect(
[(reslice_fdgpet, compute_cmr_glc, [("out_file", "in_file")])])
workflow.connect(
[(compute_cmr_glc, fdgpet_regions, [("out_file", "in_files")])])
workflow.connect(
[(thalamus2precuneus2cortex_ROIs, fdgpet_regions, [("out_file", "segmentation_file")])])
workflow.connect([(nonlinfit_node, coregister,[("FA","in_file")])])
workflow.connect([(make_wm_mask, coregister,[('out_file','reference')])])
workflow.connect([(nonlinfit_node, tck2trk,[("FA","image_file")])])
workflow.connect([(reorientBrain, tck2trk,[("out_file","registration_image_file")])])
workflow.connect([(coregister, tck2trk,[("out_matrix_file","matrix_file")])])
workflow.connect([(coregister, invertxfm,[("out_matrix_file","in_file")])])
workflow.connect([(invertxfm, WM_to_FA,[("out_file","in_matrix_file")])])
workflow.connect([(make_wm_mask, WM_to_FA,[("out_file","in_file")])])
workflow.connect([(nonlinfit_node, WM_to_FA,[("FA","reference")])])
workflow.connect([(invertxfm, TermMask_to_FA,[("out_file","in_matrix_file")])])
workflow.connect([(make_termination_mask, TermMask_to_FA,[("out_file","in_file")])])
workflow.connect([(nonlinfit_node, TermMask_to_FA,[("FA","reference")])])
workflow.connect([(nonlinfit_node, median3d, [("binary_mask", "in_file")])])
workflow.connect(
[(median3d, erode_mask_firstpass, [("out_file", "in_file")])])
workflow.connect(
[(erode_mask_firstpass, erode_mask_secondpass, [("out_file", "in_file")])])
workflow.connect([(nonlinfit_node, MRmult_merge, [("FA", "in1")])])
workflow.connect(
[(erode_mask_secondpass, MRmult_merge, [("out_file", "in2")])])
workflow.connect([(MRmult_merge, MRmultiply, [("out", "in_files")])])
workflow.connect([(MRmultiply, threshold_FA, [("out_file", "in_file")])])
workflow.connect(
[(threshold_FA, estimateresponse, [("out_file", "mask_image")])])
workflow.connect([(inputnode, estimateresponse, [("dwi", "in_file")])])
workflow.connect(
[(fsl2mrtrix, estimateresponse, [("encoding_file", "encoding_file")])])
workflow.connect([(inputnode, csdeconv, [("dwi", "in_file")])])
#workflow.connect(
# [(TermMask_to_FA, csdeconv, [("out_file", "mask_image")])])
workflow.connect(
[(estimateresponse, csdeconv, [("response", "response_file")])])
workflow.connect(
[(fsl2mrtrix, csdeconv, [("encoding_file", "encoding_file")])])
workflow.connect(
[(WM_to_FA, CSDstreamtrack, [("out_file", "seed_file")])])
workflow.connect(
[(TermMask_to_FA, CSDstreamtrack, [("out_file", "mask_file")])])
workflow.connect(
[(csdeconv, CSDstreamtrack, [("spherical_harmonics_image", "in_file")])])
workflow.connect([(CSDstreamtrack, tck2trk, [("tracked", "in_file")])])
workflow.connect(
[(tck2trk, thalamus2precuneus2cortex, [("out_file", "tract_file")])])
workflow.connect(
[(inputnode, thalamus2precuneus2cortex, [("subject_id", "out_matrix_file")])])
workflow.connect(
[(inputnode, thalamus2precuneus2cortex, [("subject_id", "out_matrix_mat_file")])])
workflow.connect(
[(reorientROIs, thalamus2precuneus2cortex_ROIs, [("out_file", "in_file")])])
workflow.connect(
[(thalamus2precuneus2cortex_ROIs, thalamus2precuneus2cortex, [("out_file", "roi_file")])])
workflow.connect(
[(thalamus2precuneus2cortex, fdgpet_regions, [("matrix_file", "resolution_network_file")])])
workflow.connect(
[(inputnode, write_csv_data, [("subject_id", "subject_id")])])
workflow.connect(
[(fdgpet_regions, write_csv_data, [("stats_file", "fdg_stats_file")])])
workflow.connect(
[(thalamus2precuneus2cortex, write_csv_data, [("intersection_matrix_file", "dwi_network_file")])])
output_fields = ["fa", "rgb_fa", "md", "csdeconv", "tracts_tck", "rois", "t1",
"t1_brain", "wmmask_dtispace", "fa_t1space", "summary", "filtered_tractographies",
"matrix_file", "connectome", "CMR_nodes", "fiber_labels_noorphans", "fiber_length_file",
"fiber_label_file", "intersection_matrix_mat_file"]
outputnode = pe.Node(
interface=util.IdentityInterface(fields=output_fields),
name="outputnode")
workflow.connect(
[(CSDstreamtrack, outputnode, [("tracked", "tracts_tck")]),
(csdeconv, outputnode,
[("spherical_harmonics_image", "csdeconv")]),
(nonlinfit_node, outputnode, [("FA", "fa")]),
(coregister, outputnode, [("out_file", "fa_t1space")]),
(reorientBrain, outputnode, [("out_file", "t1_brain")]),
(reorientT1, outputnode, [("out_file", "t1")]),
(thalamus2precuneus2cortex_ROIs, outputnode, [("out_file", "rois")]),
(thalamus2precuneus2cortex, outputnode, [("filtered_tractographies", "filtered_tractographies")]),
(thalamus2precuneus2cortex, outputnode, [("matrix_file", "connectome")]),
(thalamus2precuneus2cortex, outputnode, [("fiber_labels_noorphans", "fiber_labels_noorphans")]),
(thalamus2precuneus2cortex, outputnode, [("fiber_length_file", "fiber_length_file")]),
(thalamus2precuneus2cortex, outputnode, [("fiber_label_file", "fiber_label_file")]),
(thalamus2precuneus2cortex, outputnode, [("intersection_matrix_mat_file", "intersection_matrix_mat_file")]),
(fdgpet_regions, outputnode, [("networks", "CMR_nodes")]),
(nonlinfit_node, outputnode, [("rgb_fa", "rgb_fa")]),
(nonlinfit_node, outputnode, [("MD", "md")]),
(write_csv_data, outputnode, [("out_file", "summary")]),
])
return workflow
def create_precoth_pipeline_step1(name="precoth_step1", reg_pet_T1=True, auto_reorient=True):
inputnode = pe.Node(
interface=util.IdentityInterface(fields=["subjects_dir",
"subject_id",
"dwi",
"bvecs",
"bvals",
"fdgpet"]),
name="inputnode")
nonlinfit_interface = util.Function(input_names=["dwi", "bvecs", "bvals", "base_name"],
output_names=["tensor", "FA", "MD", "evecs", "evals", "rgb_fa", "norm", "mode", "binary_mask", "b0_masked"], function=nonlinfit_fn)
nonlinfit_node = pe.Node(interface=nonlinfit_interface, name="nonlinfit_node")
erode_mask_firstpass = pe.Node(interface=mrtrix.Erode(),
name='erode_mask_firstpass')
erode_mask_firstpass.inputs.out_filename = "b0_mask_median3D_erode.nii.gz"
erode_mask_secondpass | |
self.__cmp__(other) != 0
def __lt__(self: FixedPointType, other: Any) -> bool:
"""Less than comparison operator."""
return self.__cmp__(other) < 0
def __le__(self: FixedPointType, other: Any) -> bool:
"""Less than or equal to comparison operator."""
return self.__cmp__(other) <= 0
def __gt__(self: FixedPointType, other: Any) -> bool:
"""Greater than comparison operator."""
return self.__cmp__(other) > 0
def __ge__(self: FixedPointType, other: Any) -> bool:
"""Greater than or equal to comparison operator."""
return self.__cmp__(other) >= 0
###########################################################################
# Context management
###########################################################################
def __call__(self: FixedPointType, *, safe_retain: bool = False,
**props: Union[int, bool, str]) -> FixedPointType:
"""Context initialization.
Assign properties temporarily. Any argument to the __init__ function can
be used here except the initialization value.
If `safe_retain` evaluates as True, the changes that occur inside the
with statement context will be retained after the context ends only if
there are no exceptions.
The order in which properties are specified is honored.
"""
try:
self.__context['safe_retain'] = bool(safe_retain)
for attr, value in props.items():
# Only change valid attributes
if f'_{attr}' not in self.__slots__:
raise AttributeError(f"Invalid FixedPoint attribute "
f"{attr!r}.")
# Caller should not try to be accessing any "private" variables
if attr.startswith('_'):
raise PermissionError(f"Access to {attr!r} is prohibited.")
self.__context[f'_{attr}'] = value
except Exception:
self.__context = {}
raise
return self
    def __enter__(self: FixedPointType) -> FixedPointType:
        """Enter a with-statement context.

        Pushes a serialized snapshot of the current object and the
        safe_retain flag onto the context-manager stack (popped again, in
        reverse order, by __exit__), then applies any attribute overrides
        staged by a preceding __call__.
        """
        # Push the current context onto the context manager stack
        self.__cmstack.append(DEFAULT_ENCODER.encode(self))
        # Push the safe_retain option to the context manager stack
        self.__cmstack.append(self.__context.pop('safe_retain', False))
        # Assign temporary context items from __call__ to the current object.
        try:
            # Note that __call__ is optional, so __context may be empty.
            for attr, value in self.__context.items():
                # To assign the value to the attribute, go through the property
                # setter function if it exists, which validates and normalizes
                # the value to the correct type (e.g., @signed.setter ensures
                # it's a bool). Filched from
                # https://stackoverflow.com/questions/3681272/can-i-get-a-reference-to-a-python-property
                prop = self.__class__.__dict__.get(attr[1:], None)
                if isinstance(prop, property) and bool(prop.fset):
                    prop.fset(self, value)  # type: ignore
                else:
                    # No settable property backs this attribute: refuse the
                    # override rather than poking the slot directly.
                    raise PermissionError(f"{attr[1:]!r} is read-only.")
        finally:
            # Context has been adopted, or some exception was raised. Reset it
            # so it can be reused later.
            self.__context = {}
        return self
    def __exit__(self: FixedPointType, exc_type: Type[Exception],
                 *args: Any) -> None:
        """Conditionally restores/retains FixedPoint attributes.

        __enter__ pushed [serialized snapshot, safe_retain flag] onto the
        context manager stack. The snapshot is restored unless safe_retain
        was True and the with block raised no exception.
        """
        # If no exception occurred, and safe_retain is True, do not restore
        # context
        if self.__cmstack.pop() and exc_type is None:
            # Remove context from context manager stack.
            self.__cmstack.pop()
            return
        # See the FixedPointEncoder.default method in .\json.py for the
        # serialization order of FixedPoint object attributes.
        attributes = DEFAULT_DECODER.decode_attributes(self.__cmstack.pop())
        # Restore the raw bits first, then the Q format, then the remaining
        # attributes through their property setters.
        self._bits = attributes.pop()
        self._signed, self._m, self._n = attributes.pop()
        for attr, value in attributes.pop().items():
            self.__class__.__dict__[attr].fset(self, value)
###########################################################################
# Built-in functions and type conversion
###########################################################################
def __abs__(self: FixedPointType) -> FixedPointType:
"""Absolute value. This is the "abs()" function.
Returns a copy of self is positive or a negated copy of self if
negative. Signedness does not change.
"""
if self._negweight():
ret = -self
else:
ret = self.__class__.__new(self._bits, self._signed, self._m,
self._n, self.overflow,
self.rounding, self.str_base,
self.overflow_alert,
self.implicit_cast_alert,
self.mismatch_alert)
return ret
def __int__(self: FixedPointType) -> int:
"""Integer cast of the fixed point value. Integer bits only."""
return self._signedint >> self._n
def __float__(self: FixedPointType) -> float:
"""Floating point representation of the stored value."""
try:
ret = float((ret := self._signedint) * 2**-self._n)
except OverflowError:
ret = float('-inf' if ret < 0 else 'inf')
return ret
def __bool__(self: FixedPointType) -> bool: # noqa: D401
"""True if non-zero."""
return bool(self._bits)
    def __index__(self: FixedPointType) -> int:
        """Raw bit pattern of the FixedPoint number as an int.

        Supports bin(), oct(), hex(), and use as a sequence index.
        """
        return self._bits
def __str__(self: FixedPointType) -> str: # noqa: D401
"""String representation of the stored value w/out its radix.
Use the str_base property to adjust which base to use for this function.
For str_base of 2, 8, or 16, output is 0-padded to the bit width.
"""
ret = StrConv[self.str_base](self._bits)
# Zero padding
if self.str_base == 10:
return ret
# Remove radix
ret = ret[2:]
bits_needed = self._m + self._n
nzeros = _ceil(bits_needed / _log2(self.str_base))
return ret.zfill(nzeros)
def __format__(self: FixedPointType, spec: str) -> str:
"""Format as a string."""
# All bits
if spec == '' or spec[-1] in 'bdoxX' or spec is None:
ret = format(self._bits, spec)
# Integer bits
elif spec[-1] in 'm':
ret = format((self._bits >> self._n) & (2**self._m - 1), spec[:-1])
# Fractional bits
elif spec[-1] in 'n':
ret = format(self._bits & (2**self._n - 1), spec[:-1])
# str()
elif spec[-1] in 's':
ret = format(str(self), spec)
# float()
elif spec[-1] in 'eEfFgG%':
ret = format(float(self), spec)
# qformat
elif spec[-1] == 'q':
ret = format(self.qformat, f"{spec[:-1]}s")
else:
raise ValueError(f"Unknown format code {spec!r}.")
return ret
def __repr__(self: FixedPointType) -> str:
"""Python-executable code string, allows for exact reproduction."""
str_base = self.str_base
return (
f"FixedPoint({StrConv[str_base](self._bits)!r}, "
f"signed={int(self._signed)}, "
f"m={self._m}, "
f"n={self._n}, "
f"overflow={self.overflow!r}, "
f"rounding={self.rounding!r}, "
f"overflow_alert={self.overflow_alert!r}, "
f"mismatch_alert={self.mismatch_alert!r}, "
f"implicit_cast_alert={self.implicit_cast_alert!r}, "
f"{str_base=})")
###########################################################################
# Bit resizing methods
###########################################################################
def resize(self: FixedPointType, m: int, n: int, /, rounding: str = None,
overflow: str = None, alert: str = None) -> None:
"""Resize integer and fractional bit widths.
Overflow handling, sign-extension, and rounding are employed.
Override rounding, overflow, and overflow_alert settings for the
scope of this method by specifying the appropriate arguments.
"""
old = self._overflow, self._rounding, self._overflow_alert
try:
with self(safe_retain=True,
overflow=overflow or self.overflow,
rounding=rounding or self.rounding,
overflow_alert=alert or self.overflow_alert):
self.n = n
self.m = m
except Exception:
raise
else:
self._overflow, self._rounding, self._overflow_alert = old
    def trim(self: FixedPointType, /, ints: bool = None,
             fracs: bool = None) -> None:
        """Trims off insignificant bits.

        This includes trailing 0s, leading 0s, and leading 1s as appropriate.
        Trim only integer bits or fractional bits by setting `fracs` or `ints`
        to True. By default, both integer and fractional bits are trimmed.
        """
        # Neither flag given means trim both sides.
        if ints is None and fracs is None:
            ints, fracs = True, True
        s, m, n = bool(self._signed), self._m, self._n
        # Trailing 0s on fractional bits can be stripped
        if fracs:
            n = len(self.bits['N'].rstrip('0')) if n else 0
        if ints:
            # Remove leading 1s for negative numbers, leave 1 though
            if self._signedint < 0:
                m = 1 + len(self.bits['M'].lstrip('1'))
            # Remove all leading 0s
            # For signed, minimum m is 1
            # For unsigned, m can be 0 iff n is non-zero
            elif self._m:
                m = max(s or n == 0,
                        s + len(self.bits['M'].lstrip('0')))
            else:
                m = self._m
        self._log("INTS: %s\n"
                  "FRACS: %s\n"
                  "Trimming %d fractional bits\n"
                  "Trimming %d integer bits\n",
                  ints, fracs, self._n - n, self._m - m)
        # Shift away the trimmed fractional bits, then update the widths.
        # int(m or n == 0) forces at least one integer bit whenever both
        # m and n would otherwise be zero.
        self._bits >>= (self._n - n)
        self._n = n
        self._m = int(m or n == 0)
        self._bits &= self.bitmask
# _________________________________________________________________________
# Rounding methods
    def __rounding_arg_check(self: FixedPointType, nfrac: int) -> None:
        """Validate the target fractional bit count for the rounding methods.

        Raises TypeError for a non-int nfrac, and ValueError when the
        requested size would leave an invalid word.
        """
        if not isinstance(nfrac, int):
            raise TypeError(f"Expected {type(1)}; got {type(nfrac)}.")
        # NOTE(review): `(self._m == 0) == self._n` compares a bool with an
        # int, so it is True only for (m == 0, n == 1) or (m > 0, n == 0);
        # presumably this encodes the minimum-word-size rule -- confirm
        # against the class invariants.
        if self._m + nfrac == 0 or (self._m == 0) == self._n:
            raise ValueError("Word size (integer and fractional) "
                             "must be positive.")
        # Chained comparison: nfrac must be at least 1 when there are no
        # integer bits, and strictly less than the current fractional width.
        if not (self._m == 0) <= nfrac < self._n:
            raise ValueError("Number of fractional bits remaining after round "
                             "must be in the range "
                             f"[{int(self._m == 0)}, {self._n}).")
    def __round__(self: FixedPointType, nfrac: int, /) -> FixedPointType:
        """Fractional rounding to `nfrac` bits; supports the round() builtin.

        The rounding method used by this function is specified by the
        rounding attribute of this object. Returns a rounded copy; self
        is not modified.
        """
        # Clone self via the private __new constructor (argument order must
        # match __new exactly), then round the copy in place.
        ret: FixedPointType = self.__class__.__new(self._bits, self._signed,
                                                   self._m, self._n,
                                                   self.overflow,
                                                   self.rounding, self.str_base,
                                                   self.overflow_alert,
                                                   self.implicit_cast_alert,
                                                   self.mismatch_alert)
        ret.round(nfrac)
        return ret
def __floor__(self: FixedPointType) -> FixedPointType:
"""Round to negative infinity, leave fractional bit width unmodified."""
# When binary bits are truncated, it rounds to negative infinity.
ret = self.__class__.__new(self._bits, self._signed, self._m,
self._n, self.overflow,
self.rounding, self.str_base,
self.overflow_alert,
self.implicit_cast_alert,
self.mismatch_alert)
if self._n:
ret._bits &= ~(2**self._n - 1) & self.bitmask
return ret
    def __ceil__(self: FixedPointType) -> FixedPointType:
        """Round to positive infinity, leaving 0 fractional bits.

        Returns a rounded copy; self is not modified.
        """
        # Clone self via the private __new constructor (argument order must
        # match __new exactly), then round the copy up to 0 fractional bits.
        ret = self.__class__.__new(self._bits, self._signed, self._m,
                                   self._n, self.overflow,
                                   self.rounding, self.str_base,
                                   self.overflow_alert,
                                   self.implicit_cast_alert,
                                   self.mismatch_alert)
        ret.round_up(0)
        return ret
def __trunc__(self: FixedPointType) -> FixedPointType:
"""Truncate all fractional bits. | |
nan ],
[ 870, 42.1211, 0.1440, 63.9225, 0.0949, 39.8764, 0.1521, 22.0460, 0.2751, 1.39e-06, 1.38e-06, 1.39e-06, nan ],
[ 880, 42.8106, 0.1450, 66.0631, 0.0939, 40.5434, 0.1531, 21.5649, 0.2878, 1.39e-06, 1.33e-06, 1.27e-06, nan ],
[ 890, 42.5972, 0.1490, 65.4134, 0.0970, 40.9589, 0.1550, 21.8761, 0.2902, 1.26e-06, 1.24e-06, 1.18e-06, nan ],
[ 900, 43.2820, 0.1500, 66.8904, 0.0970, 41.3745, 0.1569, 22.0084, 0.2949, 1.26e-06, 1.34e-06, 1.34e-06, nan ],
[ 1000, 48.5618, 0.1650, 77.0751, 0.1040, 46.0339, 0.1740, 22.3139, 0.3591, 1.49e-06, 1.60e-06, 1.64e-06, nan ],
[ 1100, 52.6635, 0.1841, 86.5026, 0.1121, 30.2052, 0.3209, 23.0216, 0.4210, 1.83e-06, 1.80e-06, 1.62e-06, nan ],
[ 1200, 57.7312, 0.1998, 95.2338, 0.1211, 32.3172, 0.3569, 23.0705, 0.5000, 1.83e-06, 1.83e-06, 2.04e-06, nan ],
[ 1300, 63.2210, 0.2141, 104.9398, 0.1290, 35.2405, 0.3841, 23.4597, 0.5770, 1.80e-06, 1.44e-06, 1.60e-06, nan ],
[ 1400, 70.7166, 0.2220, 114.4994, 0.1371, 38.3666, 0.4091, 23.5385, 0.6669, 1.42e-06, 1.49e-06, 1.50e-06, nan ],
[ 1500, 73.2296, 0.2460, 120.9168, 0.1490, 40.4783, 0.4451, 23.3106, 0.7730, 1.96e-06, 2.04e-06, 2.03e-06, nan ],
[ 1600, 77.3896, 0.2649, 129.8790, 0.1578, 42.7123, 0.4799, 21.8334, 0.9389, 1.81e-06, 2.06e-06, 1.93e-06, nan ],
[ 1700, 84.1786, 0.2749, 137.6707, 0.1681, 44.4201, 0.5209, 19.2384, 1.2028, 1.98e-06, 2.25e-06, 1.93e-06, nan ],
[ 1800, 88.5329, 0.2930, 143.3557, 0.1810, 46.6582, 0.5560, 17.6463, 1.4701, 2.09e-06, 2.04e-06, 1.90e-06, nan ],
[ 1900, 93.8290, 0.3080, 148.9277, 0.1941, 49.6629, 0.5820, 17.0718, 1.6930, 2.22e-06, 1.96e-06, 1.98e-06, nan ],
[ 2000, 97.3322, 0.3290, 156.9140, 0.2041, 51.7405, 0.6189, 17.0976, 1.8730, 1.91e-06, 1.95e-06, 1.93e-06, nan ],
[ 2100, 102.9053, 0.3431, 159.0556, 0.2220, 38.7544, 0.9110, 17.0895, 2.0659, 2.56e-06, 2.68e-06, 2.56e-06, nan ],
[ 2200, 108.1985, 0.3581, 167.7133, 0.2310, 40.8225, 0.9491, 16.8601, 2.2981, 2.13e-06, 2.19e-06, 2.33e-06, nan ],
[ 2300, 115.0380, 0.3681, 174.9938, 0.2420, 42.4722, 0.9971, 17.2429, 2.4559, 2.87e-06, 2.99e-06, 2.98e-06, nan ],
[ 2400, 119.0852, 0.3872, 182.9653, 0.2520, 44.2550, 1.0419, 17.2951, 2.6660, 2.99e-06, 3.08e-06, 3.07e-06, nan ],
[ 2500, 122.2850, 0.4091, 187.3581, 0.2670, 45.8168, 1.0920, 17.2453, 2.9011, 2.64e-06, 2.58e-06, 2.77e-06, nan ],
[ 2600, 126.0882, 0.4292, 193.3210, 0.2799, 47.6704, 1.1351, 17.2934, 3.1290, 2.55e-06, 2.38e-06, 2.29e-06, nan ],
[ 2700, 132.0106, 0.4420, 199.1438, 0.2930, 49.1560, 1.1871, 17.4086, 3.3519, 2.39e-06, 2.38e-06, 2.29e-06, nan ],
[ 2800, 135.5343, 0.4630, 207.9050, 0.3018, 50.8516, 1.2341, 17.5776, 3.5701, 2.93e-06, 3.13e-06, 2.89e-06, nan ],
[ 2900, 140.2577, 0.4799, 211.8070, 0.3178, 52.3043, 1.2870, 18.2225, 3.6941, 2.95e-06, 2.53e-06, 2.78e-06, nan ],
[ 3000, 143.5349, 0.5019, 217.0552, 0.3319, 53.8384, 1.3380, 17.4759, 4.1220, 3.20e-06, 2.67e-06, 3.06e-06, nan ],
[ 3100, 149.6355, 0.5140, 221.5756, 0.3471, 42.9980, 1.7889, 16.9868, 4.5280, 2.46e-06, 2.55e-06, 2.72e-06, nan ],
[ 3200, 152.9175, 0.5360, 237.5663, 0.3450, 45.1067, 1.8170, 17.6332, 4.6480, 2.75e-06, 2.92e-06, 2.90e-06, nan ],
[ 3300, 159.0835, 0.5479, 234.3422, 0.3719, 46.3867, 1.8790, 17.6973, 4.9250, 3.11e-06, 2.96e-06, 3.03e-06, nan ],
[ 3400, 162.5724, 0.5691, 239.1007, 0.3870, 47.2783, 1.9569, 17.7895, 5.2009, 2.59e-06, 2.88e-06, 2.86e-06, nan ],
[ 3500, 166.4850, 0.5889, 251.3557, 0.3901, 48.8035, 2.0089, 17.8256, 5.5001, 3.18e-06, 3.71e-06, 3.01e-06, nan ],
[ 3600, 171.6838, 0.6042, 250.0268, 0.4148, 49.9652, 2.0759, 16.5266, 6.2761, 3.40e-06, 3.27e-06, 3.22e-06, nan ],
[ 3700, 173.6103, 0.6311, 256.5865, 0.4270, 50.9136, 2.1520, 17.8098, 6.1519, 3.51e-06, 3.65e-06, 4.09e-06, nan ],
[ 3800, 176.7106, 0.6540, 258.5159, 0.4470, 52.3170, 2.2089, 17.8395, 6.4781, 2.91e-06, 3.01e-06, 2.65e-06, nan ],
[ 3900, 178.5172, 0.6819, 256.8205, 0.4740, 53.4561, 2.2771, 17.9087, 6.7971, 3.17e-06, 2.72e-06, 2.75e-06, nan ],
[ 4000, 178.7857, 0.7162, 263.0128, 0.4869, 54.9097, 2.3320, 17.4811, 7.3249, 4.09e-06, 3.94e-06, 3.94e-06, nan ],
[ 4100, 176.3301, 0.7629, 265.9078, 0.5059, 46.2316, 2.9099, 17.8896, 7.5200, 3.14e-06, 3.31e-06, 3.24e-06, nan ],
[ 4200, 177.5980, 0.7949, 261.8804, 0.5391, 47.2291, 2.9891, 17.8315, 7.9169, 3.37e-06, 3.49e-06, 3.26e-06, nan ],
[ 4300, 179.7908, 0.8230, 261.8725, 0.5651, 48.7693, 3.0341, 17.9645, 8.2369, 3.20e-06, 3.37e-06, 2.99e-06, nan ],
[ 4400, 181.6197, 0.8531, 259.9341, 0.5960, 49.3721, 3.1381, 17.9136, 8.6489, 3.44e-06, 3.55e-06, 3.22e-06, nan ],
[ 4500, 182.5198, 0.8879, 256.0089, 0.6330, 50.3559, 3.2182, 17.9740, 9.0160, 3.62e-06, 3.52e-06, 3.49e-06, nan ],
[ 4600, 187.8951, 0.9012, 255.0245, 0.6640, 51.4222, 3.2930, 16.7958, 10.0820, 3.91e-06, 3.50e-06, 3.45e-06, nan ],
[ 4700, 186.2482, 0.9491, 258.0766, 0.6850, 52.1783, 3.3879, 17.8059, 9.9280, 3.99e-06, 4.17e-06, 4.09e-06, nan ],
[ 4800, 187.3845, 0.9840, 261.1738, 0.7060, 52.8596, 3.4881, 17.8538, 10.3271, 4.05e-06, 3.51e-06, 3.61e-06, nan ],
[ 4900, 187.6341, 1.0240, 262.0776, 0.7331, 54.1701, 3.5470, 17.9653, 10.6950, 3.57e-06, 3.64e-06, 3.62e-06, nan ],
[ 5000, 188.0575, 1.0638, 260.8369, 0.7670, 55.0202, 3.6361, 17.6018, 11.3659, 4.27e-06, 4.90e-06, 4.33e-06, nan ],
[ 5100, 190.7796, 1.0910, 265.1906, 0.7849, 55.9083, 3.7229, 17.7961, 11.6959, 4.20e-06, 4.50e-06, 4.68e-06, nan ],
[ 5200, 191.3098, 1.1311, 265.5277, 0.8149, 49.0315, 4.4131, 17.5096, 12.3580, 3.88e-06, 3.88e-06, 3.79e-06, nan ],
[ 5300, 193.1198, 1.1640, 261.6738, 0.8590, 49.6086, 4.5311, 17.7260, 12.6810, 3.67e-06, 3.55e-06, 3.63e-06, nan ],
[ 5400, 193.8057, 1.2040, 266.6809, 0.8750, 49.8710, 4.6790, 17.5779, 13.2749, 4.65e-06, 4.40e-06, 4.29e-06, nan ],
[ 5500, 195.7013, 1.2369, 261.4054, 0.9260, 51.2622, 4.7221, 17.8464, 13.5639, 3.96e-06, 3.73e-06, 4.32e-06, nan ],
[ 5600, 198.2201, 1.2660, 267.2801, 0.9389, 52.4674, 4.7829, 17.8993, 14.0200, 4.03e-06, 4.28e-06, 4.32e-06, nan ],
[ 5700, 198.1593, 1.3120, 266.3582, 0.9761, 52.4592, 4.9560, 17.8283, 14.5829, 4.19e-06, 4.61e-06, 4.21e-06, nan ],
[ 5800, 199.9757, 1.3461, 266.5399, 1.0099, 53.9447, 4.9901, 17.6958, 15.2121, 4.50e-06, 4.40e-06, 4.13e-06, nan ],
[ 5900, 201.5399, 1.3821, 270.6966, 1.0290, 54.4142, 5.1191, 18.0278, 15.4512, 4.33e-06, 4.35e-06, 4.11e-06, nan ],
[ 6000, 202.2872, 1.4241, 267.4920, 1.0769, 55.4299, 5.1970, 17.4453, 16.5129, 4.16e-06, 4.41e-06, 3.83e-06, nan ],
[ 6100, 203.9300, 1.4601, 271.1393, 1.0982, 55.4880, 5.3661, 12.1920, 24.4219, 4.37e-06, 4.55e-06, 4.55e-06, nan ],
[ 6200, 205.7647, 1.4949, 269.1165, 1.1430, 50.0075, 6.1510, 17.7410, 17.3380, 3.80e-06, 3.64e-06, 3.88e-06, nan ],
[ 6300, 207.9770, 1.5271, 273.3058, 1.1621, 50.4027, 6.3012, 17.9433, 17.7000, 4.89e-06, 4.27e-06, 4.75e-06, nan ],
[ 6400, 207.1597, 1.5821, 276.6019, 1.1849, 51.1159, 6.4120, 19.5419, 16.7720, 4.76e-06, 4.43e-06, 4.15e-06, nan ],
[ 6500, 210.6361, 1.6050, 275.0731, 1.2290, 51.2543, 6.5961, 17.9972, 18.7850, 4.80e-06, 4.15e-06, 4.97e-06, nan ],
[ 6600, 213.3007, 1.6341, 278.1513, 1.2531, 52.4471, 6.6459, 17.8065, 19.5749, 4.36e-06, 4.63e-06, 4.43e-06, nan ],
[ 6700, 215.7210, 1.6651, 276.1356, 1.3008, 52.5532, 6.8350, 18.0312, 19.9211, 5.18e-06, 4.38e-06, 4.88e-06, nan ],
[ 6800, 212.6472, 1.7400, 279.2190, 1.3251, 54.3725, 6.8049, 17.7844, 20.8049, 4.36e-06, 3.81e-06, 4.20e-06, nan ],
[ 6900, 216.0749, 1.7631, 279.5441, 1.3628, 54.5238, 6.9871, 18.0253, 21.1349, 4.06e-06, 3.64e-06, 3.62e-06, nan ],
[ 7000, 216.3557, 1.8122, 281.0664, 1.3950, 54.6461, 7.1750, 18.0020, 21.7800, 4.19e-06, 4.06e-06, 3.99e-06, nan ],
[ 7100, 217.9078, 1.8511, 280.1053, 1.4400, 55.3756, 7.2842, 18.0153, 22.3901, 4.30e-06, 4.02e-06, 3.69e-06, nan ],
[ 7200, 220.7619, 1.8790, 282.5766, 1.4679, 49.8317, 8.3241, 17.6723, 23.4721, 4.07e-06, 4.35e-06, 4.07e-06, nan ],
[ 7300, 221.2648, 1.9271, 284.6543, 1.4980, 50.9873, 8.3630, 17.6099, 24.2140, 4.46e-06, 4.42e-06, 4.55e-06, nan ],
[ 7400, 223.5510, 1.9600, 285.9964, 1.5321, 52.0937, 8.4112, 17.6042, 24.8899, 4.27e-06, 4.18e-06, 4.23e-06, nan ],
[ 7500, 224.7130, 2.0030, 283.6259, 1.5869, 52.4641, 8.5790, 17.8679, 25.1899, 4.63e-06, 4.19e-06, 4.17e-06, nan ],
[ 7600, 227.3351, 2.0330, 284.4441, 1.6248, 53.0496, 8.7121, 17.2903, 26.7301, 3.98e-06, 3.88e-06, 4.23e-06, nan ],
[ 7700, 225.0430, 2.1081, 284.0991, 1.6699, 53.3466, 8.8930, 16.4898, 28.7700, 5.22e-06, 4.58e-06, 4.81e-06, nan ],
[ 7800, 225.4936, 2.1589, 284.3398, 1.7121, 54.0128, 9.0129, 17.4372, 27.9181, 4.48e-06, 5.09e-06, 4.66e-06, nan ],
[ 7900, 224.4460, 2.2249, 284.5442, 1.7550, 54.6190, 9.1429, 17.7423, 28.1460, 4.67e-06, 4.82e-06, 4.79e-06, nan ],
[ 8000, 224.3926, 2.2821, 281.0634, 1.8220, 54.6480, 9.3708, 17.8474, 28.6930, 4.53e-06, 4.92e-06, 5.01e-06, nan ],
[ 8100, 226.3015, 2.3198, 282.6953, 1.8570, 55.6995, 9.4252, 17.3214, 30.3080, 5.05e-06, 5.03e-06, 4.90e-06, nan ],
[ 8200, 226.5448, 2.3749, 284.6383, 1.8902, 50.1971, 10.7181, 16.4481, 32.7101, 5.15e-06, 5.52e-06, 5.19e-06, nan ],
[ 8300, 228.3440, 2.4140, 289.2147, 1.9059, 51.1568, 10.7751, 17.3667, 31.7400, 6.48e-06, 6.03e-06, 5.72e-06, nan ],
[ 8400, 225.5690, 2.5029, 289.2419, 1.9519, 51.7963, 10.9000, 16.9876, 33.2348, 4.66e-06, 4.62e-06, 4.80e-06, nan ],
[ 8500, 229.2244, 2.5220, 285.1959, 2.0270, 52.0676, 11.1029, 17.7638, 32.5439, 5.47e-06, 5.63e-06, 5.71e-06, nan ],
[ 8600, 227.8636, 2.5971, 280.5922, 2.1091, 53.0707, 11.1508, 16.6306, 35.5840, 5.47e-06, 5.26e-06, 5.22e-06, nan ],
[ 8700, 226.9835, 2.6681, 286.6042, 2.1131, 53.2141, 11.3809, 17.4280, 34.7500, 5.47e-06, 5.42e-06, 5.36e-06, nan ],
[ 8800, 225.8144, 2.7440, 282.8271, 2.1908, 53.2790, 11.6298, 17.8602, 34.6930, 5.95e-06, 5.68e-06, 5.61e-06, nan ],
[ 8900, 227.5742, 2.7850, 285.7459, 2.2180, 54.3607, 11.6589, 17.9127, 35.3820, 5.69e-06, 5.32e-06, 5.26e-06, nan ],
[ 9000, 227.8212, 2.8448, 283.7835, 2.2838, 54.6415, 11.8611, 17.5939, 36.8371, 5.18e-06, 5.31e-06, 5.75e-06, nan ],
[ 10000, 233.8802, 3.4211, 286.8821, 2.7890, 54.8743, 14.5810, 17.3314, 46.1659, 5.70e-06, 5.45e-06, 4.74e-06, nan ],
[ 12000, 238.6863, 4.8270, 290.7957, 3.9620, 54.5318, 21.1279, 17.3796, 66.2930, 6.72e-06, 6.68e-06, 6.35e-06, nan ],
[ 14000, 248.3621, 6.3140, 295.9981, 5.2979, 54.5143, 28.7662, 16.9389, 92.5779, 7.49e-06, 8.12e-06, 7.90e-06, nan ],
[ 16000, 252.4609, 8.1129, 293.8110, 6.9711, 54.6419, 37.4839, 15.6665, 130.7368, 6.96e-06, 6.64e-06, 6.96e-06, nan ],
[ 18000, 252.7499, 10.2561, 295.5780, 8.7700, 54.9235, 47.1969, 16.5584, 156.5499, 7.60e-06, 7.60e-06, 8.15e-06, nan ],
[ 20000, 261.2656, 12.2490, 294.5724, 10.8640, 54.0089, 59.2539, 17.2832, 185.1649, 9.59e-06, 9.18e-06, 9.22e-06, nan ],
])
# ------------------------------------------------------------
# file: v1.6.1/cuda7.0-k40c/cpotrf.txt
# numactl | |
# This script is designed to visualise the outputs from optimisation, particularly the investment related decisions.
# Author name: <NAME>
# Author email = <EMAIL>
# Organisation = Imperial College London
# Date: 05 March 2019
# Version: 1.0
##Elegancy GIS Outputs=name
##Outputs_path=string C:\Users\JohnDoe\Desktop\Optimisation_Files\
##Process_units_file_name=string prod_rate.csv
##Storage_units_file_name=string inventory_rsrc.csv
##Distribution_units_file_name=string total_flow_rate.csv
##Gridded_Shapefile_Layer=vector Polygon
##Gridded_Demand_Layer=optional vector Polygon
##Csv_file_showing_temporal_variations_in_demand=optional string Demand_Ratio.csv
##Enter_1_if_you_would_like_a_demand_variation_map_with_time=optional string 0
# Load libraries from QGIS and Python core
from qgis.core import *
from qgis.utils import *
from PyQt4.QtCore import *
import math
import csv
import os
#Initialise the lists needed for field identification.
major_time = []                        # time period value of each process-output row
process_output = []                    # raw rows of the process units csv
process_field_raw = []                 # field-id column read from process rows
strg_output = []                       # raw rows of the storage units csv
strg_field_raw = []                    # field-id column read from storage rows
dist_output = []                       # raw rows of the distribution csv
dist_field_from = []                   # "from" field ids of distribution rows
dist_field_to = []                     # "to" field ids of distribution rows
process_field_and_coordinates = []     # [field id, "x, y"] matched per process row
strg_field_and_coordinates = []        # [field id, "x, y"] matched per storage row
dist_from_field_and_coordinates = []   # coordinates matched to the "from" fields
dist_to_field_and_coordinates = []     # coordinates matched to the "to" fields
Demand_Ratio = Csv_file_showing_temporal_variations_in_demand
# Retrieve the grid layer and store it in the following object.
# NOTE(review): `processing` is not imported above -- it is presumably
# injected by the QGIS Processing script runtime; verify before running
# this script outside QGIS.
Grid_layer = processing.getObject(Gridded_Shapefile_Layer)
Demand_Layer = processing.getObject(Gridded_Demand_Layer)
# Defining a function to determine the coordinates of each feature.
def coordinate_finder(input_layer):
    """Return (centroid coordinates, integer field ids) for every feature.

    The first attribute of each feature is taken as the field id (verify
    against the layer schema). Centroids are returned as [x, y] lists, in
    the same order as the field ids.
    """
    field_id_long = []     # raw field ids as read from the features
    coordinates_list = []  # centroid [x, y] per feature, same order
    for feature in input_layer.getFeatures():
        # The centroid of the feature geometry locates the grid cell.
        coordinates_list.append(list(feature.geometry().centroid().asPoint()))
        field_id_long.append(feature[0])
    # Convert to int once, after the loop. A list comprehension (rather
    # than map) also behaves identically on Python 2 and 3 -- on Python 3
    # map() returns a one-shot iterator, which would break the repeated
    # iteration done by the callers.
    field_id = [int(raw_id) for raw_id in field_id_long]
    return coordinates_list, field_id
coordinates_list, field_id = coordinate_finder(Grid_layer) # Call on any gridded layer to determine all field ids and coordinates
grid_coordinates_raw = [field_id, coordinates_list] # create a new list containing both the field id and corresponding coordinates.
grid_coordinates = list(map(list, zip(*grid_coordinates_raw)))# Transpose so each entry becomes [field_id, [x, y]].
# Flatten each coordinate pair into a plain "x, y" string so it can be
# written straight into a csv column later on.
for grid in grid_coordinates:
    # loop over every grid in this list
    coordinates_without_brackets = str(grid[1]).replace('[','').replace(']','')# convert the list of coordinates into string format and drop the brackets.
    grid[1] = coordinates_without_brackets #replace the list of coordinates with the string format.
# Collect the field id's from process output and add to list.
Process_units_file_path_and_name = Outputs_path + Process_units_file_name
with open(Process_units_file_path_and_name) as File:
    # NOTE(review): quotechar is the same character as the delimiter
    # (','), which is unusual -- confirm the optimisation csv files really
    # are written this way.
    reader = csv.reader(File, delimiter=',', quotechar=',',
                        quoting=csv.QUOTE_MINIMAL)
    for row in reader:
        process_output.append(row)# Add each row of the optimisation output file into this list.
for output in process_output[1:]:
    # loop over each row in the output list from first item, ignoring header
    process_field_raw.append(output[2])# Add the third column along which should be field id
    major_time.append(output[0])
# Python 2 only: map() returns a list here, so insert() works directly.
process_field = map(int, process_field_raw) # Convert from string to integer format
process_field.insert(0,"FIELD_ID") # Add a heading as the first list item.
for field in process_field[1:]:
    # loop over every field in process outputs file
    for grid in grid_coordinates:
        # loop over every grid in grid and coordinates
        if field in grid:
            # check if the field from process output is in the grid and coordinates list.
            process_field_and_coordinates.append(grid)# If so, add the grid and coordinates to this new list.
process_field_and_coordinates.insert(0,[" FIELD_ID",["X-COORDINATE","Y-COORDINATE"]])#Insert field headings.
process_compiled_list = [process_output, process_field_and_coordinates]# Produce a compiled list with the grid coordinates added to process output
process_list_with_brackets = list(map(list, zip(*process_compiled_list)))# Reformat the lists in column format.
process_csv_path_and_filename = Outputs_path + "process_final.csv" #Write the output file name and path for the final combined csv file.
process_final = []# List to contain the final output contents.
for process_item in process_list_with_brackets:
    # loop over each item in this combined list
    process_output_string = str(process_item[0]).replace('[','').replace(']','')# remove the brackets surrounding the field id and convert to string.
    field_and_coordinates_string = str(process_item[1]).replace('[','').replace(']','')# remove the brackets surrounding coordinates and convert to string
    process_item[0] = process_output_string # Update the first item in the list as a string
    process_item[1] = field_and_coordinates_string# Update the coordinates item in this list.
    process_item = process_item[0] +"," + process_item[1]
    process_item_final = process_item.replace("'","")# Remove apostrophes in output text
    process_final.append(process_item_final)# Append the list with refined contents
# Python 2 only: mode "wb" with str lines; Python 3 would need mode "w"
# or bytes.
with open(process_csv_path_and_filename,"wb") as process_file:
    # Create the output csv and write each line of the process_final list.
    for line in process_final:
        process_file.write(line + '\n')
# the following code is a replication of the above code, can be refactored and written as a function
# with a minor modification which is to change the column number of field id.
Storage_units_file_path_and_name = Outputs_path + Storage_units_file_name
with open(Storage_units_file_path_and_name) as File:
    reader = csv.reader(File, delimiter=',', quotechar=',',
                        quoting=csv.QUOTE_MINIMAL)
    for row in reader:
        strg_output.append(row)
# For storage rows the field id sits in the fourth column.
for output in strg_output[1:]:
    strg_field_raw.append(output[3])
# Python 2 only: map() returns a list here, so insert() works directly.
strg_field = map(int, strg_field_raw)
strg_field.insert(0,"FIELD_ID")
# Match each storage field id to its grid entry and coordinates.
for field in strg_field[1:]:
    for grid in grid_coordinates:
        if field in grid:
            strg_field_and_coordinates.append(grid)
strg_field_and_coordinates.insert(0,[" FIELD_ID",["X-COORDINATE","Y-COORDINATE"]])
strg_compiled_list = [strg_output, strg_field_and_coordinates]
strg_list_with_brackets = list(map(list, zip(*strg_compiled_list)))
strg_csv_path_and_filename = Outputs_path + "storage_final.csv"
strg_final = []
# Strip list brackets and apostrophes so each row becomes one csv line.
for strg_item in strg_list_with_brackets:
    strg_output_string = str(strg_item[0]).replace('[','').replace(']','')
    field_and_coordinates_string = str(strg_item[1]).replace('[','').replace(']','')
    strg_item[0] = strg_output_string
    strg_item[1] = field_and_coordinates_string
    strg_item = strg_item[0] +"," + strg_item[1]
    strg_item_final = strg_item.replace("'","")
    strg_final.append(strg_item_final)
# Python 2 only: mode "wb" with str lines.
with open(strg_csv_path_and_filename,"wb") as strg_file:
    for line in strg_final:
        strg_file.write(line + '\n')
# Again this code is a replication of the above with the exception being field id columns.
# Rewrite as a common function if limited by CPU speed, it is written this way to assist in
# any rapid prototyping allowing changes to be made to the output files.
Distribution_units_file_path_and_name = Outputs_path + Distribution_units_file_name
with open(Distribution_units_file_path_and_name) as File:
    reader = csv.reader(File, delimiter=',', quotechar=',',
                        quoting=csv.QUOTE_MINIMAL)
    for row in reader:
        dist_output.append(row)
# Columns 3 and 4 hold the "from" and "to" field ids of each flow.
for output in dist_output[1:]:
    dist_field_from.append(output[2])
    dist_field_to.append(output[3])
# Python 2 only: map() returns lists here, so insert() works directly.
dist_grid_from = map(int, dist_field_from)
dist_grid_from.insert(0,"FIELD_ID")
dist_grid_to = map(int, dist_field_to)
dist_grid_to.insert(0,"FIELD_ID")
dist_lists = [dist_grid_from, dist_grid_to]
dist_fields_and_coordinates = [dist_from_field_and_coordinates, dist_to_field_and_coordinates]
for distribution_list in dist_lists:
    # loop over from field list and to field list.
    list_index = dist_lists.index(distribution_list)# Store the index of the list in this variable
    for field in distribution_list[1:]:
        # loop over all fields in the designated list, ignoring the header.
        for grid in grid_coordinates:
            # loop over all potential grids in the layer
            if field in grid:
                # check if the field in either from or to lists is in the grid, if so add it to the list below.
                dist_fields_and_coordinates[list_index].append(grid)
dist_fields_and_coordinates[0].insert(0,["FROM FIELD_ID",["FROM X-COORDINATE","FROM Y-COORDINATE"]])
dist_fields_and_coordinates[1].insert(0,["TO FIELD_ID",["TO X-COORDINATE","TO Y-COORDINATE"]])
dist_compiled_list = [dist_output, dist_fields_and_coordinates[0], dist_fields_and_coordinates[1]]
dist_list_with_brackets = list(map(list, zip(*dist_compiled_list)))
dist_csv_path_and_filename = Outputs_path + "distribution_final.csv"
dist_final = []
# Strip list brackets and apostrophes so each row becomes one csv line.
for dist_item in dist_list_with_brackets:
    dist_output_string = str(dist_item[0]).replace('[','').replace(']','')
    from_field_and_coordinates_string = str(dist_item[1]).replace('[','').replace(']','')
    to_field_and_coordinates_string = str(dist_item[2]).replace('[','').replace(']','')
    dist_item[0] = dist_output_string
    dist_item[1] = from_field_and_coordinates_string
    dist_item[2] = to_field_and_coordinates_string
    dist_item = dist_item[0] +"," + dist_item[1] +","+ dist_item[2]
    dist_item_final = dist_item.replace("'","")
    dist_final.append(dist_item_final)
# Python 2 only: mode "wb" with str lines.
with open(dist_csv_path_and_filename,"wb") as dist_file:
    for line in dist_final:
        dist_file.write(line + '\n')
"""
From here on, the written process csv is reread and the items are sorted
This is specific to the problem instance
"""
# The number of specific occurrences of major time is revealed and held in a new set for future use.
# NOTE(review): set() does not preserve order, so the per-time-period csv
# files produced below are created in arbitrary order -- sort this list
# if deterministic file ordering matters.
unique_major_time = list(set(major_time))
def writeSortedOutput(input_list_of_tech_strings,all_tech_lists,first_line_of_csv):
" A function to sort the contents of the technology csv and save it as a vector layer"
csv_files = []# A list containing path strings for all technology csv.
for tech in all_tech_lists:
# loop over all technologies in a list of technologies.
index_for_string = all_tech_lists.index(tech)# create an index representing the technology number in the list.
for tm in unique_major_time:
csv_path = Outputs_path + input_list_of_tech_strings[index_for_string]+ "_"+ "%s.csv" % (tm)
# depending on the index write the name of tech as csv
csv_files.append(csv_path)# add the path string into the list to csv files.
with open(csv_path,"wb") as csv_file:
# Create a new csv file which contains the output contents specific to that technology.
csv_file.write(first_line_of_csv + '\n')
for item in tech:
if item[0] == tm:
# loop over all items in tech and tidy up the contents without brackets and aposrophes.
item_final = str(item).replace("'","").replace("[","").replace("]","")
csv_file.write(item_final + '\n')# write the refined output for each technology.
# The following focuses on adding the outputs as layer to the map canvas
for path, directories, files in os.walk(Outputs_path):
for file in files:
# looping over all the files in the output path specified and checking if the technology related files are present.
if str(os.path.join(path,file)) in csv_files:
fullname = os.path.join(path, file).replace('\\', '/')
filename = os.path.splitext(os.path.basename(fullname))[0]
uri = 'file:///%s?crs=%s&delimiter=%s&xField=%s&yField=%s' % (fullname, Grid_layer.crs().authid(), ',', 'X COORDINATE', 'Y COORDINATE')
# uri specifies the file path, delimiter to use in the text file, name, coordinate system, x and y coordinates.
layer = QgsVectorLayer(uri, 'layer', 'delimitedtext')# Creates a layer using the details above.
QgsVectorFileWriter.writeAsVectorFormat(layer, Outputs_path + '/' + filename + '.shp', 'CP1250', None, 'ESRI | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on a WaveNet script by <NAME> (Nagoya University)
# (https://github.com/kan-bayashi/PytorchWaveNetVocoder)
# based on sprocket-vc script by <NAME> (Nagoya University)
# (https://github.com/k2kobayashi/sprocket)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
import argparse
import logging
import multiprocessing as mp
import os
import sys
import copy
import pyworld
import numpy as np
from distutils.util import strtobool
from numpy.matlib import repmat
from scipy.interpolate import interp1d
from scipy.io import wavfile
from scipy.signal import firwin
from scipy.signal import lfilter
from sprocket.speech.feature_extractor import FeatureExtractor
from sprocket.speech.synthesizer import Synthesizer
from utils import (find_files, read_txt, read_hdf5, write_hdf5, check_hdf5)
def _get_arguments():
parser = argparse.ArgumentParser(
description="making feature file argsurations.")
# path setting
parser.add_argument("--waveforms", required=True,
type=str, help="directory or list of input wav files")
parser.add_argument("--feature_dir", default=None,
type=str, help="directory of output featfile")
# acoustic feature setting
parser.add_argument("--feature_type", default="world", choices=["world"],
type=str, help="feature type")
parser.add_argument("--feature_format", default="h5",
type=str, help="feature format")
parser.add_argument("--fs", default=22050,
type=int, help="sampling frequency")
parser.add_argument("--shiftms", default=5.0,
type=float, help="frame shift in msec")
parser.add_argument("--fftl", default=1024,
type=int, help="FFT length")
parser.add_argument("--minf0", default=40,
type=float, help="minimum f0")
parser.add_argument("--maxf0", default=400,
type=float, help="maximum f0")
parser.add_argument("--pow_th", default=-20,
type=float, help="speech power threshold")
parser.add_argument("--mcep_dim", default=34,
type=int, help="dimension of mel cepstrum")
parser.add_argument("--mcep_dim_start", default=2,
type=int, help="first dimension index of mel cepstrum")
parser.add_argument("--mcep_dim_end", default=37,
type=int, help="last dimension index of mel cepstrum")
parser.add_argument("--mcep_alpha", default=0.455,
type=float, help="Alpha of mel cepstrum")
parser.add_argument("--highpass_cutoff", default=70,
type=int, help="cut off frequency in lowpass filter")
parser.add_argument("--f0_dim_idx", default=1,
type=int, help="f0 dimension index")
parser.add_argument("--ap_dim_idx", default=-2,
type=int, help="ap dimension index")
# flags setting
parser.add_argument("--save_f0", default=True,
type=strtobool, help="if set True, features f0 will be saved")
parser.add_argument("--save_ap", default=False,
type=strtobool, help="if set True, features ap will be saved")
parser.add_argument("--save_spc", default=False,
type=strtobool, help="if set True, features spc will be saved")
parser.add_argument("--save_npow", default=True,
type=strtobool, help="if set True, features npow will be saved")
parser.add_argument("--save_extended", default=False,
type=strtobool, help="if set True, exteneded feature will be saved")
parser.add_argument("--save_vad", default=True,
type=strtobool, help="if set True, features vad_idx will be saved")
parser.add_argument("--overwrite", default=False,
type=strtobool, help="if set True, overwrite the exist feature files")
# other setting
parser.add_argument('--inv', default=True,
type=strtobool, help="if False, wav is restored from acoustic features")
parser.add_argument("--n_jobs", default=10,
type=int, help="number of parallel jobs")
parser.add_argument("--verbose", default=1,
type=int, help="log message level")
return parser.parse_args()
def rootdir_replace(filepath, extname=None, newdir=None):
    """Return *filepath* with its extension and/or directory replaced.

    Parameters
    ----------
    filepath : str
        Original file path.
    extname : str, optional
        New extension (without the leading dot); if None the original
        extension is kept.
    newdir : str, optional
        New directory; if None the original directory is kept.

    Returns
    -------
    str
        The rewritten path, joined with '/'.
    """
    filename = os.path.basename(filepath)
    rootdir = os.path.dirname(filepath)
    if extname is not None:
        # os.path.splitext keeps everything before the *last* dot, so names
        # such as "a.b.wav" survive intact (the old split('.')[0] truncated
        # them to "a").
        filename = '%s.%s' % (os.path.splitext(filename)[0], extname)
    if newdir is None:
        newdir = rootdir
    return '%s/%s' % (newdir, filename)
def extfrm(data, npow, power_threshold=-20):
    """Extract the frames whose normalized power exceeds a threshold.

    Parameters
    ----------
    data : ndarray, shape (T, ...)
        Frame-wise feature matrix.
    npow : array_like, shape (T,)
        Frame-wise normalized power (dB).
    power_threshold : float
        Frames with npow strictly above this value are kept.

    Returns
    -------
    tuple
        (extracted frames, 1-D array of kept frame indices)

    Raises
    ------
    ValueError
        If data and npow have different lengths.
    """
    n_frames = data.shape[0]
    if n_frames != len(npow):
        # the original `raise("...")` raised a bare string, which is itself
        # a TypeError under Python 3 — raise a proper exception instead
        raise ValueError("Length of two vectors is different.")
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= n_frames
    return extdata, valid_index[0]
def low_cut_filter(x, fs, cutoff=70):
    """FUNCTION TO APPLY LOW CUT FILTER

    Removes spectral content below *cutoff* with a 255-tap FIR high-pass
    filter designed by the window method.

    Args:
        x (ndarray): Waveform sequence
        fs (int): Sampling frequency
        cutoff (float): Cutoff frequency of low cut filter

    Return:
        (ndarray): Low cut filtered waveform sequence
    """
    # normalise the cutoff by the Nyquist frequency as firwin expects
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(255, norm_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
def low_pass_filter(x, fs, cutoff=70, padding=True):
    """APPLY LOW PASS FILTER

    Smooths the sequence with a 255-tap FIR low-pass filter; the input is
    edge-padded so that the filter transient falls outside the returned
    signal, which has the same length as the input.

    Args:
        x (ndarray): Waveform sequence
        fs (int): Sampling frequency
        cutoff (float): Cutoff frequency of low pass filter

    Return:
        (ndarray): Low pass filtered waveform sequence
    """
    numtaps = 255
    # normalise the cutoff by the Nyquist frequency as firwin expects
    norm_cutoff = cutoff / (fs // 2)
    taps = firwin(numtaps, norm_cutoff)
    # pad both ends with edge values, filter, then trim the group delay
    padded = np.pad(x, (numtaps, numtaps), 'edge')
    filtered = lfilter(taps, 1, padded)
    return filtered[numtaps + numtaps // 2: -numtaps // 2]
def extend_time(feats, upsampling_factor):
    """EXTEND TIME RESOLUTION

    Repeat every frame `upsampling_factor` times along the time axis.

    Args:
        feats (ndarray): feature vector with the shape (T x D)
        upsampling_factor (int): number of repetitions per frame

    Return:
        (ndarray): extended feats with the shape (upsampling_factor*T x D),
        always float64 (matching the original zeros-buffer implementation)
    """
    # np.repeat performs the frame-wise duplication in one vectorized call,
    # replacing the original per-frame repmat loop; astype(np.float64) keeps
    # the output dtype identical to the old float64 zeros buffer.
    return np.repeat(feats, upsampling_factor, axis=0).astype(np.float64)
def convert_continuos_f0(f0):
    """CONVERT F0 TO CONTINUOUS F0

    Fills the unvoiced (zero) regions of an f0 contour: leading/trailing
    unvoiced frames are padded with the nearest voiced value and interior
    gaps are linearly interpolated. Also returns the voiced/unvoiced mask.

    Args:
        f0 (ndarray): original f0 sequence with the shape (T)

    Return:
        (ndarray): voiced/unvoiced binary mask with the shape (T)
        (ndarray): continuous f0 with the shape (T)
    """
    # get uv information as binary
    uv = np.float32(f0 != 0)
    # locate the voiced frames once; indexing directly is more robust than
    # the original value-matching via np.where(cont_f0 == start_f0)
    nonzero_frames = np.where(f0 != 0)[0]
    if len(nonzero_frames) == 0:
        # logging.warn is deprecated; use logging.warning
        logging.warning("all of the f0 values are 0.")
        return uv, f0
    # pad the leading and trailing unvoiced regions with edge voiced values
    cont_f0 = copy.deepcopy(f0)
    start_idx = nonzero_frames[0]
    end_idx = nonzero_frames[-1]
    cont_f0[:start_idx] = f0[start_idx]
    cont_f0[end_idx:] = f0[end_idx]
    # linear interpolation over the remaining interior unvoiced gaps
    nz_frames = np.where(cont_f0 != 0)[0]
    f = interp1d(nz_frames, cont_f0[nz_frames])
    cont_f0 = f(np.arange(0, cont_f0.shape[0]))
    return uv, cont_f0
def featpath_create(wav_list, feature_format):
    """CREATE FILE FOLDER

    Ensure the output directory exists for every feature file derived from
    the given wav paths. Note: every occurrence of the substring "wav" in a
    path is replaced by *feature_format*.
    """
    for wav_name in wav_list:
        target = wav_name.replace("wav", feature_format)
        target_dir = os.path.dirname(target)
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
def wavpath_create(wav_list, feature_format):
    """CREATE FILE FOLDER

    Ensure the output directory exists for every restored-wav file derived
    from the given wav paths. Note: every occurrence of the substring "wav"
    in a path is replaced by *feature_format* + "_restored".
    """
    suffix = feature_format + "_restored"
    for wav_name in wav_list:
        target_dir = os.path.dirname(wav_name.replace("wav", suffix))
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
def world_speech_synthesis(queue, wav_list, args):
    """WORLD SPEECH SYNTHESIS

    Re-synthesise waveforms from previously extracted WORLD features and
    write them as 16-bit PCM wav files.

    Parameters
    ----------
    queue : multiprocessing.Queue()
        the queue to store the file name of utterance; a single 'Finish'
        token is put when the whole list has been processed
    wav_list : list
        list of the wav files
    args :
        feature extract arguments (fs, fftl, shiftms, feature_format,
        feature_dir, overwrite, *_dim_idx, mcep_* ...)
    """
    # define synthesizer
    synthesizer = Synthesizer(fs=args.fs,
                              fftl=args.fftl,
                              shiftms=args.shiftms)
    # synthesis
    for i, wav_name in enumerate(wav_list):
        # derive the restored-wav path and the feature-file path, either by
        # substring replacement on the wav path or by redirecting to feature_dir
        if args.feature_dir==None:
            restored_name = wav_name.replace("wav", args.feature_format+"_restored")
            restored_name = restored_name.replace(".%s" % args.feature_format+"_restored", ".wav")
            feat_name = wav_name.replace("wav", args.feature_format)
        else:
            restored_name = rootdir_replace(wav_name, newdir=args.feature_dir+"restored")
            feat_name = rootdir_replace(wav_name, extname=args.feature_format, newdir=args.feature_dir)
        # skip existing outputs unless overwriting was requested
        if os.path.exists(restored_name):
            if args.overwrite:
                logging.info("overwrite %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
            else:
                logging.info("skip %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
                continue
        else:
            logging.info("now processing %s (%d/%d)" % (restored_name, i + 1, len(wav_list)))
        # load acoustic features; /world holds [uv, cont_f0_lpf, mcep, codeap]
        # (layout written by world_feature_extract)
        if check_hdf5(feat_name, "/world"):
            h = read_hdf5(feat_name, "/world")
        else:
            logging.error("%s is not existed."%(feat_name))
            sys.exit(1)
        if check_hdf5(feat_name, "/f0"):
            f0 = read_hdf5(feat_name, "/f0")
        else:
            # reconstruct f0 from the continuous contour and the U/V mask:
            # unvoiced frames (uv == 0) are forced back to 0
            uv = h[:, 0].copy(order='C')
            f0 = h[:, args.f0_dim_idx].copy(order='C') # cont_f0_lpf
            fz_idx = np.where(uv==0.0)
            f0[fz_idx] = 0.0
        if check_hdf5(feat_name, "/ap"):
            ap = read_hdf5(feat_name, "/ap")
        else:
            # decode band aperiodicity back to full-resolution aperiodicity
            codeap = h[:, args.ap_dim_idx:].copy(order='C')
            ap = pyworld.decode_aperiodicity(codeap, args.fs, args.fftl)
        mcep = h[:, args.mcep_dim_start:args.mcep_dim_end].copy(order='C')
        # waveform synthesis
        wav = synthesizer.synthesis(f0,
                                    mcep,
                                    ap,
                                    alpha=args.mcep_alpha)
        # clip to the int16 range before casting to avoid wrap-around
        wav = np.clip(wav, -32768, 32767)
        wavfile.write(restored_name, args.fs, wav.astype(np.int16))
    # signal completion to the parent process
    queue.put('Finish')
def world_feature_extract(queue, wav_list, args):
    """EXTRACT WORLD FEATURE VECTOR

    Extracts WORLD features (f0, spectrum, aperiodicity, mel-cepstrum,
    power) for each wav file and writes them to an HDF5 feature file.

    Parameters
    ----------
    queue : multiprocessing.Queue()
        the queue to store the file name of utterance; a single 'Finish'
        token is put when the whole list has been processed
    wav_list : list
        list of the wav files
    args :
        feature extract arguments
    """
    # define feature extractor
    feature_extractor = FeatureExtractor(
        analyzer="world",
        fs=args.fs,
        shiftms=args.shiftms,
        minf0=args.minf0,
        maxf0=args.maxf0,
        fftl=args.fftl)
    # extraction
    for i, wav_name in enumerate(wav_list):
        # resolve the output feature path (substring replacement or feature_dir)
        if args.feature_dir==None:
            feat_name = wav_name.replace("wav", args.feature_format)
        else:
            feat_name = rootdir_replace(wav_name, extname=args.feature_format, newdir=args.feature_dir)
        # skip already-extracted files unless overwriting was requested
        if check_hdf5(feat_name, "/world"):
            if args.overwrite:
                logging.info("overwrite %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
            else:
                logging.info("skip %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
                continue
        else:
            logging.info("now processing %s (%d/%d)" % (wav_name, i + 1, len(wav_list)))
        # load wavfile and apply low cut filter (high-pass) if requested
        fs, x = wavfile.read(wav_name)
        x = np.array(x, dtype=np.float32)
        if args.highpass_cutoff != 0:
            x = low_cut_filter(x, fs, cutoff=args.highpass_cutoff)
        # check sampling frequency
        if not fs == args.fs:
            logging.error("sampling frequency is not matched.")
            sys.exit(1)
        # extract features
        f0, spc, ap = feature_extractor.analyze(x)
        codeap = feature_extractor.codeap()
        mcep = feature_extractor.mcep(dim=args.mcep_dim, alpha=args.mcep_alpha)
        npow = feature_extractor.npow()
        uv, cont_f0 = convert_continuos_f0(f0)
        # frame rate of the f0 contour in frames/second (e.g. 200 for 5 ms shift)
        lpf_fs = int(1.0 / (args.shiftms * 0.001))
        cont_f0_lpf = low_pass_filter(cont_f0, lpf_fs, cutoff=20)
        next_cutoff = 70
        # NOTE(review): `cont_f0_lpf > [0]` relies on NumPy broadcasting
        # against the list [0]; the loop re-filters with a doubled cutoff
        # until all values are positive, and has no iteration bound — confirm
        # that f0 contours guarantee termination.
        while not (cont_f0_lpf>[0]).all():
            logging.info("%s low-pass-filtered [%dHz]" % (feat_name, next_cutoff))
            cont_f0_lpf = low_pass_filter(cont_f0, lpf_fs, cutoff=next_cutoff)
            next_cutoff *= 2
        # concatenate into the frame-wise matrix [uv, cont_f0_lpf, mcep, codeap]
        cont_f0_lpf = np.expand_dims(cont_f0_lpf, axis=-1)
        uv = np.expand_dims(uv, axis=-1)
        feats = np.concatenate([uv, cont_f0_lpf, mcep, codeap], axis=1)
        # save feature
        write_hdf5(feat_name, "/world", feats)
        if args.save_f0:
            write_hdf5(feat_name, "/f0", f0)
        if args.save_ap:
            write_hdf5(feat_name, "/ap", ap)
        if args.save_spc:
            write_hdf5(feat_name, "/spc", spc)
        if args.save_npow:
            write_hdf5(feat_name, "/npow", npow)
        if args.save_extended:
            # extend time resolution to one feature vector per waveform sample
            upsampling_factor = int(args.shiftms * fs * 0.001)
            feats_extended = extend_time(feats, upsampling_factor)
            feats_extended = feats_extended.astype(np.float32)
            write_hdf5(feat_name, "/world_extend", feats_extended)
        if args.save_vad:
            # voice-activity indices: frames whose npow exceeds the threshold
            _, vad_idx = extfrm(mcep, npow, power_threshold=args.pow_th)
            write_hdf5(feat_name, "/vad_idx", vad_idx)
    # signal completion to the parent process
    queue.put('Finish')
def main():
# parser arguments
args = _get_arguments()
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S')
logging.warn("logging is disabled.")
# show argmument
for key, value in vars(args).items():
logging.info("%s = %s" | |
chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
return (self.lmbda / self.rho) * np.sign(self.Y)
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`."""
self.Y = np.asarray(sp.prox_l1(self.AX + self.U,
(self.lmbda / self.rho) * self.wl1),
dtype=self.dtype)
super(BPDN, self).ystep()
def obfn_reg(self):
"""Compute regularisation term and contribution to objective
function.
"""
rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
return (self.lmbda*rl1, rl1)
class BPDNJoint(BPDN):
    r"""
    ADMM algorithm for BPDN with joint sparsity via an :math:`\ell_{2,1}`
    norm term.

    |

    .. inheritance-diagram:: BPDNJoint
       :parts: 2

    |

    Solve the optimisation problem

    .. math::
       \mathrm{argmin}_X \; (1/2) \| D X - S \|_2^2 + \lambda \| X \|_1
       + \mu \| X \|_{2,1}

    via the ADMM problem

    .. math::
       \mathrm{argmin}_{X, Y} \; (1/2) \| D X - S \|_2^2 +
       \lambda \| Y \|_1 + \mu \| Y \|_{2,1} \quad \text{such that} \quad
       X = Y \;\;.

    After termination of the :meth:`solve` method, attribute
    :attr:`itstat` is a list of tuples representing statistics of each
    iteration. The fields of the named tuple ``IterationStats`` are:

       ``Iter`` : Iteration number

       ``ObjFun`` : Objective function value

       ``DFid`` : Value of data fidelity term :math:`(1/2) \| D X - S
       \|_2^2`

       ``RegL1`` : Value of regularisation term :math:`\| X \|_1`

       ``RegL21`` : Value of regularisation term :math:`\| X \|_{2,1}`

       ``PrimalRsdl`` : Norm of primal residual

       ``DualRsdl`` : Norm of dual Residual

       ``EpsPrimal`` : Primal residual stopping tolerance
       :math:`\epsilon_{\mathrm{pri}}`

       ``EpsDual`` : Dual residual stopping tolerance
       :math:`\epsilon_{\mathrm{dua}}`

       ``Rho`` : Penalty parameter

       ``Time`` : Cumulative run time
    """

    itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegL21')
    hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('Regℓ2,1'))
    hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',
                     u('Regℓ1'): 'RegL1', u('Regℓ2,1'): 'RegL21'}

    def __init__(self, D, S, lmbda=None, mu=0.0, opt=None):
        """
        |

        **Call graph**

        .. image:: ../_static/jonga/bpdnjnt_init.svg
           :width: 20%
           :target: ../_static/jonga/bpdnjnt_init.svg

        |

        Parameters
        ----------
        D : array_like, shape (N, M)
            Dictionary matrix
        S : array_like, shape (M, K)
            Signal vector or matrix
        lmbda : float
            Regularisation parameter (l1)
        mu : float
            Regularisation parameter (l2,1)
        opt : :class:`BPDN.Options` object
            Algorithm options
        """
        opt = BPDN.Options() if opt is None else opt
        super(BPDNJoint, self).__init__(D, S, lmbda, opt)
        self.mu = self.dtype.type(mu)

    def ystep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{y}` via the combined :math:`\ell_1` /
        :math:`\ell_{2,1}` proximal operator."""
        l1_weight = (self.lmbda / self.rho) * self.wl1
        l21_weight = self.mu / self.rho
        prox = sp.prox_sl1l2(self.AX + self.U, l1_weight, l21_weight,
                             axis=-1)
        self.Y = np.asarray(prox, dtype=self.dtype)
        GenericBPDN.ystep(self)

    def obfn_reg(self):
        r"""Compute regularisation terms and contribution to objective
        function. Regularisation terms are :math:`\| Y \|_1` and
        :math:`\| Y \|_{2,1}`.
        """
        gv = self.obfn_gvar()
        rl1 = np.abs(self.wl1 * gv).sum()
        rl21 = np.sqrt((gv ** 2).sum(axis=1)).sum()
        return (self.lmbda * rl1 + self.mu * rl21, rl1, rl21)
class ElasticNet(BPDN):
    r"""
    ADMM algorithm for the elastic net :cite:`zou-2005-regularization`
    problem.

    |

    .. inheritance-diagram:: ElasticNet
       :parts: 2

    |

    Solve the optimisation problem

    .. math::
       \mathrm{argmin}_\mathbf{x} \;
       (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{x}
       \|_1 + (\mu/2) \| \mathbf{x} \|_2^2

    via the ADMM problem

    .. math::
       \mathrm{argmin}_{\mathbf{x}, \mathbf{y}} \;
       (1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \lambda \| \mathbf{y}
       \|_1 + (\mu/2) \| \mathbf{x} \|_2^2 \quad \text{such that} \quad
       \mathbf{x} = \mathbf{y} \;\;.

    After termination of the :meth:`solve` method, attribute
    :attr:`itstat` is a list of tuples representing statistics of each
    iteration. The fields of the named tuple ``IterationStats`` are:

       ``Iter`` : Iteration number

       ``ObjFun`` : Objective function value

       ``DFid`` : Value of data fidelity term :math:`(1/2) \| D
       \mathbf{x} - \mathbf{s} \|_2^2`

       ``RegL1`` : Value of regularisation term :math:`\| \mathbf{x}
       \|_1`

       ``RegL2`` : Value of regularisation term :math:`(1/2) \|
       \mathbf{x} \|_2^2`

       ``PrimalRsdl`` : Norm of primal residual

       ``DualRsdl`` : Norm of dual Residual

       ``EpsPrimal`` : Primal residual stopping tolerance
       :math:`\epsilon_{\mathrm{pri}}`

       ``EpsDual`` : Dual residual stopping tolerance
       :math:`\epsilon_{\mathrm{dua}}`

       ``Rho`` : Penalty parameter

       ``Time`` : Cumulative run time
    """

    # iteration-statistics field names and display-header mappings
    itstat_fields_objfn = ('ObjFun', 'DFid', 'RegL1', 'RegL2')
    hdrtxt_objfn = ('Fnc', 'DFid', u('Regℓ1'), u('Regℓ2'))
    hdrval_objfun = {'Fnc': 'ObjFun', 'DFid': 'DFid',
                     u('Regℓ1'): 'RegL1', u('Regℓ2'): 'RegL2'}

    def __init__(self, D, S, lmbda=None, mu=0.0, opt=None):
        """
        |

        **Call graph**

        .. image:: ../_static/jonga/elnet_init.svg
           :width: 20%
           :target: ../_static/jonga/elnet_init.svg

        |

        Parameters
        ----------
        D : array_like, shape (N, M)
            Dictionary matrix
        S : array_like, shape (M, K)
            Signal vector or matrix
        lmbda : float
            Regularisation parameter (l1)
        mu : float
            Regularisation parameter (l2)
        opt : :class:`BPDN.Options` object
            Algorithm options
        """
        if opt is None:
            opt = BPDN.Options()
        # Set dtype attribute based on S.dtype and opt['DataType']
        # NOTE(review): dtype and mu are deliberately set *before* the
        # base-class initialiser — setdict below reads self.mu, and the base
        # initialiser presumably invokes setdict; confirm against the base
        # class before reordering.
        self.set_dtype(opt, S.dtype)
        self.mu = self.dtype.type(mu)
        super(ElasticNet, self).__init__(D, S, lmbda, opt)

    def setdict(self, D):
        """Set dictionary array."""
        self.D = np.asarray(D)
        self.DTS = self.D.T.dot(self.S)
        # Factorise dictionary for efficient solves
        # (factorisation of D^T D + (mu + rho) I used by xstep)
        self.lu, self.piv = sl.cho_factor(self.D, self.mu + self.rho)
        self.lu = np.asarray(self.lu, dtype=self.dtype)

    def xstep(self):
        r"""Minimise Augmented Lagrangian with respect to
        :math:`\mathbf{x}`.
        """
        # Solve (D^T D + (mu + rho) I) x = D^T s + rho (y - u) using the
        # factorisation cached by setdict/rhochange
        self.X = np.asarray(sl.cho_solve_ATAI(
            self.D, self.mu + self.rho, self.DTS +
            self.rho * (self.Y - self.U),
            self.lu, self.piv), dtype=self.dtype)
        if self.opt['LinSolveCheck']:
            # record the relative residual of the linear solve for diagnostics
            b = self.DTS + self.rho * (self.Y - self.U)
            ax = self.D.T.dot(self.D.dot(self.X)) + (self.mu+self.rho)*self.X
            self.xrrs = sl.rrs(ax, b)
        else:
            self.xrrs = None

    def obfn_reg(self):
        """Compute regularisation term and contribution to objective
        function.
        """
        # l1 term on the weighted variable, squared-l2 term on the raw variable
        rl1 = np.linalg.norm((self.wl1 * self.obfn_gvar()).ravel(), 1)
        rl2 = 0.5 * np.linalg.norm(self.obfn_gvar())**2
        return (self.lmbda*rl1 + self.mu*rl2, rl1, rl2)

    def rhochange(self):
        """Re-factorise matrix when rho changes."""
        self.lu, self.piv = sl.cho_factor(self.D, self.mu + self.rho)
        self.lu = np.asarray(self.lu, dtype=self.dtype)
class BPDNProjL1(GenericBPDN):
r"""
ADMM algorithm for a BPDN variant with projection onto the
:math:`\ell_1` ball instead of an :math:`\ell_1` penalty.
|
.. inheritance-diagram:: BPDNProjL1
:parts: 2
|
This variant of the BPDN problem was originally referred to as the
lasso :cite:`tibshirani-1996-regression`, but that name is now also
frequently applied to the penalised form that is referred to here as
the BPDN problem.
Solve the problem
.. math::
\mathrm{argmin}_\mathbf{x} \;
(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 \; \text{such that} \;
\| \mathbf{x} \|_1 \leq \gamma
via the ADMM problem
.. math::
\mathrm{argmin}_{\mathbf{x}, \mathbf{y}} \;
(1/2) \| D \mathbf{x} - \mathbf{s} \|_2^2 + \iota_{C(\gamma)}
(\mathbf{y}) \quad \text{such that} \quad \mathbf{x} = \mathbf{y}
\;\;,
where :math:`\iota_{C(\gamma)}(\cdot)` is the indicator function
of the :math:`\ell_1` ball of radius :math:`\gamma` about the origin.
The algorithm is very similar to that for the BPDN problem (see
:class:`BPDN`), the only difference being in the replacement in the
:math:`\mathbf{y}` step of the proximal operator of the :math:`\ell_1`
norm with the projection operator of the :math:`\ell_1` norm.
After termination of the :meth:`solve` method, attribute
:attr:`itstat` is a list of tuples representing statistics of each
iteration. The fields of the named tuple ``IterationStats`` are:
``Iter`` : Iteration number
``ObjFun`` : Objective function value :math:`(1/2) \| D
\mathbf{x} - \mathbf{s} \|_2^2`
``Cnstr`` : Constraint violation measure
``PrimalRsdl`` : Norm of primal residual
``DualRsdl`` : Norm of dual residual
``EpsPrimal`` : Primal residual stopping tolerance
:math:`\epsilon_{\mathrm{pri}}`
``EpsDual`` : Dual residual stopping tolerance
:math:`\epsilon_{\mathrm{dua}}`
``Rho`` : Penalty parameter
``Time`` : Cumulative run time
"""
class Options(GenericBPDN.Options):
"""BPDNProjL1 algorithm options
Options are the same as those defined in
:class:`.GenericBPDN.Options`.
"""
defaults = copy.deepcopy(GenericBPDN.Options.defaults)
defaults['AutoRho'].update({'RsdlTarget': 1.0})
def __init__(self, opt=None):
"""
Parameters
----------
opt : dict or None, optional (default None)
BPDNProjL1 algorithm options
"""
if opt is None:
opt = {}
GenericBPDN.Options.__init__(self, opt)
itstat_fields_objfn = ('ObjFun', 'Cnstr')
hdrtxt_objfn = ('Fnc', 'Cnstr')
hdrval_objfun = {'Fnc': 'ObjFun', 'Cnstr': 'Cnstr'}
def __init__(self, D, S, gamma, opt=None):
"""
|
**Call graph**
.. image:: ../_static/jonga/bpdnprjl1_init.svg
:width: 20%
:target: ../_static/jonga/bpdnprjl1_init.svg
|
Parameters
----------
D : array_like, shape (N, M)
Dictionary matrix
S : array_like, shape (N, K)
Signal vector or matrix
gamma : float
Constraint parameter
opt : :class:`BPDNProjL1.Options` object
Algorithm options
"""
# Set default options if necessary
if opt is None:
opt = BPDNProjL1.Options()
super(BPDNProjL1, self).__init__(D, S, opt)
self.gamma = self.dtype.type(gamma)
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
# NB: still needs to be worked out.
return np.zeros(ushape, dtype=self.dtype)
def ystep(self):
r"""Minimise Augmented Lagrangian with respect to
:math:`\mathbf{y}`.
"""
self.Y = np.asarray(sp.proj_l1(self.AX + self.U, self.gamma, axis=0),
dtype=self.dtype)
super(BPDNProjL1, self).ystep()
def eval_objfn(self):
"""Compute components of regularisation function as well | |
<filename>mobile_insight/analyzer/kpi/lte_handover_prediction_analyzer.py
#!/usr/bin/python
# Filename: lte_handover_prediction_analyzer.py
"""
A file that counts the occurrences of handovers triggered by different events
Author: <NAME>
"""
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from ..analyzer import *
from .kpi_analyzer import KpiAnalyzer
import copy
import pickle
#
__all__=["LteHandoverPredictionAnalyzer"]
import time
import datetime
# CONDITION_INTRA = ""
def string2timestamp(s):
    """Convert a naive datetime object to a POSIX-style float timestamp
    (seconds, local time) including the fractional microsecond part."""
    whole_seconds = time.mktime(s.timetuple())
    return whole_seconds + s.microsecond / 1000000.0
class LteHandoverPredictor():
    """State machine that predicts LTE handovers from RRC measurement
    reports and scores the predictions against observed handover commands.

    Counters:
        pred_cnt -- number of predictions issued
        ho_cnt   -- number of observed handovers (measurement-control events
                    preceded by at least one a3/a5 report)
        fp_cnt   -- false positives (prediction not followed by a handover)
        fn_cnt   -- false negatives (handover that was never predicted)
    """

    def __init__(self):
        self.pred_cnt = 0
        self.ho_cnt = 0
        self.fp_cnt = 0  # false positive
        self.fn_cnt = 0  # false negative
        self.report_state = 0  # 0: initial state; 1: waiting state; 2: true state
        # NOTE(review): pred_flag starts True, so the first expiry or RRC
        # release is counted as a false positive — confirm this is intended.
        self.pred_flag = True
        self.report_time = 0
        self.current_time = 0
        self.a3a5_cnt = 0
        self.freq = ''

    def setCurrentTime(self, current_datetime):
        """Record the current time in milliseconds from a datetime object or
        a '%Y-%m-%d %H:%M:%S.%f' string."""
        if isinstance(current_datetime, str):
            current_datetime = datetime.datetime.strptime(current_datetime, '%Y-%m-%d %H:%M:%S.%f')
        self.current_time = current_datetime.microsecond / 1e3 + time.mktime(current_datetime.timetuple()) * 1e3

    def isExpired(self):
        """Expire a pending prediction as a false positive if no handover
        arrived within 1000 ms of the triggering report."""
        if self.pred_flag and (1000 + self.report_time) < self.current_time:
            self.pred_flag = False
            self.fp_cnt += 1
            self.report_state = 0

    def recRrcRelease(self):
        """Handle an RRC connection release/request: an outstanding
        prediction becomes a false positive and the machine is reset."""
        if self.pred_flag:
            self.fp_cnt += 1
        self.pred_flag = False
        self.report_state = 0
        self.a3a5_cnt = 0

    def recMeasReport(self, log_item):
        """Update the predictor with one measurement report.

        Keys read from log_item: 'event' (a1/a2/a3/a5), 'freq' (measured
        frequency) and 'Current freq' (serving-cell frequency).
        """
        if log_item['event'] == 'a3' or log_item['event'] == 'a5':
            self.a3a5_cnt += 1
        if log_item['event'] == 'a3':
            # a3 (neighbour better than serving): predict immediately
            if not self.pred_flag:
                self.pred_cnt += 1
            self.pred_flag = True
            self.report_time = self.current_time
            self.report_state = 2
        else:
            if self.report_state == 0:
                # intra-frequency a2 (serving became worse): arm the machine
                if log_item['event'] == 'a2' and log_item['Current freq'] == log_item['freq']:
                    self.report_state = 1
            elif self.report_state == 1:
                # inter-frequency a5 while armed: predict a handover
                if log_item['event'] == 'a5' and log_item['Current freq'] is not None and log_item['freq'] != log_item['Current freq']:
                    if not self.pred_flag:
                        self.pred_cnt += 1
                    self.pred_flag = True
                    self.report_time = self.current_time
                    self.report_state = 2
                elif log_item['event'] == 'a1' and log_item['Current freq'] == log_item['freq']:
                    # serving recovered: disarm. Bug fix: the original
                    # assigned a bare local `report_state` here, so the
                    # machine was never actually reset on an a1 report.
                    self.report_state = 0

    def recMeasControl(self):
        """Handle a handover (mobilityControlInfo) event and update the
        hit/miss counters, then reset the per-handover state."""
        if self.a3a5_cnt > 0:
            self.ho_cnt += 1
            if not self.pred_flag:
                self.fn_cnt += 1
        self.a3a5_cnt = 0
        self.pred_flag = False
        self.report_state = 0  # 0: initial state; 1: waiting state; 2: true state
class LteHandoverPredictionAnalyzer(KpiAnalyzer):
"""
A function analyzer that models mobility management.
It offers two functions
(1) Mobility event notification
(2) A complete mobility history and configuration query
(3) A handoff rule inference module
"""
"""
Development plan:
Step 1: make it work in single cell, LTE only
Step 2: support cell change with loading/saving state machine for different freq under LTE
Step 3: support in-RAT
"""
    def __init__(self):
        """Set up the analyzer: internal handoff state, source analyzers and
        the KPI registration."""
        KpiAnalyzer.__init__(self)
        self.handoverMsg = []
        # state used to build the handoff transition graph
        # (HandoffSample/MobilityStateMachine presumably come from the
        # `from ..analyzer import *` at module top — confirm)
        self.__handoff_sample = HandoffSample()
        self.__mobility_state_machine = MobilityStateMachine()
        # NOTE(review): original comment said prediction is "disabled by
        # default", but the flag is initialised to True — confirm intent.
        self.__b_prediction = True
        self.__predict_target = None #predicted target cell
        # include source analyzers whose messages drive this analyzer
        self.include_analyzer("LteRrcAnalyzer",[self.__on_lte_rrc_msg])
        self.include_analyzer("TrackCellInfoAnalyzer",[])
        self.ho_predictor = LteHandoverPredictor()
        self.attributes = ["predict_condition", "handover_occurence"]
        #no source callbacks are included
        self.register_kpi("Mobility", "HANDOVER_PREDICTION", self.__on_lte_rrc_msg, 0)
def reset(self):
"""
Reset the state machine
"""
self.__mobility_state_machine.reset()
self.__handoff_sample = HandoffSample()
def set_handoff_prediction(self,b_predict):
"""
Enable/disable handoff prediction
:param b_prediction: True if prediction should be enabled, False otherwise
:type b_prediction: boolean
"""
self.__b_prediction = b_predict
def __on_lte_rrc_msg(self,msg):
"""
Handle LTE RRC messages.
It updates the mobility state,
recovers the partial state transition,
and then the entire handoff mobility graph
:param msg: the event (message) from the trace collector.
"""
#The message from LteRrcAnalyzer is decoded XML messages
# if self.__print == False:
# print msg.data.decode()
# self.__print = True
# if self.ho_predictor.
for field in msg.data.iter('field'):
if field.get('name') == "lte-rrc.rrcConnectionRelease_element" or field.get('name') == 'lte-rrc.rrcConnectionRequest_element':
self.ho_predictor.setCurrentTime(msg.timestamp)
self.ho_predictor.recRrcRelease()
if field.get('name')=="lte-rrc.mobilityControlInfo_element":
#A handoff command: create a new HandoffState
target_cell = None
target_cell_id = None
target_bw = None
for val in field.iter('field'):
#Currently we focus on freq-level handoff
if val.get('name')=='lte-rrc.dl_Bandwidth':
target_bw = val.get('show')
if val.get('name')=='lte-rrc.dl_CarrierFreq':
target_cell = val.get('show')
if val.get('name')=='lte-rrc.targetPhysCellId':
target_cell_id = int(val.get('show'))
if not target_cell:
#In T-Mobile, some logs does not carry dl_CarrierFreq.
#These are intra-frequency handoff
target_cell = self.get_analyzer("LteRrcAnalyzer").get_cur_cell().freq
if target_cell:
#FIXME: consider 4G->3G handover (e.g., SRVCC, CSFB)
meas_state_tmp = copy.deepcopy(self.__handoff_sample.cur_state)
handoff_state = HandoffState("LTE",target_cell)
self.__handoff_sample.add_state_transition(handoff_state)
#Trigger merging function
self.__mobility_state_machine.update_state_machine(self.__handoff_sample)
#Reset handoff sample
self.__handoff_sample = HandoffSample()
self.__handoff_sample.cur_state = copy.deepcopy(meas_state_tmp)
bcast_dict = {}
bcast_dict['Timestamp'] = str(string2timestamp(msg.timestamp))
bcast_dict['event'] = str(handoff_state.dump())
# self.broadcast_info('HANDOVER_EVENT',bcast_dict)
# self.log_info(str(string2timestamp(msg.timestamp))+" Handoff to " + handoff_state.dump())
# self.log_info(target_bw)
# Broadcast to apps
# cur_cell_freq = self.get_analyzer("LteRrcAnalyzer").ql_get_cur_freq()
# cur_cell_id = self.get_analyzer("LteRrcAnalyzer").ql_get_cur_cellid()
cur_cell_freq = self.get_analyzer("LteRrcAnalyzer").get_cur_cell().freq
cur_cell_id = self.get_analyzer("LteRrcAnalyzer").get_cur_cell().id
# print cur_cell_freq
# print self.get_analyzer("LteRrcAnalyzer").get_cur_cell().freq
# self.ho_predictor.recMeasControl({'Current Cell ID':cur_cell_id, 'Current Freq':cur_cell_freq, 'Target Cell Id':target_cell_id, 'Target Radio':handoff_state.rat, 'Target Freq':int(handoff_state.freq), 'Target bandwidth':target_bw})
self.ho_predictor.recMeasControl()
return
if field.get('name')=="lte-rrc.measurementReport_element":
#A measurement report: parse it, push it into Handoff sample
meas_id = None
rss = None
neighborCells = []
maxRss = -140
curNeighborCell = None
for val in field.iter('field'):
if val.get('name')=='lte-rrc.measId':
meas_id = val.get('show')
if val.get('name')=='lte-rrc.rsrpResult':
rss_tmp = int(val.get('show'))-140
if curNeighborCell == None:
rss = rss_tmp
else:
if neighborCells == [] or rss_tmp > maxRss:
neighborCells = [curNeighborCell]
maxRss = rss_tmp
elif neighborCells != [] and rss_tmp == maxRss:
neighborCells.append(curNeighborCell)
# neighCellDict[neighCell] = rss_tmp
if val.get('name')=='lte-rrc.physCellId':
curNeighborCell = int(val.get('show'))
if meas_id and self.__handoff_sample.cur_state:
meas_report = self.__handoff_sample.cur_state.get_meas_report_obj(meas_id)
self.__handoff_sample.add_meas_report(meas_report)
bandwidth = None
# self.log_info(meas_id+" "+str(self.__handoff_sample.cur_state.measid_list)+" "+str(meas_report))
# cur_cell_freq = self.get_analyzer("LteRrcAnalyzer").ql_get_cur_freq()
cur_cell_freq = self.get_analyzer("LteRrcAnalyzer").get_cur_cell().freq
bandwidth = None
meas_obj_freq = None
if meas_report[0]: #meas obj is known
try:
bandwidth = meas_report[0].measBandwidth
meas_obj_freq = meas_report[0].freq
except:
meas_obj_freq = None
# print 'Error'
# print "{},{},{}".format(msg.timestamp,meas_id,meas_obj_freq)
# self.log_info("Measurement obj bandwidth:"+" "+bandwidth)
if meas_report[1]: #report config is known
# self.log_info(str(string2timestamp(msg.timestamp))+" Measurement report "+str(meas_report[1].event_list[0].type)+" "+str(rss))
# Broadcast to apps
# print neighborCells
old_flag = self.ho_predictor.pred_flag
self.ho_predictor.recMeasReport({'event':str(meas_report[1].event_list[0].type), 'neighbor cells':neighborCells, 'Current freq':cur_cell_freq, 'freq':meas_obj_freq, 'bandwidth':bandwidth})
if not old_flag and self.ho_predictor.pred_flag:
bcast_dict = {}
bcast_dict['Timestamp'] = str(string2timestamp(msg.timestamp))
bcast_dict['event'] = str(meas_report[1].event_list[0].type)
self.store_kpi("KPI_Mobility_HANDOVER_PREDICTION", "1", msg.timestamp)
# self.broadcast_info('HANDOVER_PREDICTION', bcast_dict)
# self.log_error(str(string2timestamp(msg.timestamp))+" Handover will occur" )
# bcast_dict = {}
# bcast_dict['Timestamp']=str(msg.timestamp)
# bcast_dict['event'] = str(meas_report[1].event_list[0].type)
# bcast_dict['rss'] = str(rss)
# self.broadcast_info('MEAS_REPORT',bcast_dict)
# if field.get('name')=="lte-rrc.measResultsCDMA2000_element":
# rss = None
# for val in field.iter('field'):
# if val.get('name')=='lte-rrc.pilotStrength':
# rss = val.get('show')
# #CDMA2000 measurement report
# #NOTE: Different from normal meas report, this one does not have measid/reportid
# tmp = LteReportConfig("CDMA2000",0)
# tmp.add_event("CDMA2000",0)
# meas_report = (LteMeasObjectCDMA2000(None,0),tmp) #fake an empty report
# self.__handoff_sample.add_meas_report(meas_report)
# self.log_info(str(string2timestamp(msg.timestamp))+" Measurement report cdma2000 "+str(rss))
#TODO: broadcast to apps
if field.get('name')=="lte-rrc.measConfig_element":
#A Measurement control reconfiguration
meas_state = None
if self.__handoff_sample.cur_state:
#Meas control may take stateful addition/deletion,
#So need current copy whenever available
meas_state = copy.deepcopy(self.__handoff_sample.cur_state)
else:
meas_state = MeasState()
for val in field.iter('field'):
if val.get('name')=='lte-rrc.MeasObjectToAddMod_element':
#Add measurement object
meas_obj = self.__get_meas_obj(val)
if meas_obj:
meas_state.measobj[meas_obj.obj_id] = meas_obj
if val.get('name')=='lte-rrc.measObjectToRemoveList':
#Remove measurement object
for item in val.iter('field'):
if item.get('name')=='lte-rrc.MeasObjectId' \
and item.get('show') in meas_state.measobj:
del meas_state.measobj[item.get('show')]
if val.get('name')=='lte-rrc.ReportConfigToAddMod_element':
#Add/modify a report config
report_config = self.__get_report_config(val)
if report_config:
meas_state.report_list[report_config.report_id]=report_config
if val.get('name')=='lte-rrc.reportConfigToRemoveList':
#Remove a report config
for item in val.iter('field'):
if item.get('name')=='lte-rrc.ReportConfigId' \
and item.get('show') in meas_state.report_list:
del meas_state.report_list[item.get('show')]
if val.get('name')=='lte-rrc.MeasIdToAddMod_element':
#Add a measurement ID
meas_id = -1
meas_obj_id = -1
report_id = -1
for item in val.iter('field'):
if item.get('name')=='lte-rrc.measId':
meas_id = item.get('show')
if item.get('name')=='lte-rrc.measObjectId':
meas_obj_id = item.get('show')
if item.get('name')=='lte-rrc.reportConfigId':
report_id = item.get('show')
meas_state.measid_list[meas_id]=(meas_obj_id,report_id)
# self.log_info('Add:'+ meas_id+','+report_id)
if val.get('name')=='lte-rrc.measIdToRemoveList':
#Remove a measurement ID
# self.log_info(str(meas_state.measid_list))
for item in val.iter('field'):
# if item.get('name')=='lte-rrc.MeasId':
# self.log_info("In remove list:"+item.get('show'))
if item.get('name')=='lte-rrc.MeasId' and item.get('show') in meas_state.measid_list:
del meas_state.measid_list[item.get('show')]
# self.log_info("Remove:"+item.get('show'))
#Generate a new state to the handoff sample
self.__handoff_sample.add_state_transition(meas_state)
# self.__mobility_state_machine.update_state_machine(self.__handoff_sample)
# #Reset handoff sample
# self.__handoff_sample = HandoffSample()
# self.log_info(str(string2timestamp(msg.timestamp))+" Measurement control")
# self.log_info("Meas State: \n"+meas_state.dump())
# Broadcast to apps
# bcast_dict = {}
# bcast_dict['Timestamp']=str(msg.timestamp)
# bcast_dict['Control info'] = meas_state.dump()
# self.broadcast_info('MEAS_CTRL',bcast_dict)
def __get_meas_obj(self,msg):
"""
Parse MeasObjectToAddMod_element, return a measurement object
:param msg: the XML msg with MeasObjectToAddMod_element
:returns: a measurement objects to be added
"""
measobj_id = -1
for field in msg.iter('field'):
if field.get('name') == "lte-rrc.measObjectId":
measobj_id = field.get('show')
if field.get('name') == "lte-rrc.measObjectEUTRA_element":
#A LTE meas obj
field_val = {}
field_val['lte-rrc.carrierFreq'] = None
field_val['lte-rrc.offsetFreq'] = 0
for val in | |
disk in drive' waning on windows
"""
# StackOverflow Ref: https://goo.gl/9gYdef
if _platform == "win32":
kernel32.SetThreadErrorMode(SEM_FAIL, ctypes.byref(oldmode))
@staticmethod
def resume_windows_warning():
    """
    Resumes warning on windows.

    Restores the thread error mode that suspend_windows_warning
    switched off (module-level ``oldmode`` presumably holds the
    previous mode saved elsewhere -- TODO confirm).
    """
    # No-op on every platform other than Windows.
    if _platform == "win32":
        # Resume windows error
        kernel32.SetThreadErrorMode(oldmode, ctypes.byref(oldmode))
@staticmethod
def _skip_file_extension(
        file_type, supported_types, folders_only, file_extension):
    """
    Used internally by get_files_in_path to decide whether an entry
    should be skipped based on its file extension.
    """
    # No extension filter requested: never skip anything.
    if file_type is None or file_type == "*":
        return False
    # Skip when only folders were asked for, when the extension falls
    # outside the supported set, or when it differs from the filter.
    unsupported = (len(supported_types) > 0 and
                   file_extension not in supported_types)
    return folders_only or unsupported or file_type != file_extension
@staticmethod
def get_files_in_path(
        show_hidden_files, files_only, folders_only, supported_types,
        file_type, user_dir, orig_path):
    """
    Get list of files and dirs in the path
    :param show_hidden_files: boolean
    :param files_only: boolean (arrives as the string 'true'/'false',
        judging by the comparison below -- TODO confirm)
    :param folders_only: boolean
    :param supported_types: array of supported types
    :param file_type: file type
    :param user_dir: base user dir
    :param orig_path: path after user dir
    :return: dict keyed by entry name, each value holding Filename,
        Path, file_type, Protected flag and Properties metadata
    """
    files = {}
    for f in sorted(os.listdir(orig_path)):
        system_path = os.path.join(os.path.join(orig_path, f))
        # continue if file/folder is hidden (based on user preference)
        if not show_hidden_files and is_folder_hidden(system_path):
            continue
        user_path = os.path.join(os.path.join(user_dir, f))
        created = time.ctime(os.path.getctime(system_path))
        modified = time.ctime(os.path.getmtime(system_path))
        # NOTE(review): splitext here appears to be a project helper
        # returning just the extension (not os.path.splitext's tuple) --
        # TODO confirm against its definition.
        file_extension = str(splitext(system_path))
        # set protected to 1 if no write or read permission
        protected = 0
        if (not os.access(system_path, os.R_OK) or
                not os.access(system_path, os.W_OK)):
            protected = 1
        # list files only or folders only
        if os.path.isdir(system_path):
            if files_only == 'true':
                continue
            # Directories get a pseudo-extension and a trailing slash.
            file_extension = "dir"
            user_path = "{0}/".format(user_path)
        # filter files based on file_type
        elif Filemanager._skip_file_extension(
                file_type, supported_types, folders_only, file_extension):
            continue
        # create a list of files and folders
        files[f] = {
            "Filename": f,
            "Path": user_path,
            "file_type": file_extension,
            "Protected": protected,
            "Properties": {
                "Date Created": created,
                "Date Modified": modified,
                "Size": sizeof_fmt(getsize(system_path))
            }
        }
    return files
@staticmethod
def list_filesystem(in_dir, path, trans_data, file_type, show_hidden):
    """
    It lists all file and folders within the given
    directory.

    :param in_dir: storage base directory (None in desktop mode)
    :param path: user-visible path below in_dir
    :param trans_data: transaction settings dict (folders_only,
        files_only, supported_types)
    :param file_type: extension filter ('*' disables filtering)
    :param show_hidden: whether hidden entries are included
    :return: dict of entries, or {'Code': 0, 'Error': ...} on failure
    """
    # Silence the Windows "no disk in drive" dialog while probing.
    Filemanager.suspend_windows_warning()
    is_show_hidden_files = show_hidden
    path = unquote(path)
    try:
        Filemanager.check_access_permission(in_dir, path)
    except Exception as e:
        Filemanager.resume_windows_warning()
        files = {
            'Code': 0,
            'Error': str(e)
        }
        return files
    files = {}
    # On Windows with no base directory, the filesystem root means
    # "list the drive letters" rather than a real directory.
    if (_platform == "win32" and (path == '/' or path == '\\'))\
            and in_dir is None:
        drives = Filemanager._get_drives_with_size()
        for drive, drive_size in drives:
            path = file_name = "{0}:".format(drive)
            files[file_name] = {
                "Filename": file_name,
                "Path": path,
                "file_type": 'drive',
                # A zero-size drive is presented as protected.
                "Protected": 1 if drive_size == 0 else 0,
                "Properties": {
                    "Date Created": "",
                    "Date Modified": "",
                    "Size": drive_size
                }
            }
        Filemanager.resume_windows_warning()
        return files
    orig_path = Filemanager.get_abs_path(in_dir, path)
    if not path_exists(orig_path):
        Filemanager.resume_windows_warning()
        return {
            'Code': 0,
            'Error': gettext("'{0}' file does not exist.").format(path)
        }
    user_dir = path
    folders_only = trans_data.get('folders_only', '')
    files_only = trans_data.get('files_only', '')
    supported_types = trans_data.get('supported_types', [])
    orig_path = unquote(orig_path)
    try:
        files = Filemanager.get_files_in_path(
            is_show_hidden_files, files_only, folders_only,
            supported_types, file_type, user_dir, orig_path
        )
    except Exception as e:
        Filemanager.resume_windows_warning()
        err_msg = str(e)
        # Prefer the bare strerror for permission errors to keep the
        # client-facing message short.
        if (hasattr(e, 'strerror') and
                e.strerror == gettext('Permission denied')):
            err_msg = str(e.strerror)
        files = {
            'Code': 0,
            'Error': err_msg
        }
    Filemanager.resume_windows_warning()
    return files
@staticmethod
def check_access_permission(in_dir, path):
    """
    Ensure *path* resolves to a location inside the user's storage
    directory when running in server mode.

    :param in_dir: storage base directory (may be None / empty)
    :param path: user-supplied path below in_dir
    :raises InternalServerError: when the resolved path escapes in_dir
    """
    if not config.SERVER_MODE:
        return
    in_dir = '' if in_dir is None else in_dir
    orig_path = Filemanager.get_abs_path(in_dir, path)
    # This translates path with relative path notations
    # like ./ and ../ to absolute path.
    orig_path = os.path.abspath(orig_path)
    if in_dir:
        # Strip a single trailing separator from the base directory.
        if _platform == 'win32':
            if in_dir[-1] == '\\' or in_dir[-1] == '/':
                in_dir = in_dir[:-1]
        else:
            if in_dir[-1] == '/':
                in_dir = in_dir[:-1]
        # Do not allow user to access outside his storage dir
        # in server mode. Require the base dir itself or a child of
        # it: a bare startswith() would also accept sibling dirs that
        # merely share the prefix (e.g. '/storage2' vs '/storage').
        if not (orig_path == in_dir or
                orig_path.startswith(in_dir + '/') or
                orig_path.startswith(in_dir + '\\')):
            raise InternalServerError(
                gettext("Access denied ({0})").format(path))
@staticmethod
def get_abs_path(in_dir, path):
    """
    Join the storage base directory with a user-supplied path,
    tolerating both '/' and '\\' separators on either side.
    """
    # UNC path on Windows, desktop mode, or no base dir:
    # the given path is used verbatim.
    if (path.startswith('\\\\') and _platform == 'win32') \
            or config.SERVER_MODE is False or in_dir is None:
        return "{}".format(path)
    if path == '/' or path == '\\':
        # Root of the storage dir: base dir with one trailing separator.
        if _platform == 'win32':
            if in_dir.endswith('\\') or in_dir.endswith('/'):
                return "{}".format(in_dir)
            return "{}{}".format(in_dir, '\\')
        if in_dir.endswith('/'):
            return "{}".format(in_dir)
        return "{}{}".format(in_dir, '/')
    dir_sep = in_dir.endswith('/') or in_dir.endswith('\\')
    path_sep = path.startswith('/') or path.startswith('\\')
    if path_sep:
        # Path already carries a separator; drop the base dir's, if any.
        base = in_dir[:-1] if dir_sep else in_dir
        return "{}{}".format(base, path)
    # No leading separator on the path: insert one.
    return "{}/{}".format(in_dir, path)
def validate_request(self, capability):
    """
    Check the requested capability against the capabilities stored in
    the session for this transaction.
    """
    trans_data = Filemanager.get_trasaction_selection(self.trans_id)
    # Membership test is equivalent to the original False/True branch.
    return capability in trans_data['capabilities']
def getinfo(self, path=None, get_size=True, name=None, req=None):
    """
    Returns a JSON object containing information
    about the given file.

    :param path: user-visible path of the file
    :param get_size: unused here -- kept for interface compatibility
    :param name: unused
    :param req: unused
    :return: dict with Filename/FileType/Path/Error/Code/Properties;
        Code is 1 on success, 0 on access error, -1 when missing
    """
    date_created = 'Date Created'
    date_modified = 'Date Modified'
    path = unquote(path)
    if self.dir is None:
        self.dir = ""
    orig_path = "{0}{1}".format(self.dir, path)
    try:
        Filemanager.check_access_permission(self.dir, path)
    except Exception as e:
        # Access denied: report the error with empty properties.
        thefile = {
            'Filename': split_path(path)[-1],
            'FileType': '',
            'Path': path,
            'Error': str(e),
            'Code': 0,
            'Info': '',
            'Properties': {
                date_created: '',
                date_modified: '',
                'Width': '',
                'Height': '',
                'Size': ''
            }
        }
        return thefile
    user_dir = path
    thefile = {
        'Filename': split_path(orig_path)[-1],
        'FileType': '',
        'Path': user_dir,
        'Error': '',
        'Code': 1,
        'Info': '',
        'Properties': {
            date_created: '',
            date_modified: '',
            'Width': '',
            'Height': '',
            'Size': ''
        }
    }
    if not path_exists(orig_path):
        thefile['Error'] = gettext(
            "'{0}' file does not exist.").format(path)
        thefile['Code'] = -1
        return thefile
    # A trailing slash or a non-file entry is reported as a directory.
    if split_path(user_dir)[-1] == '/'\
            or os.path.isfile(orig_path) is False:
        thefile['FileType'] = 'Directory'
    else:
        thefile['FileType'] = splitext(user_dir)
    created = time.ctime(os.path.getctime(orig_path))
    modified = time.ctime(os.path.getmtime(orig_path))
    thefile['Properties'][date_created] = created
    thefile['Properties'][date_modified] = modified
    thefile['Properties']['Size'] = sizeof_fmt(getsize(orig_path))
    return thefile
def getfolder(self, path=None, file_type="", name=None, req=None,
              show_hidden=False):
    """
    Return the files and folders found under the given path.
    """
    selection = Filemanager.get_trasaction_selection(self.trans_id)
    # In desktop mode there is no storage base directory.
    base_dir = self.dir if config.SERVER_MODE else None
    if base_dir is not None and not base_dir.endswith('/'):
        base_dir += '/'
    return self.list_filesystem(
        base_dir, path, selection, file_type, show_hidden)
def rename(self, old=None, new=None, req=None):
    """
    Rename file or folder

    :param old: current user-visible path (trailing '/' for a dir)
    :param new: new name (name only, not a path)
    :param req: unused
    :return: dict describing the rename outcome (Code 1 on success)
    """
    if not self.validate_request('rename'):
        return self.ERROR_NOT_ALLOWED
    the_dir = self.dir if self.dir is not None else ''
    try:
        Filemanager.check_access_permission(the_dir, old)
        Filemanager.check_access_permission(the_dir, new)
    except Exception as e:
        res = {
            'Error': str(e),
            'Code': 0
        }
        return res
    # check if it's dir
    if old[-1] == '/':
        old = old[:-1]
    # extract filename
    oldname = split_path(old)[-1]
    path = old
    path = split_path(path)[0]  # extract path
    if not path[-1] == '/':
        path += '/'
    # The new name replaces the last path component only.
    newname = new
    newpath = path + newname
    # make system old path
    oldpath_sys = "{0}{1}".format(the_dir, old)
    newpath_sys = "{0}{1}".format(the_dir, newpath)
    error_msg = gettext('Renamed successfully.')
    code = 1
    try:
        os.rename(oldpath_sys, newpath_sys)
    except Exception as e:
        code = 0
        error_msg = "{0} {1}".format(
            gettext('There was an error renaming the file:'), e)
    result = {
        'Old Path': old,
        'Old Name': oldname,
        'New Path': newpath,
        'New Name': newname,
        'Error': error_msg,
        'Code': code
    }
    return result
def delete(self, path=None, req=None):
    """
    Delete file or folder

    Note: directories are removed with os.rmdir, so a non-empty
    directory fails and the OS error is returned in 'Error'.

    :param path: user-visible path to delete
    :param req: unused
    :return: dict with Path/Error/Code (Code 1 on success)
    """
    if not self.validate_request('delete'):
        return self.ERROR_NOT_ALLOWED
    the_dir = self.dir if self.dir is not None else ''
    orig_path = "{0}{1}".format(the_dir, path)
    try:
        Filemanager.check_access_permission(the_dir, path)
    except Exception as e:
        res = {
            'Error': str(e),
            'Code': 0
        }
        return res
    err_msg = ''
    code = 1
    try:
        if os.path.isdir(orig_path):
            os.rmdir(orig_path)
        else:
            os.remove(orig_path)
    except Exception as e:
        code = 0
        err_msg = str(e.strerror)
    result = {
        'Path': path,
        'Error': err_msg,
        'Code': code
    }
    return result
def add(self, req=None):
    """
    File upload functionality.

    Streams the uploaded file to disk in 4MB chunks under the
    transaction's storage directory.

    :param req: request object carrying form['currentpath'] and
        files['newfile']
    :return: dict with Path/Name/Error/Code (Code 1 on success)
    """
    if not self.validate_request('upload'):
        return self.ERROR_NOT_ALLOWED
    the_dir = self.dir if self.dir is not None else ''
    path = req.form.get('currentpath')
    # Validate access *before* writing anything to disk; previously
    # this check ran only after the upload had already been written.
    try:
        Filemanager.check_access_permission(the_dir, path)
    except Exception as e:
        return {
            'Error': str(e),
            'Code': 0
        }
    err_msg = ''
    code = 1
    # Initialize so the result dict is well-formed even when the
    # upload fails before the target name is computed.
    new_name = ''
    try:
        file_obj = req.files['newfile']
        file_name = file_obj.filename
        orig_path = "{0}{1}".format(the_dir, path)
        new_name = "{0}{1}".format(orig_path, file_name)
        try:
            # Check if the new file is inside the users directory
            pathlib.Path(new_name).relative_to(the_dir)
        except ValueError as _:
            return self.ERROR_NOT_ALLOWED
        with open(new_name, 'wb') as f:
            while True:
                # 4MB chunk (4 * 1024 * 1024 Bytes)
                data = file_obj.read(4194304)
                if not data:
                    break
                f.write(data)
    except Exception as e:
        code = 0
        err_msg = str(e.strerror) if hasattr(e, 'strerror') else str(e)
    return {
        'Path': path,
        'Name': new_name,
        'Error': err_msg,
        'Code': code
    }
def is_file_exist(self, path, name, req=None):
"""
Checks whether given file exists or not
"""
the_dir = self.dir if self.dir is not None else ''
err_msg = ''
code = 1
name = unquote(name)
path = unquote(path)
try:
orig_path = "{0}{1}".format(the_dir, path)
Filemanager.check_access_permission(
the_dir, "{}{}".format(path, name))
new_name = "{0}{1}".format(orig_path, name)
if not os.path.exists(new_name):
code = 0
except Exception as e:
| |
that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
:param 'DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionArgs' required_during_scheduling_ignored_during_execution: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
if preferred_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "preferred_during_scheduling_ignored_during_execution", preferred_during_scheduling_ignored_during_execution)
if required_during_scheduling_ignored_during_execution is not None:
pulumi.set(__self__, "required_during_scheduling_ignored_during_execution", required_during_scheduling_ignored_during_execution)
@property
@pulumi.getter(name="preferredDuringSchedulingIgnoredDuringExecution")
def preferred_during_scheduling_ignored_during_execution(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution']]:
    """
    The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
    """
    # Generated-style Pulumi getter; value is stored via pulumi.set in __init__.
    return pulumi.get(self, "preferred_during_scheduling_ignored_during_execution")
@property
@pulumi.getter(name="requiredDuringSchedulingIgnoredDuringExecution")
def required_during_scheduling_ignored_during_execution(self) -> Optional['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution']:
    """
    If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
    """
    # Generated-style Pulumi getter; value is stored via pulumi.set in __init__.
    return pulumi.get(self, "required_during_scheduling_ignored_during_execution")
def _translate_property(self, prop):
    # Map camelCase wire names to snake_case attribute names; falls
    # back to the name unchanged when no mapping exists.
    return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): generated-style Pulumi output type -- presumably produced by
# Pulumi codegen from the DatadogAgent CRD schema (TODO confirm); prefer
# regenerating from the upstream schema over hand-editing.
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution(dict):
    """
    An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
    """
    def __init__(__self__, *,
                 preference: 'outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference',
                 weight: int):
        """
        An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
        :param 'DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceArgs' preference: A node selector term, associated with the corresponding weight.
        :param int weight: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        pulumi.set(__self__, "preference", preference)
        pulumi.set(__self__, "weight", weight)
    @property
    @pulumi.getter
    def preference(self) -> 'outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference':
        """
        A node selector term, associated with the corresponding weight.
        """
        return pulumi.get(self, "preference")
    @property
    @pulumi.getter
    def weight(self) -> int:
        """
        Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
        """
        return pulumi.get(self, "weight")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): generated-style Pulumi output type -- presumably produced by
# Pulumi codegen (TODO confirm); prefer regenerating over hand-editing.
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreference(dict):
    """
    A node selector term, associated with the corresponding weight.
    """
    def __init__(__self__, *,
                 match_expressions: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']] = None,
                 match_fields: Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']] = None):
        """
        A node selector term, associated with the corresponding weight.
        :param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressionsArgs'] match_expressions: A list of node selector requirements by node's labels.
        :param Sequence['DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFieldsArgs'] match_fields: A list of node selector requirements by node's fields.
        """
        if match_expressions is not None:
            pulumi.set(__self__, "match_expressions", match_expressions)
        if match_fields is not None:
            pulumi.set(__self__, "match_fields", match_fields)
    @property
    @pulumi.getter(name="matchExpressions")
    def match_expressions(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions']]:
        """
        A list of node selector requirements by node's labels.
        """
        return pulumi.get(self, "match_expressions")
    @property
    @pulumi.getter(name="matchFields")
    def match_fields(self) -> Optional[Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields']]:
        """
        A list of node selector requirements by node's fields.
        """
        return pulumi.get(self, "match_fields")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): generated-style Pulumi output type -- presumably produced by
# Pulumi codegen (TODO confirm); prefer regenerating over hand-editing.
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchExpressions(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): generated-style Pulumi output type -- presumably produced by
# Pulumi codegen (TODO confirm); prefer regenerating over hand-editing.
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionPreferenceMatchFields(dict):
    """
    A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
    """
    def __init__(__self__, *,
                 key: str,
                 operator: str,
                 values: Optional[Sequence[str]] = None):
        """
        A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
        :param str key: The label key that the selector applies to.
        :param str operator: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        :param Sequence[str] values: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)
    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The label key that the selector applies to.
        """
        return pulumi.get(self, "key")
    @property
    @pulumi.getter
    def operator(self) -> str:
        """
        Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
        """
        return pulumi.get(self, "operator")
    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
        """
        return pulumi.get(self, "values")
    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution(dict):
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
"""
def __init__(__self__, *,
node_selector_terms: Sequence['outputs.DatadogAgentSpecClusterChecksRunnerAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecutionNodeSelectorTerms']):
"""
If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If | |
setattr(new_version, attr, data)
return new_version
def get_active_version(self):
    """
    Returns the active version of the motion.

    If no active version is set by now, the last_version is used.
    """
    # Fall back to the newest version when none was explicitly chosen.
    return self.active_version or self.get_last_version()
def get_last_version(self):
    """
    Return the newest version of the motion.
    """
    newest_first = self.versions.order_by('-version_number')
    try:
        return newest_first[0]
    except IndexError:
        # No versions exist yet: hand back a fresh, unsaved one.
        return self.get_new_version()
def is_submitter(self, user):
    """
    Returns True if user is a submitter of this motion, else False.
    """
    submitters = self.submitters.all()
    return user in submitters
def is_supporter(self, user):
    """
    Returns True if user is a supporter of this motion, else False.
    """
    supporters = self.supporters.all()
    return user in supporters
def create_poll(self):
    """
    Create a new poll for this motion and return the new poll object.

    Raises WorkflowError when the current state forbids poll creation.
    """
    # Guard clause: polls are only allowed in certain workflow states.
    if not self.state.allow_create_poll:
        raise WorkflowError('You can not create a poll in state %s.' % self.state.name)
    new_poll = MotionPoll.objects.create(motion=self)
    new_poll.set_options()
    return new_poll
@property
def workflow(self):
    """
    Returns the id of the workflow of the motion.
    """
    # TODO: Rename to workflow_id
    # Derived from the motion's current state rather than stored
    # directly on the motion.
    return self.state.workflow.pk
def set_state(self, state):
    """
    Set the state of the motion.

    :param state: a State instance or the primary key of one.
    """
    # Accept a primary key and resolve it to the State object.
    # isinstance() replaces the ``type(state) is int`` anti-pattern;
    # behavior is unchanged for ordinary integer pks.
    if isinstance(state, int):
        state = State.objects.get(pk=state)
    if not state.dont_set_identifier:
        # Entering this state assigns the motion its identifier
        # (see set_identifier -- defined elsewhere on the model).
        self.set_identifier()
    self.state = state
def reset_state(self, workflow=None):
    """
    Set the state to the default (first) state.

    :param workflow: a Workflow instance, a workflow pk, or None.
        If None and the motion already has a state, the state's own
        workflow is used; otherwise the workflow configured in
        ``config['motions_workflow']``.
    """
    # isinstance() replaces the ``type(workflow) is int`` anti-pattern.
    if isinstance(workflow, int):
        workflow = Workflow.objects.get(pk=workflow)
    if workflow is not None:
        new_state = workflow.first_state
    elif self.state:
        new_state = self.state.workflow.first_state
    else:
        # New motion without a state: take the configured default
        # workflow's first state, falling back to any of its states.
        # Fetch the workflow once instead of twice as before.
        default_workflow = Workflow.objects.get(
            pk=config['motions_workflow'])
        new_state = (default_workflow.first_state or
                     default_workflow.states.all()[0])
    self.set_state(new_state)
def get_agenda_title(self):
    """
    Return a simple title string for the agenda.

    Returns only the motion title so that you have only agenda item
    number and title in the agenda.
    """
    return "{}".format(self)
def get_agenda_list_view_title(self):
    """
    Return a title string for the agenda list view.

    Returns only the motion title so that you have agenda item number,
    title and motion identifier in the agenda.
    Note: It has to be the same return value like in JavaScript.
    """
    verbose_name = _(self._meta.verbose_name)
    if self.identifier:
        return '%s (%s %s)' % (self.title, verbose_name, self.identifier)
    return '%s (%s)' % (self.title, verbose_name)
@property
def agenda_item(self):
    """
    Returns the related agenda item.

    Presumably raises Item.DoesNotExist when no agenda item was
    created for this motion -- TODO confirm caller expectations.
    """
    # The agenda item points back at this motion through a generic
    # foreign key (content_type + object_id).
    content_type = ContentType.objects.get_for_model(self)
    return Item.objects.get(object_id=self.pk, content_type=content_type)
@property
def agenda_item_id(self):
    """
    Returns the id of the agenda item object related to this object.
    """
    # Convenience shortcut; triggers the agenda_item database lookup.
    return self.agenda_item.pk
def get_allowed_actions(self, person):
    """
    Return a dictionary mapping action names to booleans for a person.

    Covered actions: see, update/edit, delete, create_poll, support,
    unsupport, change_state and reset_state.

    NOTE: If you update this function please also update the
    'isAllowed' function on client side in motions/site.js.
    """
    # TODO: Remove this method and implement these things in the views.
    # The manager permission gates most of the actions, so look it up once.
    is_manager = person.has_perm('motions.can_manage')
    required_perm = self.state.required_permission_to_see
    actions = {}
    actions['see'] = (
        person.has_perm('motions.can_see') and
        (not required_perm or
         person.has_perm(required_perm) or
         self.is_submitter(person)))
    actions['update'] = (
        is_manager or
        (self.is_submitter(person) and self.state.allow_submitter_edit))
    actions['delete'] = is_manager
    actions['create_poll'] = is_manager and self.state.allow_create_poll
    actions['support'] = (
        self.state.allow_support and
        config['motions_min_supporters'] > 0 and
        not self.is_submitter(person) and
        not self.is_supporter(person))
    actions['unsupport'] = (
        self.state.allow_support and self.is_supporter(person))
    actions['change_state'] = is_manager
    actions['reset_state'] = is_manager
    # 'edit' is an alias for 'update'.
    actions['edit'] = actions['update']
    return actions
def write_log(self, message_list, person=None):
    """
    Create a MotionLog entry for this motion.

    The message should be in English and translatable,
    e. g. motion.write_log(message_list=[ugettext_noop('Message Text')])
    """
    MotionLog.objects.create(
        motion=self,
        message_list=message_list,
        person=person)
def is_amendment(self):
    """
    Return True if this motion is an amendment.

    A motion counts as an amendment when amendments are activated in the
    config and the motion has a parent motion.
    """
    amendments_active = config['motions_amendments_enabled']
    return amendments_active and self.parent is not None
def get_search_index_string(self):
    """
    Return one whitespace-joined string of searchable motion content.
    """
    searchable_parts = [
        self.title or '',
        self.text or '',
        self.reason or '',
        str(self.category) if self.category else '',
        user_name_helper(self.submitters.all()),
        user_name_helper(self.supporters.all()),
        " ".join(tag.name for tag in self.tags.all()),
    ]
    return " ".join(searchable_parts)
class MotionVersion(RESTModelMixin, models.Model):
    """
    A MotionVersion object saves some data of the motion.
    """

    motion = models.ForeignKey(
        Motion,
        on_delete=models.CASCADE,
        related_name='versions')
    """The motion to which the version belongs."""

    version_number = models.PositiveIntegerField(default=1)
    """An id for this version in relation to a motion.

    Is unique for each motion.
    """

    title = models.CharField(max_length=255)
    """The title of a motion."""

    text = models.TextField()
    """The text of a motion."""

    reason = models.TextField(null=True, blank=True)
    """The reason for a motion."""

    creation_time = models.DateTimeField(auto_now=True)
    """Time when the version was saved."""

    class Meta:
        default_permissions = ()
        unique_together = ("motion", "version_number")

    def __str__(self):
        """Return a string, representing this object."""
        # Unsaved versions have no number yet and are shown as 'new'.
        counter = self.version_number or ugettext_lazy('new')
        return "Motion %s, Version %s" % (self.motion_id, counter)

    @property
    def active(self):
        """Return True, if the version is the active version of a motion. Else: False."""
        # 'active_version' is the reverse relation from the motion's
        # active-version foreign key.
        return self.active_version.exists()

    def get_root_rest_element(self):
        """
        Returns the motion to this instance which is the root REST element.
        """
        return self.motion
class Category(RESTModelMixin, models.Model):
    """
    Model for categories of motions.

    A category groups motions and provides the prefix used when building
    motion identifiers.
    """
    access_permissions = CategoryAccessPermissions()

    name = models.CharField(max_length=255)
    """Name of the category."""

    prefix = models.CharField(blank=True, max_length=32)
    """Prefix of the category.

    Used to build the identifier of a motion.
    """

    class Meta:
        default_permissions = ()
        ordering = ['prefix']

    def __str__(self):
        """Return the category's name."""
        return self.name
class MotionLog(RESTModelMixin, models.Model):
    """Saves a log message for a motion."""

    motion = models.ForeignKey(
        Motion,
        on_delete=models.CASCADE,
        related_name='log_messages')
    """The motion to which the object belongs."""

    message_list = JSONField()
    """
    The log message. It should be a list of strings in English.
    """

    person = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        null=True)
    """A user object, who created the log message. Optional."""

    time = models.DateTimeField(auto_now=True)
    """The time when the logged action was performed."""

    class Meta:
        default_permissions = ()
        ordering = ['-time']

    def __str__(self):
        """
        Return a string representing the log message: the formatted time,
        the translated message parts and, if present, the acting person.
        """
        time = formats.date_format(self.time, 'DATETIME_FORMAT')
        # Translate every message part and join them after the timestamp.
        time_and_messages = '%s ' % time + ''.join(map(_, self.message_list))
        if self.person is not None:
            return _('%(time_and_messages)s by %(person)s') % {'time_and_messages': time_and_messages,
                                                               'person': self.person}
        return time_and_messages

    def get_root_rest_element(self):
        """
        Returns the motion to this instance which is the root REST element.
        """
        return self.motion
class MotionVote(RESTModelMixin, BaseVote):
    """Saves the votes for a MotionPoll.

    There should always be three MotionVote objects for each poll,
    one for 'yes', 'no', and 'abstain'."""

    option = models.ForeignKey(
        'MotionOption',
        on_delete=models.CASCADE)
    """The option object, to which the vote belongs."""

    class Meta:
        default_permissions = ()

    def get_root_rest_element(self):
        """
        Returns the motion to this instance which is the root REST element.
        """
        return self.option.poll.motion
class MotionOption(RESTModelMixin, BaseOption):
    """Links between the MotionPollClass and the MotionVoteClass.

    There should be one MotionOption object for each poll."""

    poll = models.ForeignKey(
        'MotionPoll',
        on_delete=models.CASCADE)
    """The poll object, to which the object belongs."""

    vote_class = MotionVote
    """The VoteClass, to which this Class links."""

    class Meta:
        default_permissions = ()

    def get_root_rest_element(self):
        """
        Returns the motion to this instance which is the root REST element.
        """
        return self.poll.motion
class MotionPoll(RESTModelMixin, CollectDefaultVotesMixin, BasePoll):
    """The class to save the vote result for a motion poll."""

    motion = models.ForeignKey(
        Motion,
        on_delete=models.CASCADE,
        related_name='polls')
    """The motion to which the object belongs."""

    option_class = MotionOption
    """The option class, which links between this object and the votes."""

    vote_values = ['Yes', 'No', 'Abstain']
    """The possible answers for the poll: 'Yes', 'No' and 'Abstain'."""

    class Meta:
        default_permissions = ()

    def __str__(self):
        """
        Representation method only for debugging purposes.
        """
        return 'MotionPoll for motion %s' % self.motion

    def set_options(self):
        """Create the option class for this poll."""
        # TODO: maybe it is possible with .create() to call this without poll=self
        #       or call this in save()
        self.get_option_class()(poll=self).save()

    def get_percent_base_choice(self):
        """Return the config value deciding the 100%-base of the poll."""
        return config['motions_poll_100_percent_base']

    def get_slide_context(self, **context):
        """Return the context for the projector slide of this poll."""
        return super(MotionPoll, self).get_slide_context(poll=self)

    def get_root_rest_element(self):
        """
        Returns the motion to this instance which is the root REST element.
        """
        return self.motion
class State(RESTModelMixin, models.Model):
"""
Defines a state for a motion.
Every state belongs to a workflow. All states of a workflow are linked together
via 'next_states'. One of these states is the first state, but this
is saved in the workflow table (one-to-one relation). In every state
you can configure some handling of a motion. See the following fields
for more information.
"""
name = models.CharField(max_length=255)
"""A string representing the state."""
action_word = models.CharField(max_length=255)
"""An alternative string to be used for a button to switch to this state."""
workflow | |
output.append(item)
try:
raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
except ValueError as e:
return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
ec = {'AWS.EC2.DeletedFleets': raw}
human_readable = tableToMarkdown('AWS Deleted Fleets', data)
return_outputs(human_readable, ec)
def describe_fleets_command(args):
    """
    Describe EC2 fleets and return the results to the war room.

    Optional args: filters, FleetIds, MaxResults, NextToken, plus the usual
    region/role session arguments.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    data = []
    kwargs = {}
    output = []
    if args.get('filters') is not None:
        kwargs.update({'Filters': parse_filter_field(args.get('filters'))})
    if args.get('FleetIds') is not None:
        kwargs.update({'FleetIds': parse_resource_ids(args.get('FleetIds'))})
    if args.get('MaxResults') is not None:
        # boto3 expects an integer; demisto args arrive as strings.
        kwargs.update({'MaxResults': int(args.get('MaxResults'))})
    if args.get('NextToken') is not None:
        kwargs.update({'NextToken': args.get('NextToken')})
    response = client.describe_fleets(**kwargs)
    for i, item in enumerate(response['Fleets']):
        data.append({
            # 'ActivityStatus' is not always present in the response. The
            # original check "'ActivityStatus' in item.keys() is not None" was
            # an accidental chained comparison; .get() expresses the intent.
            'ActivityStatus': item.get('ActivityStatus', "None"),
            'FleetId': item['FleetId'],
            'FleetState': item['FleetState'],
            'FulfilledCapacity': item['FulfilledCapacity'],
            'FulfilledOnDemandCapacity': item['FulfilledOnDemandCapacity'],
            'LaunchTemplateId': item['LaunchTemplateConfigs'][0]['LaunchTemplateSpecification'][
                'LaunchTemplateId'],
            'CreateTime': datetime.strftime(item['CreateTime'], '%Y-%m-%dT%H:%M:%SZ'),
            'TotalTargetCapacity': item['TargetCapacitySpecification']['TotalTargetCapacity'],
            'OnDemandTargetCapacity': item['TargetCapacitySpecification']['OnDemandTargetCapacity'],
            'SpotTargetCapacity': item['TargetCapacitySpecification']['SpotTargetCapacity'],
            'DefaultTargetCapacityType': item['TargetCapacitySpecification']['DefaultTargetCapacityType'],
            'TerminateInstancesWithExpiration': item['TerminateInstancesWithExpiration'],
            'Type': item['Type'],
            'InstanceInterruptionBehavior': item['SpotOptions']['InstanceInterruptionBehavior'],
        })
        if 'Tags' in item:
            # Flatten the fleet tags into the human-readable row.
            for tag in item['Tags']:
                data[i].update({
                    tag['Key']: tag['Value']
                })
        output.append(item)
    try:
        raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
    except ValueError as e:
        return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
    ec = {'AWS.EC2.Fleet(val.FleetId === obj.FleetId)': raw}
    human_readable = tableToMarkdown('AWS EC2 Fleets', data)
    return_outputs(human_readable, ec)
def describe_fleet_instances_command(args):
    """
    List the active instances of an EC2 fleet and return them to the war room.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    # Used to recover the region the session was created with.
    obj = vars(client._client_config)
    data = []
    kwargs = {}
    output = []
    if args.get('filters') is not None:
        kwargs.update({'Filters': parse_filter_field(args.get('filters'))})
    if args.get('FleetId') is not None:
        kwargs.update({'FleetId': args.get('FleetId')})
    if args.get('MaxResults') is not None:
        kwargs.update({'MaxResults': int(args.get('MaxResults'))})
    if args.get('NextToken') is not None:
        kwargs.update({'NextToken': args.get('NextToken')})
    response = client.describe_fleet_instances(**kwargs)
    for i, item in enumerate(response['ActiveInstances']):
        demisto.log(str(item))  # NOTE(review): leftover debug output — consider removing
        data.append({
            'InstanceId': item['InstanceId'],
            'InstanceType': item['InstanceType'],
            'SpotInstanceRequestId': item['SpotInstanceRequestId'],
            'FleetId': response['FleetId'],
            'Region': obj['_user_provided_options']['region_name'],
        })
        if 'InstanceHealth' in item:
            # Attach the health to the instance's own row; the original code
            # appended a new, mostly empty row instead.
            data[i].update({'InstanceHealth': item['InstanceHealth']})
        output.append(item)
    try:
        raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
    except ValueError as e:
        return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
    ec = {'AWS.EC2.Fleet(val.FleetId === obj.FleetId).ActiveInstances': raw}
    human_readable = tableToMarkdown('AWS EC2 Fleets Instances', data)
    return_outputs(human_readable, ec)
def modify_fleet_command(args):
    """
    Modify an EC2 fleet's target capacity / termination policy and report
    success or failure to the war room.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    kwargs = {}
    if args.get('FleetId') is not None:
        # ModifyFleet takes a single 'FleetId'; the original code passed the
        # unsupported key 'FleetIds'.
        kwargs.update({'FleetId': args.get('FleetId')})
    if args.get('ExcessCapacityTerminationPolicy') is not None:
        kwargs.update({'ExcessCapacityTerminationPolicy': args.get('ExcessCapacityTerminationPolicy')})
    TargetCapacitySpecification = {}  # type: dict
    if args.get('TotalTargetCapacity') is not None:
        TargetCapacitySpecification.update({
            'TotalTargetCapacity': int(args.get('TotalTargetCapacity'))
        })
    if args.get('OnDemandTargetCapacity') is not None:
        TargetCapacitySpecification.update({
            'OnDemandTargetCapacity': int(args.get('OnDemandTargetCapacity'))
        })
    if args.get('SpotTargetCapacity') is not None:
        TargetCapacitySpecification.update({
            'SpotTargetCapacity': int(args.get('SpotTargetCapacity'))
        })
    if args.get('DefaultTargetCapacityType') is not None:
        TargetCapacitySpecification.update({
            'DefaultTargetCapacityType': args.get('DefaultTargetCapacityType')
        })
    if TargetCapacitySpecification:
        kwargs.update({'TargetCapacitySpecification': TargetCapacitySpecification})
    response = client.modify_fleet(**kwargs)
    # boto3 returns a boolean here, not the string 'True', so the original
    # equality check could never succeed (and the concat below would raise).
    if response['Return']:
        demisto.results("AWS EC2 Fleet was successfully modified")
    else:
        demisto.results("AWS EC2 Fleet was not successfully modified: " + str(response['Return']))
def _template_block_device_mappings(args):
    """Build a single BlockDeviceMappings entry from command args (or {})."""
    mapping = {}  # type: dict
    if args.get('deviceName') is not None:
        mapping['DeviceName'] = args.get('deviceName')
    if args.get('VirtualName') is not None:
        # Must be a plain string; the original code accidentally wrapped it
        # in a one-element set.
        mapping['VirtualName'] = args.get('VirtualName')
    ebs = {}  # type: dict
    if args.get('ebsVolumeSize') is not None:
        ebs['VolumeSize'] = int(args.get('ebsVolumeSize'))
    if args.get('ebsVolumeType') is not None:
        ebs['VolumeType'] = args.get('ebsVolumeType')
    if args.get('ebsIops') is not None:
        ebs['Iops'] = int(args.get('ebsIops'))
    if args.get('ebsDeleteOnTermination') is not None:
        ebs['DeleteOnTermination'] = args.get('ebsDeleteOnTermination') == 'True'
    if args.get('ebsKmsKeyId') is not None:
        ebs['KmsKeyId'] = args.get('ebsKmsKeyId')
    if args.get('ebsSnapshotId') is not None:
        ebs['SnapshotId'] = args.get('ebsSnapshotId')
    if args.get('ebsEncrypted') is not None:
        ebs['Encrypted'] = args.get('ebsEncrypted') == 'True'
    if ebs:
        # Built independently of 'deviceName' so ebs* args alone no longer
        # raise a KeyError as in the original code.
        mapping['Ebs'] = ebs
    if args.get('NoDevice') is not None:
        # Plain string, not a set (same fix as VirtualName).
        mapping['NoDevice'] = args.get('NoDevice')
    return mapping


def _template_network_interfaces(args):
    """Build a single NetworkInterfaces entry from command args (or {})."""
    nic = {}  # type: dict
    if args.get('AssociatePublicIpAddress') is not None:
        nic['AssociatePublicIpAddress'] = args.get('AssociatePublicIpAddress')
    if args.get('NetworkInterfacesDeleteOnTermination') is not None:
        nic['DeleteOnTermination'] = args.get('NetworkInterfacesDeleteOnTermination')
    if args.get('NetworkInterfacesDescription') is not None:
        nic['Description'] = args.get('NetworkInterfacesDescription')
    if args.get('NetworkInterfacesDeviceIndex') is not None:
        nic['DeviceIndex'] = args.get('NetworkInterfacesDeviceIndex')
    if args.get('NetworkInterfaceGroups') is not None:
        nic['Groups'] = parse_resource_ids(args.get('NetworkInterfaceGroups'))
    if args.get('Ipv6AddressCount') is not None:
        nic['Ipv6AddressCount'] = args.get('Ipv6AddressCount')
    if args.get('Ipv6Addresses') is not None:
        nic['Ipv6Addresses'] = [
            {'Ipv6Address': address} for address in args.get('Ipv6Addresses').split(',')]
    if args.get('NetworkInterfaceId') is not None:
        nic['NetworkInterfaceId'] = args.get('NetworkInterfaceId')
    if args.get('PrivateIpAddress') is not None:
        nic['PrivateIpAddress'] = args.get('PrivateIpAddress')
    if args.get('SubnetId') is not None:
        nic['SubnetId'] = args.get('SubnetId')
    return nic


def _template_placement(args):
    """Collect all Placement-related args into one dict (or {})."""
    arg_to_key = {
        'AvailabilityZone': 'AvailabilityZone',
        'AvailabilityZoneGroupName': 'GroupName',
        'PlacementTenancy': 'Tenancy',
        'PlacementAffinity': 'Affinity',
        'PlacementHostId': 'HostId',
        'PlacementSpreadDomain': 'SpreadDomain',
    }
    placement = {}  # type: dict
    for arg, key in arg_to_key.items():
        if args.get(arg) is not None:
            placement[key] = args.get(arg)
    return placement


def _template_tag_specifications(args):
    """Parse the 'Tags' arg ('type:key=value;...#type:...') into TagSpecifications."""
    specs = []  # type: list
    if args.get('Tags') is not None:
        for entry in args.get('Tags').split('#'):
            resource_type, tags = entry.split(':', 1)
            specs.append({
                'ResourceType': resource_type,
                'Tags': parse_tag_field(tags),
            })
    return specs


def _template_instance_market_options(args):
    """Build InstanceMarketOptions (including SpotOptions) from args (or {})."""
    market = {}  # type: dict
    if args.get('MarketType') is not None:
        market['MarketType'] = args.get('MarketType')
    spot = {}  # type: dict
    if args.get('SpotInstanceType') is not None:
        spot['SpotInstanceType'] = args.get('SpotInstanceType')
    if args.get('BlockDurationMinutes') is not None:
        spot['BlockDurationMinutes'] = args.get('BlockDurationMinutes')
    if args.get('SpotValidUntil') is not None:
        spot['ValidUntil'] = parse_date(args.get('SpotValidUntil'))
    if args.get('SpotInstanceInterruptionBehavior') is not None:
        spot['InstanceInterruptionBehavior'] = args.get('SpotInstanceInterruptionBehavior')
    if args.get('SpotMaxPrice') is not None:
        spot['MaxPrice'] = args.get('SpotMaxPrice')
    if spot:
        market['SpotOptions'] = spot
    return market


def create_launch_template_command(args):
    """
    Create an EC2 launch template from the command arguments and return
    the created template to the war room.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    kwargs = {}
    if args.get('ClientToken') is not None:
        kwargs.update({'ClientToken': args.get('ClientToken')})
    if args.get('LaunchTemplateName') is not None:
        kwargs.update({'LaunchTemplateName': args.get('LaunchTemplateName')})
    if args.get('VersionDescription') is not None:
        kwargs.update({'VersionDescription': args.get('VersionDescription')})

    LaunchTemplateData = {}  # type: dict
    if args.get('KernelId') is not None:
        LaunchTemplateData.update({'KernelId': args.get('KernelId')})
    if args.get('EbsOptimized') is not None:
        LaunchTemplateData.update({'EbsOptimized': args.get('EbsOptimized')})
    if args.get('iamInstanceProfileArn') is not None and args.get('iamInstanceProfileName') is not None:
        LaunchTemplateData.update({
            'IamInstanceProfile': {
                'Arn': args.get('iamInstanceProfileArn'),
                'Name': args.get('iamInstanceProfileName')}
        })
    BlockDeviceMappings = _template_block_device_mappings(args)
    if BlockDeviceMappings:
        LaunchTemplateData.update({'BlockDeviceMappings': [BlockDeviceMappings]})
    NetworkInterfaces = _template_network_interfaces(args)
    if NetworkInterfaces:
        LaunchTemplateData.update({'NetworkInterfaces': [NetworkInterfaces]})
    # Simple one-to-one string arguments.
    for key in ('ImageId', 'InstanceType', 'KeyName', 'RamDiskId', 'DisableApiTermination',
                'InstanceInitiatedShutdownBehavior', 'UserData'):
        if args.get(key) is not None:
            LaunchTemplateData.update({key: args.get(key)})
    if args.get('Monitoring') is not None:
        LaunchTemplateData.update({'Monitoring': {'Enabled': args.get('Monitoring')}})
    Placement = _template_placement(args)
    if Placement:
        # Merge all placement args into one dict; the original code replaced
        # 'Placement' on every update, keeping only the last argument given.
        LaunchTemplateData.update({'Placement': Placement})
    TagSpecifications = _template_tag_specifications(args)
    if TagSpecifications:
        LaunchTemplateData.update({'TagSpecifications': TagSpecifications})
    if args.get('ElasticGpuSpecificationsType') is not None:
        LaunchTemplateData.update({'ElasticGpuSpecifications': [
            {'Type': gpu_type} for gpu_type in multi_split(args.get('ElasticGpuSpecificationsType'))]})
    if args.get('ElasticInferenceAcceleratorsType') is not None:
        # The original code guarded this with ElasticGpuSpecifications by
        # mistake, so accelerators were dropped unless GPUs were also given.
        LaunchTemplateData.update({'ElasticInferenceAccelerators': [
            {'Type': acc_type} for acc_type in multi_split(args.get('ElasticInferenceAcceleratorsType'))]})
    if args.get('securityGroupIds') is not None:
        LaunchTemplateData.update({'SecurityGroupIds': parse_resource_ids(args.get('securityGroupIds'))})
    if args.get('securityGroups') is not None:
        LaunchTemplateData.update({'SecurityGroups': parse_resource_ids(args.get('securityGroups'))})
    InstanceMarketOptions = _template_instance_market_options(args)
    if InstanceMarketOptions:
        LaunchTemplateData.update({'InstanceMarketOptions': InstanceMarketOptions})
    if LaunchTemplateData:
        kwargs.update({'LaunchTemplateData': LaunchTemplateData})

    response = client.create_launch_template(**kwargs)
    template = response['LaunchTemplate']
    data = [{
        'LaunchTemplateId': template['LaunchTemplateId'],
        'LaunchTemplateName': template['LaunchTemplateName'],
        'CreateTime': template['CreateTime'],
        'CreatedBy': template['CreatedBy'],
        'DefaultVersionNumber': template['DefaultVersionNumber'],
        'LatestVersionNumber': template['LatestVersionNumber'],
    }]
    try:
        raw = json.loads(json.dumps(template, cls=DatetimeEncoder))
        data_hr = json.loads(json.dumps(data, cls=DatetimeEncoder))
    except ValueError as e:
        return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
    ec = {'AWS.EC2.LaunchTemplates': raw}
    human_readable = tableToMarkdown('AWS LaunchTemplates', data_hr)
    return_outputs(human_readable, ec)
def delete_launch_template_command(args):
    """
    Delete a launch template by id or name and return the deleted
    template's details to the war room.
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    obj = vars(client._client_config)  # noqa:F841
    kwargs = {}
    if args.get('LaunchTemplateId') is not None:
        kwargs.update({'LaunchTemplateId': args.get('LaunchTemplateId')})
    if args.get('LaunchTemplateName') is not None:
        kwargs.update({'LaunchTemplateName': args.get('LaunchTemplateName')})
    response = client.delete_launch_template(**kwargs)
    item = response['LaunchTemplate']
    data = [{
        'LaunchTemplateId': item['LaunchTemplateId'],
        'LaunchTemplateName': item['LaunchTemplateName'],
        'CreateTime': datetime.strftime(item['CreateTime'], '%Y-%m-%dT%H:%M:%SZ'),
        'CreatedBy': item['CreatedBy'],
        'DefaultVersionNumber': item['DefaultVersionNumber'],
        'LatestVersionNumber': item['LatestVersionNumber'],
    }]
    output = [item]
    try:
        raw = json.loads(json.dumps(output, cls=DatetimeEncoder))
    except ValueError as e:
        return_error('Could not decode/encode the raw response - {err_msg}'.format(err_msg=e))
    ec = {'AWS.EC2.DeletedLaunchTemplates': raw}
    human_readable = tableToMarkdown('AWS Deleted Launch Templates', data)
    return_outputs(human_readable, ec)
def modify_image_attribute_command(args):
    """
    Modify an attribute of an AMI (description, launch permission, product
    codes, ...).
    """
    client = aws_session(
        region=args.get('region'),
        roleArn=args.get('roleArn'),
        roleSessionName=args.get('roleSessionName'),
        roleSessionDuration=args.get('roleSessionDuration'),
    )
    kwargs = {}
    if args.get('Attribute') is not None:
        kwargs.update({'Attribute': args.get('Attribute')})
    if args.get('Description') is not None:
        kwargs.update({'Description': {'Value': args.get('Description')}})
    if args.get('ImageId') is not None:
        kwargs.update({'ImageId': args.get('ImageId')})
    add_permissions = []
    if args.get('LaunchPermission-Add-Group') is not None:
        add_permissions.append({'Group': args.get('LaunchPermission-Add-Group')})
    if args.get('LaunchPermission-Add-UserId') is not None:
        add_permissions.append({'UserId': args.get('LaunchPermission-Add-UserId')})
    remove_permissions = []
    if args.get('LaunchPermission-Remove-Group') is not None:
        remove_permissions.append({'Group': args.get('LaunchPermission-Remove-Group')})
    if args.get('LaunchPermission-Remove-UserId') is not None:
        remove_permissions.append({'UserId': args.get('LaunchPermission-Remove-UserId')})
    launch_permission = {}  # type: dict
    if add_permissions:
        launch_permission['Add'] = add_permissions
    if remove_permissions:
        launch_permission['Remove'] = remove_permissions
    if launch_permission:
        # Only send LaunchPermission when a permission arg was actually given.
        # The original dict was initialised with empty 'Add'/'Remove' lists and
        # was therefore always truthy, so it was sent on every call.
        kwargs.update({'LaunchPermission': launch_permission})
    if args.get('OperationType') is not None:
        kwargs.update({'OperationType': args.get('OperationType')})
    if args.get('ProductCodes') is not None:
        kwargs.update({'ProductCodes': parse_resource_ids(args.get('ProductCodes'))})
    if args.get('UserGroups') is not None:
        kwargs.update({'UserGroups': parse_resource_ids(args.get('UserGroups'))})
    if args.get('UserIds') is not None:
        kwargs.update({'UserIds': parse_resource_ids(args.get('UserIds'))})
    if args.get('Value') is not None:
        kwargs.update({'Value': args.get('Value')})
    response = client.modify_image_attribute(**kwargs)
    if response['ResponseMetadata']['HTTPStatusCode'] == 200:
        demisto.results('Image attribute sucessfully modified')
"""COMMAND BLOCK"""
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'test-module':
# This is the call made when pressing the integration test button.
client = aws_session()
response = client.describe_regions()
if response['ResponseMetadata']['HTTPStatusCode'] == 200:
demisto.results('ok')
elif demisto.command() == 'aws-ec2-describe-regions':
describe_regions_command(demisto.args())
elif demisto.command() | |
<reponame>Ling-Jun/geo-deep-learning<filename>train_segmentation.py
import os
import time
import h5py
import torch
import warnings
import functools
import numpy as np
from PIL import Image
from tqdm import tqdm
from pathlib import Path
from shutil import copy
from datetime import datetime
from typing import Sequence
from collections import OrderedDict
from omegaconf import DictConfig
from omegaconf.errors import ConfigKeyError
try:
from pynvml import *
except ModuleNotFoundError:
warnings.warn(f"The python Nvidia management library could not be imported. Ignore if running on CPU only.")
from torch.utils.data import DataLoader
from sklearn.utils import compute_sample_weight
from utils import augmentation as aug, create_dataset
from utils.logger import InformationLogger, save_logs_to_bucket, tsv_line, dict_path
from utils.metrics import report_classification, create_metrics_dict, iou
from models.model_choice import net, load_checkpoint, verify_weights
from utils.utils import load_from_checkpoint, get_device_ids, gpu_stats, get_key_def, read_modalities
from utils.visualization import vis_from_batch
from mlflow import log_params, set_tracking_uri, set_experiment, start_run
# Set the logging file
from utils import utils
logging = utils.get_logger(__name__) # import logging
def flatten_labels(annotations):
    """Flatten a label tensor to 1-D."""
    return annotations.view(-1)
def flatten_outputs(predictions, number_of_classes):
    """Flatten an (N, C, H, W) prediction batch to (N*H*W, C)."""
    channels_last = predictions.permute(0, 2, 3, 1).contiguous()
    return channels_last.view(-1, number_of_classes)
def loader(path):
    """Open the image at *path* with PIL and return the Image object."""
    return Image.open(path)
def create_dataloader(samples_folder: Path,
                      batch_size: int,
                      eval_batch_size: int,
                      gpu_devices_dict: dict,
                      sample_size: int,
                      dontcare_val: int,
                      crop_size: int,
                      num_bands: int,
                      BGR_to_RGB: bool,
                      scale: Sequence,
                      cfg: DictConfig,
                      dontcare2backgr: bool = False,
                      calc_eval_bs: bool = False,
                      debug: bool = False):
    """
    Function to create dataloader objects for training, validation and test datasets.

    :param samples_folder: path to folder containing .hdf5 files if task is segmentation
    :param batch_size: (int) batch size for training
    :param eval_batch_size: (int) batch size for validation and test
    :param gpu_devices_dict: (dict) dictionary where each key contains an available GPU with its ram info stored as value
    :param sample_size: (int) size of hdf5 samples (used to evaluate eval batch-size)
    :param dontcare_val: (int) value in label to be ignored during loss calculation
    :param crop_size: (int) size of crop applied by the geometric transforms
    :param num_bands: (int) number of bands in imagery
    :param BGR_to_RGB: (bool) if True, BGR channels will be flipped to RGB
    :param scale: (List) imagery data will be scaled to this min and max value (ex.: 0 to 1)
    :param cfg: (dict) Parameters found in the yaml config file.
    :param dontcare2backgr: (bool) if True, all dontcare values in label will be replaced with 0 (background value)
                            before training
    :param calc_eval_bs: (bool) if True, estimate a safe evaluation batch size from the GPU ram
    :param debug: (bool) if True, datasets are created in debug mode
    :return: trn_dataloader, val_dataloader, tst_dataloader
    """
    # Log first, then raise the real error. The original pattern
    # "raise logging.critical(Error(...))" raised None (logging.critical
    # returns None), which surfaced as an unrelated TypeError.
    if not samples_folder.is_dir():
        logging.critical(f'\nCould not locate: {samples_folder}')
        raise FileNotFoundError(f'\nCould not locate: {samples_folder}')
    if not len([f for f in samples_folder.glob('**/*.hdf5')]) >= 1:
        logging.critical(f"\nCouldn't locate .hdf5 files in {samples_folder}")
        raise FileNotFoundError(f"\nCouldn't locate .hdf5 files in {samples_folder}")
    num_samples, samples_weight = get_num_samples(samples_path=samples_folder, params=cfg, dontcare=dontcare_val)
    # The original condition parsed as "(not trn >= bs) and (val >= bs)" and
    # therefore missed the case where only the validation set is too small.
    if num_samples['trn'] < batch_size or num_samples['val'] < batch_size:
        logging.critical("\nNumber of samples in .hdf5 files is less than batch size")
        raise ValueError("\nNumber of samples in .hdf5 files is less than batch size")
    logging.info(f"\nNumber of samples : {num_samples}")
    dataset_constr = create_dataset.SegmentationDataset
    datasets = []
    for subset in ["trn", "val", "tst"]:
        datasets.append(dataset_constr(samples_folder, subset, num_bands,
                                       max_sample_count=num_samples[subset],
                                       dontcare=dontcare_val,
                                       radiom_transform=aug.compose_transforms(params=cfg,
                                                                               dataset=subset,
                                                                               aug_type='radiometric'),
                                       geom_transform=aug.compose_transforms(params=cfg,
                                                                             dataset=subset,
                                                                             aug_type='geometric',
                                                                             dontcare=dontcare_val,
                                                                             crop_size=crop_size),
                                       totensor_transform=aug.compose_transforms(params=cfg,
                                                                                 dataset=subset,
                                                                                 input_space=BGR_to_RGB,
                                                                                 scale=scale,
                                                                                 dontcare2backgr=dontcare2backgr,
                                                                                 dontcare=dontcare_val,
                                                                                 aug_type='totensor'),
                                       params=cfg,
                                       debug=debug))
    trn_dataset, val_dataset, tst_dataset = datasets
    # Number of workers
    if cfg.training.num_workers:
        num_workers = cfg.training.num_workers
    else:  # https://discuss.pytorch.org/t/guidelines-for-assigning-num-workers-to-dataloader/813/5
        num_workers = len(gpu_devices_dict.keys()) * 4 if len(gpu_devices_dict.keys()) > 1 else 4
    samples_weight = torch.from_numpy(samples_weight)
    sampler = torch.utils.data.sampler.WeightedRandomSampler(samples_weight.type('torch.DoubleTensor'),
                                                             len(samples_weight))
    if gpu_devices_dict and calc_eval_bs:
        max_pix_per_mb_gpu = 280  # TODO: this value may need to be finetuned
        eval_batch_size = calc_eval_batchsize(gpu_devices_dict, batch_size, sample_size, max_pix_per_mb_gpu)
    trn_dataloader = DataLoader(trn_dataset, batch_size=batch_size, num_workers=num_workers, sampler=sampler,
                                drop_last=True)
    val_dataloader = DataLoader(val_dataset, batch_size=eval_batch_size, num_workers=num_workers, shuffle=False,
                                drop_last=True)
    tst_dataloader = DataLoader(tst_dataset, batch_size=eval_batch_size, num_workers=num_workers, shuffle=False,
                                drop_last=True) if num_samples['tst'] > 0 else None
    return trn_dataloader, val_dataloader, tst_dataloader
def calc_eval_batchsize(gpu_devices_dict: dict, batch_size: int, sample_size: int, max_pix_per_mb_gpu: int = 280):
    """
    Calculate the largest evaluation batch size expected to fit on the GPU.

    Uses a rule of thumb ("pixels per MB of GPU RAM") with a hardcoded
    threshold; the batch size often needs to be smaller for evaluation when
    crop is applied during training.

    @param gpu_devices_dict: dictionary containing info on GPU devices as returned by lst_device_ids (utils.py)
    @param batch_size: batch size for training
    @param sample_size: size of hdf5 samples
    @param max_pix_per_mb_gpu: rule-of-thumb threshold of pixels per MB of GPU RAM
    @return: a downgraded evaluation batch size if the original batch size is considered too high compared to
             the GPU's memory, otherwise the training batch size unchanged
    """
    num_devices = len(gpu_devices_dict.keys())
    # The smallest GPU dictates the limit.
    smallest_gpu_ram = min(gpu_info['max_ram'] for _, gpu_info in gpu_devices_dict.items())
    pix_per_mb_gpu = (batch_size / num_devices * sample_size ** 2) / smallest_gpu_ram
    if pix_per_mb_gpu < max_pix_per_mb_gpu:
        return batch_size
    eval_batch_size = smallest_gpu_ram * max_pix_per_mb_gpu / sample_size ** 2
    # Round down to a multiple of the device count, with a floor of 1.
    eval_batch_size_rd = int(eval_batch_size - eval_batch_size % num_devices)
    eval_batch_size_rd = max(eval_batch_size_rd, 1)
    logging.warning(f'Validation and test batch size downgraded from {batch_size} to {eval_batch_size} '
                    f'based on max ram of smallest GPU available')
    return eval_batch_size_rd
def get_num_samples(samples_path, params, dontcare):
    """
    Retrieve the number of samples per subset, either from the config file or directly from the hdf5 files,
    and compute balanced sample weights for the training subset.
    :param samples_path: (Path) Path to samples folder
    :param params: (dict) Parameters found in the yaml config file.
    :param dontcare: unused; kept for backward compatibility with existing callers
    :return: (tuple) dict of number of samples for trn, val and tst; array of balanced weights for trn samples
    """
    num_samples = {'trn': 0, 'val': 0, 'tst': 0}
    weights = []
    samples_weight = None
    for subset in ['trn', 'val', 'tst']:
        # Single open per subset (the original opened the same file up to three times).
        with h5py.File(samples_path.joinpath(f"{subset}_samples.hdf5"), 'r') as hdf5_file:
            file_num_samples = len(hdf5_file['map_img'])
            if get_key_def(f"num_{subset}_samples", params['training'], None) is not None:
                num_samples[subset] = get_key_def(f"num_{subset}_samples", params['training'])
                if num_samples[subset] > file_num_samples:
                    # Fix: the original did `raise logging.critical(IndexError(...))`; logging.critical()
                    # returns None, so that raised an opaque TypeError instead of the intended IndexError.
                    error_msg = (f"\nThe number of training samples in the configuration file ({num_samples[subset]}) "
                                 f"exceeds the number of samples in the hdf5 training dataset ({file_num_samples}).")
                    logging.critical(error_msg)
                    raise IndexError(error_msg)
            else:
                num_samples[subset] = file_num_samples
            if subset == 'trn':
                # Encode each training sample by the string of unique classes it contains, then
                # compute balanced weights over those class-combination strings.
                for sample_idx in range(num_samples[subset]):
                    unique_labels = np.unique(hdf5_file['map_img'][sample_idx])
                    weights.append(''.join([str(int(label)) for label in unique_labels]))
                samples_weight = compute_sample_weight('balanced', weights)
    return num_samples, samples_weight
def vis_from_dataloader(vis_params,
                        eval_loader,
                        model,
                        ep_num,
                        output_path,
                        dataset='',
                        scale=None,
                        device=None,
                        vis_batch_range=None):
    """
    Use a model and dataloader to provide outputs that can then be sent to vis_from_batch function to visualize performances of model, for example.
    :param vis_params: (dict) Parameters found in the yaml config file useful for visualization
    :param eval_loader: data loader
    :param model: model to evaluate
    :param ep_num: epoch index (for file naming purposes)
    :param output_path: (Path) root folder under which a 'visualization' directory is created
    :param dataset: (str) 'val or 'tst'
    :param scale: scale to which values in sat img have been redefined, passed through to vis_from_batch
    :param device: device used by pytorch (cpu ou cuda)
    :param vis_batch_range: (tuple) (min, max, increment) range of batch indices to visualize; if None, skip
    :return:
    """
    vis_path = output_path.joinpath(f'visualization')
    logging.info(f'Visualization figures will be saved to {vis_path}\n')
    # Fix: the original unpacked vis_batch_range BEFORE its None check further down, so passing
    # None (the documented default) crashed with a TypeError instead of skipping visualization.
    if vis_batch_range is None:
        logging.info('No visualization batch range provided; skipping visualization.\n')
        return
    min_vis_batch, max_vis_batch, increment = vis_batch_range
    model.eval()
    with tqdm(eval_loader, dynamic_ncols=True) as _tqdm:
        for batch_index, data in enumerate(_tqdm):
            if batch_index in range(min_vis_batch, max_vis_batch, increment):
                with torch.no_grad():
                    try:  # For HPC when device 0 not available. Error: RuntimeError: CUDA error: invalid device ordinal
                        inputs = data['sat_img'].to(device)
                        labels = data['map_img'].to(device)
                    except RuntimeError:
                        logging.exception(f'Unable to use device {device}. Trying "cuda:0"')
                        device = torch.device('cuda')
                        inputs = data['sat_img'].to(device)
                        labels = data['map_img'].to(device)
                    outputs = model(inputs)
                    # torchvision segmentation models return an OrderedDict with outputs in 'out' key.
                    if isinstance(outputs, OrderedDict):
                        outputs = outputs['out']
                    vis_from_batch(vis_params, inputs, outputs,
                                   batch_index=batch_index,
                                   vis_path=vis_path,
                                   labels=labels,
                                   dataset=dataset,
                                   ep_num=ep_num,
                                   scale=scale)
    logging.info(f'Saved visualization figures.\n')
def training(train_loader,
model,
criterion,
optimizer,
scheduler,
num_classes,
batch_size,
ep_idx,
progress_log,
device,
scale,
vis_params,
debug=False
):
"""
Train the model and return the metrics of the training epoch
:param train_loader: training data loader
:param model: model to train
:param criterion: loss criterion
:param optimizer: optimizer to use
:param scheduler: learning rate scheduler
:param num_classes: number of classes
:param batch_size: number of samples to process simultaneously
:param ep_idx: epoch index (for hypertrainer log)
:param progress_log: progress log file (for hypertrainer log)
:param device: device used by pytorch (cpu ou cuda)
:param scale: Scale to which values in sat img have been redefined. Useful during visualization
:param vis_params: (Dict) Parameters useful during visualization
:param debug: (bool) Debug mode
:return: Updated training loss
"""
model.train()
train_metrics = create_metrics_dict(num_classes)
for batch_index, data in enumerate(tqdm(train_loader, desc=f'Iterating train batches with {device.type}')):
progress_log.open('a', buffering=1).write(tsv_line(ep_idx, 'trn', batch_index, len(train_loader), time.time()))
try: # For HPC when device 0 not available. Error: RuntimeError: CUDA error: invalid device ordinal
inputs = data['sat_img'].to(device)
labels = data['map_img'].to(device)
except RuntimeError:
logging.exception(f'Unable to use device {device}. Trying "cuda:0"')
device = torch.device('cuda')
inputs = data['sat_img'].to(device)
labels = data['map_img'].to(device)
# forward
optimizer.zero_grad()
outputs = model(inputs)
# added for torchvision models that output an OrderedDict with outputs in 'out' key.
# More info: https://pytorch.org/hub/pytorch_vision_deeplabv3_resnet101/
if isinstance(outputs, OrderedDict):
outputs = outputs['out']
# vis_batch_range: range of batches to perform visualization on. see README.md for more info.
# vis_at_eval: (bool) if True, will | |
<reponame>EMBEDDIA/PropStar<filename>propStar.py
## propStar example use, skrlj 2020 use at own discretion
import pandas as pd
import queue
import networkx as nx
import tqdm
from collections import defaultdict, OrderedDict
from sklearn.dummy import DummyClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, HashingVectorizer
from sklearn import preprocessing
import re
from neural import * ## DRMs
from learning import * ## starspace
from vectorizers import * ## ConjunctVectorizer
import logging
logging.basicConfig(format='%(asctime)s - %(message)s',
datefmt='%d-%b-%y %H:%M:%S')
logging.getLogger().setLevel(logging.INFO)
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import KBinsDiscretizer
class OrderedDictList(OrderedDict):
    """An OrderedDict that lazily creates an empty list for a missing key
    (behaves like defaultdict(list), but preserves insertion order semantics of OrderedDict)."""

    def __missing__(self, key):
        # Insert a fresh list on first access so later lookups return the same object.
        fresh = []
        self[key] = fresh
        return fresh
def cleanp(stx):
    """
    Simple string cleaner: strips parentheses and commas.
    """
    # Single C-level pass instead of three chained str.replace calls.
    return stx.translate(str.maketrans('', '', '(),'))
def interpolate_nans(X):
    """
    Replace NaNs with the mean of their column, computed over the column's non-NaN entries.
    input: matrix X with present nans
    output: a filled matrix X (modified in place and returned)
    """
    for j in range(X.shape[1]):
        mask_j = np.isnan(X[:, j])
        # Fix: the original filled with np.mean(np.flatnonzero(X)) -- the mean of the *indices*
        # of non-zero entries of the whole matrix -- not the column mean it documented.
        if mask_j.any() and not mask_j.all():
            X[mask_j, j] = np.mean(X[~mask_j, j])
    return X
def discretize_candidates(df, types, ratio_threshold=0.20, n_bins=20):
    """
    Discretize continuous columns (int/decimal/float SQL types) into ordinal bins when the
    unique-to-total value ratio exceeds ``ratio_threshold`` but is below 1.0 (i.e. the column
    is not a pure identifier). Columns whose discretization yields NaNs are left untouched.
    :param df: dataframe with positional (integer) column labels
    :param types: SQL type string per column, aligned with df's columns
    :param ratio_threshold: minimum unique-value ratio for a column to be discretized
    :param n_bins: number of ordinal bins
    :return: dataframe with eligible columns replaced by stringified bin indices
    """
    for enx, type_var in enumerate(types):
        if "int" in type_var or "decimal" in type_var or "float" in type_var:
            # (The original initialized ratio_storage as a dict and immediately overwrote it.)
            unique_ratio = 1. * df[enx].nunique() / df[enx].count()
            if unique_ratio > ratio_threshold and unique_ratio != 1.0:
                to_validate = df[enx].values
                parsed_array = np.array(
                    [np.nan if x == "NULL" else float(x) for x in to_validate])
                parsed_array = interpolate_nans(parsed_array.reshape(-1, 1))
                to_be_discretized = parsed_array.reshape(-1, 1)
                var = KBinsDiscretizer(
                    encode="ordinal",
                    n_bins=n_bins).fit_transform(to_be_discretized)
                # Fix: check for a failed discretization BEFORE assigning; the original
                # overwrote the column first and only then skipped the str cast.
                if np.isnan(var).any():
                    continue  ## discretization fail
                df[enx] = var
                df[enx] = df[enx].astype(str)  ## cast back to str.
    return df
def clear(stx):
    """
    Remove backtick quoting from an SQL identifier.
    (The original docstring claimed parentheses were removed, and the identical
    replace call was duplicated.)
    """
    return stx.replace("`", "")
def table_generator(sql_file, variable_types):
    """
    A simple line-oriented SQL-dump parser, inspired by the official SQL library but with
    minimal overhead. CREATE TABLE blocks define the schema (column names, keys,
    constraints); each INSERT INTO line is split into rows and loaded into a dataframe.
    input: a .sql data dump from e.g., relational.fit.cz
    output: (tables, foreign_key_graph, primary_keys) where tables maps table name -> pandas
    DataFrame, foreign_key_graph is a list of (table1, key1, table2, key2) quadruplets, and
    primary_keys maps table name -> primary key name.
    """
    table_trigger = False  # True once inside a CREATE TABLE block
    table_header = False  # True while reading that block's column definitions
    current_table = None  # name of the table currently being parsed
    sqt = defaultdict(list)  # table name -> list of column names
    tabu = ["KEY", "PRIMARY", "CONSTRAINT"]  # schema lines that are not plain columns
    table_keys = defaultdict(list)  # table name -> declared key columns
    primary_keys = {}  # table name -> primary key column
    foreign_key_graph = []  # (table1, key1, table2, key2) quadruplets
    fill_table = False  # NOTE(review): never read again below -- appears to be dead state
    tables = dict()  # table name -> parsed DataFrame
    header_init = False
    col_types = []  # SQL types collected for the current CREATE TABLE block
    ## Read the file table-by-table (This could be done in a lazy manner if needed)
    with open(sql_file, "r", encoding="utf-8", errors="ignore") as sqf:
        for line in sqf:
            if "CREATE TABLE" in line:
                header_init = True
            if header_init:
                # Column-definition lines carry a DEFAULT clause; the type is the 2nd token.
                if "DEFAULT" in line:
                    if "ENGINE" in line:
                        continue
                    ctype = line.split()[1]
                    col_types.append(ctype)
            if "INSERT INTO" in line:
                ## Do some basic cleaning and create the dataframe
                table_header = False
                header_init = False
                vals = line.strip().split()
                # Row tuples are separated by "),("; strip the outer parens and terminator.
                vals_real = " ".join(vals[4:]).split("),(")
                vals_real[0] = vals_real[0].replace("(", "")
                vals_real[len(vals_real) - 1] = vals_real[len(vals_real) -
                                                          1].replace(");", "")
                col_num = len(sqt[current_table])
                # Split each row on commas that are outside single-quoted strings; keep only
                # rows with the expected number of columns.
                vx = list(
                    filter(lambda x: len(x) == col_num, [
                        re.split(r",(?=(?:[^\']*\'[^\']*\')*[^\']*$)", x)
                        for x in vals_real
                    ]))
                if len(vx) == 0:
                    ## this was added for the movies.sql case
                    # Fallback: naive comma split, keeping only the trailing col_num fields.
                    vx = []
                    for x in vals_real:
                        parts = x.split(",")
                        vx.append(parts[len(parts) - col_num:])
                dfx = pd.DataFrame(vx)
                ## Discretize continuous attributes.
                # if dfx.shape[1] == len(col_types):
                # dfx = discretize_candidates(dfx,col_types)
                col_types = []
                try:
                    assert dfx.shape[1] == len(sqt[current_table])
                except:
                    # NOTE(review): logging.info is given two positional args here; the first
                    # is not a format string, so this likely does not log what was intended.
                    logging.info(sqt[current_table])
                    logging.info(
                        col_num,
                        re.split(r",(?=(?:[^\']*\'[^\']*\')*[^\']*$)",
                                 vals_real[0]))
                try:
                    dfx.columns = [clear(x) for x in sqt[current_table]
                                   ] ## some name reformatting.
                except:
                    dfx.columns = [x for x in sqt[current_table]
                                   ] ## some name reformatting.
                tables[current_table] = dfx
                ## get the foreign key graph.
            if table_trigger and table_header:
                # NOTE: `line` is rebound to a token list here; the CREATE TABLE membership
                # test below then checks the token list and cannot match mid-header.
                line = line.strip().split()
                if len(line) > 0:
                    if line[0] not in tabu:
                        if line[0] != "--":
                            # Keep the column only if its (parenthesis-stripped) type is known.
                            if re.sub(r'\([^)]*\)', '',
                                      line[1]).lower() in variable_types:
                                sqt[current_table].append(clear(line[0]))
                    else:
                        if line[0] == "KEY":
                            table_keys[current_table].append(clear(line[2]))
                        if line[0] == "PRIMARY":
                            primary_keys[current_table] = cleanp(clear(
                                line[2]))
                            table_keys[current_table].append(clear(line[2]))
                        if line[0] == "CONSTRAINT":
                            ## Structure in the form of (t1 a1 t2 a2) is used.
                            foreign_key_quadruplet = [
                                clear(cleanp(x)) for x in
                                [current_table, line[4], line[6], line[7]]
                            ]
                            foreign_key_graph.append(foreign_key_quadruplet)
            if "CREATE TABLE" in line:
                table_trigger = True
                table_header = True
                current_table = clear(line.strip().split(" ")[2])
    return tables, foreign_key_graph, primary_keys
def get_table_keys(quadruplet):
    """
    Collect, for every table mentioned in the foreign-key quadruplets,
    the set of its key attributes.
    input: iterable of (table1, key1, table2, key2) entries
    output: mapping of table name -> set of key names
    """
    keys_by_table = defaultdict(set)
    for first_table, first_key, second_table, second_key in quadruplet:
        keys_by_table[first_table].add(first_key)
        keys_by_table[second_table].add(second_key)
    return keys_by_table
def relational_words_to_matrix(fw,
                               relation_order,
                               vectorization_type="tfidf",
                               max_features=10000):
    """
    Employ the conjunctVectorizer (or an sklearn vectorizer) to obtain zero order features.
    input: documents (mapping of instance id -> list of witems)
    output: a sparse matrix and the fitted vectorizer
    raises: ValueError if vectorization_type is not one of the supported names
    """
    docs = []
    if vectorization_type == "tfidf" or vectorization_type == "binary":
        if vectorization_type == "tfidf":
            vectorizer = conjunctVectorizer(max_atoms=relation_order,
                                            max_features=max_features)
        else:
            vectorizer = conjunctVectorizer(max_atoms=relation_order,
                                            binary=True,
                                            max_features=max_features)
        # Conjunct vectorizers consume sets of witems.
        for v in fw.values():
            docs.append(set(v))
        mtx = vectorizer.fit_transform(docs)
    elif vectorization_type in ("sklearn_tfidf", "sklearn_binary", "sklearn_hash"):
        # NOTE(review): binary=True for "sklearn_tfidf" and binary=False for
        # "sklearn_binary" looks inverted -- kept as-is, confirm intent.
        if vectorization_type == "sklearn_tfidf":
            vectorizer = TfidfVectorizer(max_features=max_features,
                                         binary=True)
        elif vectorization_type == "sklearn_binary":
            vectorizer = TfidfVectorizer(max_features=max_features,
                                         binary=False)
        else:
            vectorizer = HashingVectorizer()
        # sklearn vectorizers consume whitespace-joined token strings.
        for v in fw.values():
            docs.append(" ".join(v))
        mtx = vectorizer.fit_transform(docs)
    else:
        # Fix: the original fell through with `vectorizer`/`mtx` unbound, raising an
        # opaque UnboundLocalError for an unrecognized vectorization_type.
        raise ValueError(f"Unknown vectorization_type: {vectorization_type}")
    return mtx, vectorizer
def relational_words_to_matrix_with_vec(fw,
                                        vectorizer,
                                        vectorization_type="tfidf"):
    """
    Transform documents with an already-fitted vectorizer.
    This is for proper cross-validation (applied to the test fold).
    """
    # Conjunct vectorizers expect sets of witems; sklearn ones expect joined strings.
    if vectorization_type in ("tfidf", "binary"):
        docs = [set(witems) for witems in fw.values()]
    else:
        docs = [" ".join(witems) for witems in fw.values()]
    return vectorizer.transform(docs)
def generate_relational_words(tables,
fkg,
target_table=None,
target_attribute=None,
relation_order=(2, 4),
indices=None,
vectorizer=None,
vectorization_type="tfidf",
num_features=10000):
"""
Key method for generation of relational words and documents.
It traverses individual tables in path, and consequantially appends the witems to a witem set. This method is a rewritten, non exponential (in space) version of the original Wordification algorithm (Perovsek et al, 2014).
input: a collection of tables and a foreign key graph
output: a representation in form of a sparse matrix.
"""
fk_graph = nx.Graph(
) ## a simple undirected graph as the underlying fk structure
core_foreign_keys = set()
all_foreign_keys = set()
for foreign_key in fkg:
## foreing key mapping
t1, k1, t2, k2 = foreign_key
if t1 == target_table:
core_foreign_keys.add(k1)
elif t2 == target_table:
core_foreign_keys.add(k2)
all_foreign_keys.add(k1)
all_foreign_keys.add(k2)
## add link, note that this is in fact a typed graph now
fk_graph.add_edge((t1, k1), (t2, k2))
## this is more efficient than just orderedDict object
feature_vectors = OrderedDictList()
if not indices is None:
core_table = tables[target_table].iloc[indices, :]
else:
core_table = tables[target_table]
all_table_keys = get_table_keys(fkg)
core_foreign = None
target_classes = core_table[target_attribute]
## This is a remnant of one of the experiment, left here for historical reasons :)
if target_attribute == "Delka_hospitalizace":
tars = []
for tc in target_classes:
if int(tc) >= 10:
tars.append(0)
else:
tars.append(1)
target_classes = pd.DataFrame(np.array(tars))
print(np.sum(tars) / len(target_classes))
total_witems = set()
num_witems = 0
## The main propositionalization routine
logging.info("Propositionalization of core table ..")
for index, row in tqdm.tqdm(core_table.iterrows(),
total=core_table.shape[0]):
for i in range(len(row)):
column_name = row.index[i]
if column_name != target_attribute and not column_name in core_foreign_keys:
witem = "-".join([target_table, column_name, row[i]])
feature_vectors[index].append(witem)
num_witems += 1
total_witems.add(witem)
logging.info("Traversing other tables ..")
for core_fk in core_foreign_keys: ## this is normaly a single key.
bfs_traversal = dict(
nx.bfs_successors(fk_graph, (target_table, core_fk)))
## Traverse the row space
for index, row in tqdm.tqdm(core_table.iterrows(),
total=core_table.shape[0]):
current_depth = 0
to_traverse = queue.Queue()
to_traverse.put(target_table) ## seed table
max_depth = 2
tables_considered = 0
parsed_tables = set()
## Perform simple search
while current_depth < max_depth:
current_depth += 1
origin = to_traverse.get()
if current_depth == 1:
successor_tables = bfs_traversal[(origin, core_fk)]
else:
if origin in bfs_traversal:
successor_tables = bfs_traversal[origin]
else:
continue
for succ in successor_tables:
to_traverse.put(succ)
for table in successor_tables:
if (table) in parsed_tables:
continue
parsed_tables.add(table)
first_table_name, first_table_key = origin, core_fk
next_table_name, next_table_key = table
if not first_table_name in tables or not next_table_name in tables:
continue
## link and generate witems
first_table = tables[first_table_name]
second_table = tables[next_table_name]
if first_table_name == target_table:
key_to_compare = row[first_table_key]
elif first_table_name != target_table and current_depth == 2:
key_to_compare = None
for edge in fk_graph.edges():
if edge[0][0] == target_table and edge[1][
0] == first_table_name:
key_to_compare = first_table[first_table[
edge[1][1]] == row[edge[0]
[1]]][first_table_key]
if not key_to_compare is None:
pass
else:
continue
## The second case
trow = second_table[second_table[next_table_key] ==
key_to_compare]
for x in trow.columns:
if not x in all_foreign_keys and x != target_attribute:
for value in trow[x]:
witem = "-".join(
str(x)
for x in [next_table_name, x, value])
total_witems.add(witem)
num_witems += 1
feature_vectors[index].append(witem)
## Summary of the output
logging.info("Stored {} witems..".format(num_witems))
logging.info("Learning representation from {} unique witems.".format(
len(total_witems)))
## Vectorizer is an arbitrary vectorizer, some of the well known ones are implemented here, it's simple to add your own!
if | |
from __future__ import division
from collections import OrderedDict
from functools import partial
import gzip
import io
import os
import logging
import os.path
import h5py
import numpy
from picklable_itertools.extras import equizip
from progressbar import ProgressBar
from PIL import Image
from scipy.io.matlab import loadmat
from six.moves import zip, xrange
import zmq
from fuel.converters.base import check_exists
from fuel.datasets import H5PYDataset
from fuel.utils.formats import tar_open
from fuel.utils.parallel import producer_consumer
from fuel import config
# Module-level logger for progress/status messages.
log = logging.getLogger(__name__)
# Filenames of the raw ILSVRC2010 distribution files expected in the input directory.
DEVKIT_ARCHIVE = 'ILSVRC2010_devkit-1.0.tar.gz'
DEVKIT_META_PATH = 'devkit-1.0/data/meta.mat'
DEVKIT_VALID_GROUNDTRUTH_PATH = ('devkit-1.0/data/'
                                 'ILSVRC2010_validation_ground_truth.txt')
PATCH_IMAGES_TAR = 'patch_images.tar'
TEST_GROUNDTRUTH = 'ILSVRC2010_test_ground_truth.txt'
TRAIN_IMAGES_TAR = 'ILSVRC2010_images_train.tar'
VALID_IMAGES_TAR = 'ILSVRC2010_images_val.tar'
TEST_IMAGES_TAR = 'ILSVRC2010_images_test.tar'
# Image archives in (train, valid, test, patch) order; convert_ilsvrc2010 unpacks
# them positionally, so this ordering matters.
IMAGE_TARS = (TRAIN_IMAGES_TAR, VALID_IMAGES_TAR, TEST_IMAGES_TAR,
              PATCH_IMAGES_TAR)
PUBLIC_FILES = TEST_GROUNDTRUTH, DEVKIT_ARCHIVE
# Full set of files whose presence is enforced by @check_exists on the converter.
ALL_FILES = PUBLIC_FILES + IMAGE_TARS
@check_exists(required_files=ALL_FILES)
def convert_ilsvrc2010(directory, output_directory,
                       output_filename='ilsvrc2010.hdf5',
                       shuffle_seed=config.default_seed):
    """Converter for data from the ILSVRC 2010 competition.

    Source files for this dataset can be obtained by registering at
    [ILSVRC2010WEB].

    Parameters
    ----------
    directory : str
        Path from which to read raw data files.
    output_directory : str
        Path to which to save the HDF5 file.
    output_filename : str, optional
        The output filename for the HDF5 file. Default: 'ilsvrc2010.hdf5'.
    shuffle_seed : int or sequence, optional
        Seed for a random number generator used to shuffle the order
        of the training set on disk, so that sequential reads will not
        be ordered by class.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path of the generated HDF5 file.

    .. [ILSVRC2010WEB] http://image-net.org/challenges/LSVRC/2010/index
    """
    devkit_path = os.path.join(directory, DEVKIT_ARCHIVE)
    test_groundtruth_path = os.path.join(directory, TEST_GROUNDTRUTH)
    # Unpack positionally in the (train, valid, test, patch) order of IMAGE_TARS.
    train, valid, test, patch = [os.path.join(directory, fn)
                                 for fn in IMAGE_TARS]
    n_train, valid_groundtruth, test_groundtruth, wnid_map = \
        prepare_metadata(devkit_path, test_groundtruth_path)
    n_valid, n_test = len(valid_groundtruth), len(test_groundtruth)
    output_path = os.path.join(output_directory, output_filename)
    with h5py.File(output_path, 'w') as f:
        log.info('Creating HDF5 datasets...')
        prepare_hdf5_file(f, n_train, n_valid, n_test)
        log.info('Processing training set...')
        process_train_set(f, train, patch, n_train, wnid_map, shuffle_seed)
        log.info('Processing validation set...')
        process_other_set(f, 'valid', valid, patch, valid_groundtruth, n_train)
        log.info('Processing test set...')
        # Test examples are written after the train + valid rows.
        process_other_set(f, 'test', test, patch, test_groundtruth,
                          n_train + n_valid)
        log.info('Done.')
    return (output_path,)
def fill_subparser(subparser):
    """Sets up a subparser to convert the ILSVRC2010 dataset files.

    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `ilsvrc2010` command.
    """
    shuffle_help = ("Seed to use for randomizing order of the training set "
                    "on disk.")
    subparser.add_argument("--shuffle-seed",
                           type=int,
                           required=False,
                           default=config.default_seed,
                           help=shuffle_help)
    # The caller invokes the returned function to perform the conversion.
    return convert_ilsvrc2010
def prepare_metadata(devkit_archive, test_groundtruth_path):
    """Extract dataset metadata required for HDF5 file setup.

    Parameters
    ----------
    devkit_archive : str or file-like object
        The filename or file-handle for the gzipped TAR archive
        containing the ILSVRC2010 development kit.
    test_groundtruth_path : str or file-like object
        The filename or file-handle for the text file containing
        the ILSVRC2010 test set ground truth.

    Returns
    -------
    n_train : int
        The number of examples in the training set.
    valid_groundtruth : list
        The validation set groundtruth as 0-based class indices.
    test_groundtruth : list
        The test set groundtruth as 0-based class indices.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.
    """
    # Read what's necessary from the development kit.
    synsets, cost_matrix, raw_valid_groundtruth = read_devkit(devkit_archive)
    # WordNet ID -> internal 0-999 encoding (only the first 1000 synsets are classes).
    wnid_map = {wnid.decode('utf8'): label
                for label, wnid in zip(xrange(1000), synsets['WNID'])}
    # 'ILSVRC2010 ID' -> zero-based ID.
    ilsvrc_id_to_zero_based = {ilsvrc_id: label
                               for label, ilsvrc_id
                               in enumerate(synsets['ILSVRC2010_ID'])}
    # Remap the validation groundtruth onto 0-999 labels.
    valid_groundtruth = [ilsvrc_id_to_zero_based[id_]
                         for id_ in raw_valid_groundtruth]
    # Load the raw test groundtruth (ILSVRC2010 IDs) and remap it as well.
    raw_test_groundtruth = numpy.loadtxt(test_groundtruth_path,
                                         dtype=numpy.int16)
    test_groundtruth = [ilsvrc_id_to_zero_based[id_]
                        for id_ in raw_test_groundtruth]
    # Number of training filenames, to size the HDF5 arrays appropriately.
    n_train = int(synsets['num_train_images'].sum())
    log.info('Training set: {} images'.format(n_train))
    log.info('Validation set: {} images'.format(len(valid_groundtruth)))
    log.info('Test set: {} images'.format(len(test_groundtruth)))
    n_total = n_train + len(valid_groundtruth) + len(test_groundtruth)
    log.info('Total (train/valid/test): {} images'.format(n_total))
    return n_train, valid_groundtruth, test_groundtruth, wnid_map
def create_splits(n_train, n_valid, n_test):
    """Build the split dict mapping each split to per-source (start, stop) row intervals."""
    sources = ['encoded_images', 'targets', 'filenames']
    # Consecutive boundaries: train rows first, then valid, then test.
    boundaries = (0, n_train, n_train + n_valid, n_train + n_valid + n_test)
    split_dict = OrderedDict()
    for position, split_name in enumerate(('train', 'valid', 'test')):
        interval = (boundaries[position], boundaries[position + 1])
        split_dict[split_name] = OrderedDict((source, interval)
                                             for source in sources)
    return split_dict
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
    """Create datasets within a given HDF5 file.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write.
    n_train : int
        The number of training set examples.
    n_valid : int
        The number of validation set examples.
    n_test : int
        The number of test set examples.
    """
    n_total = n_train + n_valid + n_test
    # Record the split layout so H5PYDataset can address the three subsets.
    hdf5_file.attrs['split'] = H5PYDataset.create_split_array(
        create_splits(n_train, n_valid, n_test))
    # Encoded JPEG payloads have varying lengths, hence a variable-length uint8 dtype.
    uint8_vlen = h5py.special_dtype(vlen=numpy.dtype('uint8'))
    for name, shape, dtype in (('encoded_images', (n_total,), uint8_vlen),
                               ('targets', (n_total, 1), numpy.int16),
                               ('filenames', (n_total, 1), 'S32')):
        hdf5_file.create_dataset(name, shape=shape, dtype=dtype)
def process_train_set(hdf5_file, train_archive, patch_archive, n_train,
                      wnid_map, shuffle_seed=None):
    """Process the ILSVRC2010 training set.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write. Assumes `features`, `targets`
        and `filenames` already exist and have first dimension larger than
        `n_train`.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    n_train : int
        The number of items in the training set.
    wnid_map : dict
        A dictionary mapping WordNet IDs to class indices.
    shuffle_seed : int or sequence, optional
        Seed for a NumPy random number generator that permutes the
        training set on disk. If `None`, no permutation is performed
        (this is the default).
    """
    # Wire the producer (reads TARs) and consumer (writes HDF5) together.
    producer_consumer(
        partial(train_set_producer,
                train_archive=train_archive,
                patch_archive=patch_archive,
                wnid_map=wnid_map),
        partial(image_consumer,
                hdf5_file=hdf5_file,
                num_expected=n_train,
                shuffle_seed=shuffle_seed))
def _write_to_hdf5(hdf5_file, index, image_filename, image_data,
                   class_index):
    """Store one example (filename, encoded image bytes, target) at the given row."""
    row = {'filenames': image_filename.encode('ascii'),
           'encoded_images': image_data,
           'targets': class_index}
    for dataset_name, value in row.items():
        hdf5_file[dataset_name][index] = value
def train_set_producer(socket, train_archive, patch_archive, wnid_map):
    """Load/send images from the training set TAR file or patch images.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PUSH socket on which to send loaded images.
    train_archive : str or file-like object
        Filename or file handle for the TAR archive of training images.
    patch_archive : str or file-like object
        Filename or file handle for the TAR archive of patch images.
    wnid_map : dict
        A dictionary that maps WordNet IDs to 0-based class indices.
        Used to decode the filenames of the inner TAR files.

    Raises
    ------
    ValueError
        If not every image in ``patch_archive`` ends up being used.
    """
    patch_images = extract_patch_images(patch_archive, 'train')
    num_patched = 0
    # The training archive is a TAR of per-class inner TARs, one per WordNet ID.
    with tar_open(train_archive) as tar:
        for inner_tar_info in tar:
            with tar_open(tar.extractfile(inner_tar_info.name)) as inner:
                # Inner archive is named "<wnid>.tar"; map the WNID to its class index.
                wnid = inner_tar_info.name.split('.')[0]
                class_index = wnid_map[wnid]
                filenames = sorted(info.name for info in inner
                                   if info.isfile())
                # Lazy generator: each item is (image_data, patched_flag) -- the flag
                # indicates the bytes came from the patch set rather than the inner TAR.
                images_gen = (load_from_tar_or_patch(inner, filename,
                                                     patch_images)
                              for filename in filenames)
                pathless_filenames = (os.path.split(fn)[-1]
                                      for fn in filenames)
                stream = equizip(pathless_filenames, images_gen)
                for image_fn, (image_data, patched) in stream:
                    if patched:
                        num_patched += 1
                    # Two-part message: (filename, class index) header, then raw bytes.
                    socket.send_pyobj((image_fn, class_index), zmq.SNDMORE)
                    socket.send(image_data)
    # Sanity check: every patch image must have replaced some training image.
    if num_patched != len(patch_images):
        raise ValueError('not all patch images were used')
def image_consumer(socket, hdf5_file, num_expected, shuffle_seed=None,
                   offset=0):
    """Fill an HDF5 file with incoming images from a socket.

    Parameters
    ----------
    socket : :class:`zmq.Socket`
        PULL socket on which to receive images.
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write. Assumes `features`, `targets`
        and `filenames` already exist and have first dimension larger than
        `sum(images_per_class)`.
    num_expected : int
        The number of items we expect to be sent over the socket.
    shuffle_seed : int or sequence, optional
        Seed for a NumPy random number generator that permutes the
        images on disk.
    offset : int, optional
        The offset in the HDF5 datasets at which to start writing
        received examples. Defaults to 0.
    """
    with ProgressBar(maxval=num_expected) as pb:
        if shuffle_seed is None:
            index_gen = iter(xrange(num_expected))
        else:
            # Deterministic permutation so a given seed always yields the same layout.
            rng = numpy.random.RandomState(shuffle_seed)
            index_gen = iter(rng.permutation(num_expected))
        for i, num in enumerate(index_gen):
            # NOTE(review): zmq.SNDMORE is a *send* flag; its effect when passed to
            # recv_pyobj is dubious -- confirm against pyzmq docs. Kept as-is.
            image_filename, class_index = socket.recv_pyobj(zmq.SNDMORE)
            # Fix: numpy.fromstring is deprecated for binary data; frombuffer is the
            # supported equivalent (read-only view, fine since we only copy into HDF5).
            image_data = numpy.frombuffer(socket.recv(), dtype='uint8')
            _write_to_hdf5(hdf5_file, num + offset, image_filename,
                           image_data, class_index)
            pb.update(i + 1)
def process_other_set(hdf5_file, which_set, image_archive, patch_archive,
groundtruth, offset):
"""Process the validation or test set.
Parameters
----------
hdf5_file : :class:`h5py.File` instance
HDF5 file handle to which to write. Assumes `features`, `targets`
and `filenames` already exist and have first dimension larger than
`sum(images_per_class)`.
which_set : str
Which set of images is being processed. One of 'train', 'valid',
'test'. Used for extracting the appropriate images from the patch
archive.
image_archive : str or file-like object
The filename or file-handle for the TAR archive containing images.
patch_archive : str or file-like object
Filename or file handle for the TAR archive of patch images.
groundtruth : iterable
Iterable container containing scalar 0-based class index for each
image, sorted by filename.
offset : int
The offset in the HDF5 datasets at which to start writing.
"""
producer = partial(other_set_producer, image_archive=image_archive,
patch_archive=patch_archive,
groundtruth=groundtruth, which_set=which_set)
consumer = partial(image_consumer, | |
23889,
"name": "Производственная практика, по получению профессиональных умений и опыта профессиональной деятельности",
"term": 10,
"course_project": False
},
{
"id": 23615,
"name": "Производственная практика, преддипломная",
"term": 4,
"course_project": False
},
{
"id": 23615,
"name": "Производственная практика, преддипломная",
"term": 8,
"course_project": False
},
{
"id": 23615,
"name": "Производственная практика, преддипломная",
"term": 10,
"course_project": False
},
{
"id": 23866,
"name": "Производственная практика, проектная",
"term": 6,
"course_project": False
},
{
"id": 23789,
"name": "Производственная практика, проектно-конструкторская",
"term": 6,
"course_project": False
},
{
"id": 23864,
"name": "Производственная практика, производственно-технологическая",
"term": 6,
"course_project": False
},
{
"id": 23864,
"name": "Производственная практика, производственно-технологическая",
"term": 8,
"course_project": False
},
{
"id": 29499,
"name": "Производственная, консультационно-экспертная",
"term": 4,
"course_project": False
},
{
"id": 28529,
"name": "Производственная, научно-исследовательская",
"term": 2,
"course_project": False
},
{
"id": 28529,
"name": "Производственная, научно-исследовательская",
"term": 4,
"course_project": False
},
{
"id": 28529,
"name": "Производственная, научно-исследовательская",
"term": 6,
"course_project": False
},
{
"id": 29053,
"name": "Производственная, научно-исследовательская / Research Internship",
"term": 4,
"course_project": False
},
{
"id": 27111,
"name": "Производственная, научно-исследовательская работа",
"term": 2,
"course_project": False
},
{
"id": 27111,
"name": "Производственная, научно-исследовательская работа",
"term": 4,
"course_project": False
},
{
"id": 28211,
"name": "Производственная, научно-исследовательская работа / Research Internship",
"term": 2,
"course_project": False
},
{
"id": 28211,
"name": "Производственная, научно-исследовательская работа / Research Internship",
"term": 4,
"course_project": False
},
{
"id": 30109,
"name": "Производственная, научно-исследовательская работа / Research Work",
"term": 2,
"course_project": False
},
{
"id": 30956,
"name": "Производственная, научно-педагогическая",
"term": 4,
"course_project": False
},
{
"id": 24108,
"name": "Производственная, образовательно-проектировочная практика",
"term": 8,
"course_project": False
},
{
"id": 29245,
"name": "Производственная, организационно-управленческая",
"term": 4,
"course_project": False
},
{
"id": 29245,
"name": "Производственная, организационно-управленческая",
"term": 6,
"course_project": False
},
{
"id": 31087,
"name": "Производственная, организационно-управленческая, научно-педагогическая",
"term": 4,
"course_project": False
},
{
"id": 29488,
"name": "Производственная, организационно-управленческая, экспериментально-исследовательская работа",
"term": 4,
"course_project": False
},
{
"id": 29449,
"name": "Производственная, педагогическая",
"term": 4,
"course_project": False
},
{
"id": 28337,
"name": "Производственная, преддипломная",
"term": 4,
"course_project": False
},
{
"id": 29271,
"name": "Производственная, преддипломная / Senior Internship",
"term": 4,
"course_project": False
},
{
"id": 28563,
"name": "Производственная, проектная",
"term": 4,
"course_project": False
},
{
"id": 28552,
"name": "Производственная, проектная/ Project Internship",
"term": 4,
"course_project": False
},
{
"id": 29304,
"name": "Производственная, проектно-конструкторская",
"term": 4,
"course_project": False
},
{
"id": 29304,
"name": "Производственная, проектно-конструкторская",
"term": 6,
"course_project": False
},
{
"id": 24110,
"name": "Производственная, проектно-предпринимательская практика",
"term": 6,
"course_project": False
},
{
"id": 29247,
"name": "Производственная, проектно-технологическая",
"term": 4,
"course_project": False
},
{
"id": 29526,
"name": "Производственная, проектно-технологическая / Tech Project Internship",
"term": 4,
"course_project": False
},
{
"id": 29280,
"name": "Производственная, производственно-технологическая",
"term": 4,
"course_project": False
},
{
"id": 29280,
"name": "Производственная, производственно-технологическая",
"term": 6,
"course_project": False
},
{
"id": 29329,
"name": "Производственная, технологическая",
"term": 4,
"course_project": False
},
{
"id": 29329,
"name": "Производственная, технологическая",
"term": 6,
"course_project": False
},
{
"id": 28336,
"name": "Производственная, технологическая (проектно-технологическая)",
"term": 4,
"course_project": False
},
{
"id": 28336,
"name": "Производственная, технологическая (проектно-технологическая)",
"term": 6,
"course_project": False
},
{
"id": 35675,
"name": "Производственная, технологическая (проектно-технологическая) / Industrial and Tech Internship",
"term": 4,
"course_project": False
},
{
"id": 29268,
"name": "Производственная, технологическая (проектно-технологическая) / Tech Project Internship",
"term": 4,
"course_project": False
},
{
"id": 29430,
"name": "Производственная, технологическая / Tech Internship",
"term": 4,
"course_project": False
},
{
"id": 29246,
"name": "Производственная, эксплуатационная",
"term": 4,
"course_project": False
},
{
"id": 26317,
"name": "Производственные киберфизические системы",
"term": 2,
"course_project": False
},
{
"id": 30469,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 35029,
"name": "Промышленная робототехника и мехатронные системы",
"term": 2,
"course_project": False
},
{
"id": 26321,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 30911,
"name": "Промышленный интернет вещей и сервисов",
"term": 2,
"course_project": False
},
{
"id": 31560,
"name": "Протеомика/ Proteomics",
"term": 2,
"course_project": False
},
{
"id": 27296,
"name": "Протоколы информационной безопасности киберфизических систем",
"term": 2,
"course_project": False
},
{
"id": 26153,
"name": "Процессы и аппараты защиты окружающей среды",
"term": 6,
"course_project": False
},
{
"id": 34803,
"name": "Процессы и технологии разделения и глубокой очистки загрязненных сред / Processes and Technologies for the Separation and Deep Purification of Contaminated Media",
"term": 2,
"course_project": False
},
{
"id": 26334,
"name": "Процессы и устройства преобразования энергии",
"term": 2,
"course_project": False
},
{
"id": 27361,
"name": "Прямое преобразование энергии и возобновляемые источники энергии",
"term": 2,
"course_project": False
},
{
"id": 27689,
"name": "Прямое преобразование энергии и возобновляемые источники энергии / Direct Energy Conversion and Renewable Energy Sources",
"term": 2,
"course_project": False
},
{
"id": 34326,
"name": "Психология взаимодействия горожан с городской средой",
"term": 2,
"course_project": False
},
{
"id": 26343,
"name": "Психология визуального восприятия",
"term": 6,
"course_project": False
},
{
"id": 31046,
"name": "Психология личности и профессиональное самоопределение",
"term": 4,
"course_project": False
},
{
"id": 31046,
"name": "Психология личности и профессиональное самоопределение",
"term": 6,
"course_project": False
},
{
"id": 31046,
"name": "<NAME>ичности и профессиональное самоопределение",
"term": 8,
"course_project": False
},
{
"id": 2932,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 2932,
"name": "<NAME>",
"term": 8,
"course_project": False
},
{
"id": 26344,
"name": "<NAME>",
"term": 6,
"course_project": False
},
{
"id": 31045,
"name": "Психология профессиональной адаптации",
"term": 4,
"course_project": False
},
{
"id": 31045,
"name": "Психология профессиональной адаптации",
"term": 6,
"course_project": False
},
{
"id": 31045,
"name": "Психология профессиональной адаптации",
"term": 8,
"course_project": False
},
{
"id": 29678,
"name": "Психология развития и профессиональное образование",
"term": 6,
"course_project": False
},
{
"id": 26347,
"name": "Психология человеко-компьютерного взаимодействия",
"term": 2,
"course_project": False
},
{
"id": 35570,
"name": "Публичные выступления и презентации, сторителлинг",
"term": 2,
"course_project": False
},
{
"id": 31592,
"name": "Пьезоэлектрические исполнительные устройства",
"term": 2,
"course_project": False
},
{
"id": 31121,
"name": "Работа с данными в биотехнологиях и медицине",
"term": 2,
"course_project": False
},
{
"id": 37769,
"name": "Радиочастотные системы МРТ",
"term": 4,
"course_project": False
},
{
"id": 26354,
"name": "Развитие речи на материале научных технических текстов",
"term": 6,
"course_project": False
},
{
"id": 34989,
"name": "Разработка IDE",
"term": 2,
"course_project": False
},
{
"id": 28670,
"name": "Разработка Web-приложений / Web Software Development",
"term": 2,
"course_project": False
},
{
"id": 26369,
"name": "Разработка баз данных",
"term": 6,
"course_project": False
},
{
"id": 18680,
"name": "Разработка веб-приложений",
"term": 8,
"course_project": False
},
{
"id": 26376,
"name": "Разработка и внедрение распределенных систем",
"term": 2,
"course_project": False
},
{
"id": 26376,
"name": "Разработка и внедрение распределенных систем",
"term": 2,
"course_project": True
},
{
"id": 34436,
"name": "Разработка и расчет новых конструкций ферментаторов",
"term": 2,
"course_project": False
},
{
"id": 34436,
"name": "Разработка и расчет новых конструкций ферментаторов",
"term": 2,
"course_project": True
},
{
"id": 21698,
"name": "Разработка и сопровождение баз данных",
"term": 8,
"course_project": False
},
{
"id": 18707,
"name": "Разработка интеллектуальных систем",
"term": 8,
"course_project": False
},
{
"id": 26371,
"name": "Разработка компиляторов",
"term": 6,
"course_project": False
},
{
"id": 11602,
"name": "Разработка мобильных приложений",
"term": 6,
"course_project": False
},
{
"id": 26367,
"name": "Разработка мобильных приложений",
"term": 2,
"course_project": False
},
{
"id": 26367,
"name": "Разработка мобильных приложений",
"term": 6,
"course_project": False
},
{
"id": 26368,
"name": "Разработка мультимедийных приложений",
"term": 6,
"course_project": False
},
{
"id": 19169,
"name": "Разработка переносимых приложений",
"term": 8,
"course_project": False
},
{
"id": 34653,
"name": "Разработка приложений на Java",
"term": 6,
"course_project": False
},
{
"id": 31048,
"name": "Разработка программных модулей",
"term": 6,
"course_project": False
},
{
"id": 26360,
"name": "Разработка социальных программ и проектов",
"term": 2,
"course_project": False
},
{
"id": 26360,
"name": "Разработка социальных программ и проектов",
"term": 6,
"course_project": False
},
{
"id": 26379,
"name": "Разработка управляющих программ для станков с числовым программным управлением",
"term": 4,
"course_project": False
},
{
"id": 26379,
"name": "Разработка управляющих программ для станков с числовым программным управлением",
"term": 4,
"course_project": True
},
{
"id": 36629,
"name": "Рамановское и Мандельштам- бриллюэновское рассеяния в оптических волокнах и их применение / Raman and Mandelstam- Brillouin scattering within optical fibers and its application",
"term": | |
<reponame>Chromico/bk-base
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import json
from datetime import datetime
import mock
import pytest
from common.api.base import DataResponse
from datamanage.tests.datamodel.init_data import init_complete_model_instance # noqa
# --- Shared identifiers used by the datamodel application test suite ---
MODEL_ID = 1
DIM_MODEL_ID = 2
INSTANCE_ID = 1
PROJECT_ID = 1
FLOW_ID = 1
BK_BIZ_ID = 591
BK_USERNAME = 'unittest'
VERSION_ID = '96wsipgxk5hc4nzd7jqey1mbv8fau0lt'
MAIN_TABLE_ID = '591_main_table'
# Result table produced by the test indicator (was accidentally defined twice).
INDICATOR_RESULT_TABLE_ID = '591_indicator_table'
CALCULATION_ATOM_NAME = 'max_price'
INDICATOR_NAME = 'max_price_1d'
INDICATOR_ID = 1
# Identifiers that intentionally do not exist, for negative-path tests.
UNKNOWN_INSTANCE_ID = 10000
UNKNOWN_MODEL_ID = 10000
UNKNOWN_TABLE_ID = '591_unknown_table'
# Default query parameters for the operation-log listing endpoint tests.
OPERATION_LOG_PARAMS = {
    "page": 1,
    "page_size": 10,
    'conditions': [{'key': 'object_operation', 'value': ['release']}, {'key': 'object_type', 'value': ['model']}],
    'start_time': '2021-03-01 00:00:00',
    'end_time': None,
    'order_by_created_at': 'desc',
}
@pytest.fixture(scope='class')
def delete_model_instance_indicator():
    """Remove the instance indicator row created for the test model instance."""
    from datamanage.pro.datamodel.models import application as ins_models

    indicator_qs = ins_models.DmmModelInstanceIndicator.objects.filter(
        instance_id=INSTANCE_ID,
        result_table_id=INDICATOR_RESULT_TABLE_ID,
    )
    # .get() raises if the row is missing/ambiguous, mirroring the original intent.
    indicator_qs.get().delete()
@pytest.fixture(scope='class')
def init_complete_model():
    """Build a complete, released data model for class-scoped tests.

    Creates, in order:
      1) a fact model and 2) its master-table fields,
      3) a calculation atom and 4) an indicator based on it,
      5) a dimension model and 6) its fields (both final and staged),
      7) the fact->dimension relation, and
      8) a released version (DmmModelRelease) linked back to the fact model.

    Fix: ``created_by`` is now set on ``dimension_relation_params`` before the
    relation row is created; the original assigned it to the leftover loop
    variable ``field_dict`` (a no-op copy/paste bug).
    """
    from datamanage.pro.datamodel.models.datamodel import (
        DmmModelInfo,
        DmmModelField,
        DmmModelRelease,
        DmmModelRelation,
        DmmModelFieldStage,
    )
    from datamanage.pro.datamodel.models.indicator import DmmModelCalculationAtom, DmmModelIndicator

    # 1) Create the fact model (owner of the master table).
    fact_model_params = json.loads(
        '''{
        "model_id": %s,
        "model_name": "fact_model_name",
        "model_alias": "创建模型的中文名",
        "model_type": "fact_table",
        "description": "创建模型的描述",
        "project_id": 3
    }'''
        % MODEL_ID
    )
    fact_model_params['created_by'] = BK_USERNAME
    fact_model_params['created_at'] = datetime.now()
    model_obj = DmmModelInfo(**fact_model_params)
    model_obj.save()
    fact_model_id = model_obj.model_id
    project_id = model_obj.project_id

    # 2) Create the fact model's master-table fields.
    master_table_fields_params = json.loads(
        '''[
        {
            "model_id": %s,
            "field_name": "price",
            "field_alias": "道具价格",
            "field_index": 1,
            "field_type": "long",
            "field_category": "measure",
            "description": "道具价格",
            "field_constraint_content": null,
            "field_clean_content": {
                "clean_option": "SQL",
                "clean_content": "price as price"
            },
            "source_model_id": null,
            "source_field_name": null
        },
        {
            "model_id": %s,
            "field_name": "channel_id",
            "field_alias": "渠道号",
            "field_index": 2,
            "field_type": "string",
            "field_category": "dimension",
            "description": "渠道号",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        },
        {
            "model_id": %s,
            "field_name": "channel_name",
            "field_alias": "渠道号名称",
            "field_index": 3,
            "field_type": "string",
            "field_category": "dimension",
            "description": "渠道号",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": %s,
            "source_field_name": "channel_name"
        },
        {
            "model_id": %s,
            "field_name": "time",
            "field_alias": "时间字段",
            "field_index": 4,
            "field_type": "timestamp",
            "field_category": "dimension",
            "description": "平台内置时间字段,数据入库后将装换为可查询字段,比如 dtEventTime/dtEventTimeStamp/localtime",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        }
    ]'''
        % (MODEL_ID, MODEL_ID, MODEL_ID, DIM_MODEL_ID, MODEL_ID)
    )
    for field_dict in master_table_fields_params:
        field_dict['model_id'] = fact_model_id
        field_dict['created_by'] = BK_USERNAME
        DmmModelField.objects.create(**field_dict)
    # Extra key only used later when embedding the fields into the release content.
    master_table_fields_params[0]['origin_fields'] = ['price']

    # 3) Create the calculation atom.
    calc_atom_params = json.loads(
        '''{
        "model_id": %s,
        "calculation_atom_name": "%s",
        "calculation_atom_alias": "max_price",
        "description": "max_price",
        "field_type": "long",
        "calculation_content": {
            "option": "SQL",
            "content": {
                "calculation_formula":"max(price)"
            }
        }
    }'''
        % (MODEL_ID, CALCULATION_ATOM_NAME)
    )
    calc_atom_params['model_id'] = fact_model_id
    calc_atom_params['created_by'] = BK_USERNAME
    calc_atom_params['project_id'] = project_id
    DmmModelCalculationAtom.objects.create(**calc_atom_params)

    # 4) Create the indicator on top of the calculation atom.
    indicator_params = json.loads(
        '''{
        "model_id": %s,
        "indicator_name":"%s",
        "indicator_alias":"1d最大价格",
        "description":"1d最大价格",
        "calculation_atom_name":"%s",
        "aggregation_fields":[],
        "filter_formula":"",
        "scheduling_type":"batch",
        "scheduling_content":{
            "window_type":"fixed",
            "count_freq":1,
            "schedule_period":"day",
            "fixed_delay":0,
            "dependency_config_type":"unified",
            "unified_config":{
                "window_size":1,
                "window_size_period":"day",
                "dependency_rule":"all_finished"
            },
            "advanced":{
                "recovery_times":3,
                "recovery_enable":false,
                "recovery_interval":"60m"
            }
        },
        "parent_indicator_name":null
    }'''
        % (MODEL_ID, INDICATOR_NAME, CALCULATION_ATOM_NAME)
    )
    indicator_params['model_id'] = fact_model_id
    indicator_params['created_by'] = BK_USERNAME
    indicator_params['project_id'] = project_id
    DmmModelIndicator.objects.create(**indicator_params)

    # 5) Create the dimension model.
    dimension_model_params = json.loads(
        '''{
        "model_id": %s,
        "model_name": "dimension_model_name",
        "model_alias": "创建维度模型的中文名",
        "model_type": "dimension_table",
        "description": "创建维度模型的描述",
        "project_id": 3
    }'''
        % DIM_MODEL_ID
    )
    dimension_model_params['created_by'] = BK_USERNAME
    dim_model_obj = DmmModelInfo(**dimension_model_params)
    dim_model_obj.save()
    dimension_model_id = dim_model_obj.model_id

    # 6) Create the dimension model's master-table fields (final + staged copies).
    dimension_master_table_params = json.loads(
        '''[
        {
            "model_id": %s,
            "field_name": "channel_id",
            "field_alias": "渠道号",
            "field_index": 1,
            "field_type": "string",
            "field_category": "dimension",
            "is_primary_key": true,
            "description": "渠道号",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        },
        {
            "model_id": %s,
            "field_name": "channel_name",
            "field_alias": "渠道号名称",
            "field_index": 2,
            "field_type": "string",
            "field_category": "dimension",
            "is_primary_key": false,
            "description": "渠道号名称",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        }
    ]'''
        % (DIM_MODEL_ID, DIM_MODEL_ID)
    )
    for field_dict in dimension_master_table_params:
        field_dict['model_id'] = dimension_model_id
        field_dict['created_by'] = BK_USERNAME
        DmmModelField.objects.create(**field_dict)
        DmmModelFieldStage.objects.create(**field_dict)

    # 7) Relate the fact table to the dimension table on channel_id.
    dimension_relation_params = json.loads(
        '''{
        "model_id": %s,
        "field_name": "channel_id",
        "related_method": "left-join",
        "related_model_id": %s,
        "related_field_name": "channel_id"
    }'''
        % (MODEL_ID, DIM_MODEL_ID)
    )
    dimension_relation_params['model_id'] = fact_model_id
    dimension_relation_params['related_model_id'] = dimension_model_id
    # BUGFIX: originally assigned to ``field_dict`` (leftover loop variable).
    dimension_relation_params['created_by'] = BK_USERNAME
    DmmModelRelation.objects.create(**dimension_relation_params)

    # 8) Release the model: snapshot fields/atoms/indicators/relations into a version.
    datamodel_release_dict = json.loads(
        '''{
        "model_content": {
            "model_id": %s,
            "model_detail": {
                "indicators": [
                    {
                        "model_id": null,
                        "calculation_atom_name": "%s",
                        "description": "1d最大价格",
                        "aggregation_fields_alias": [
                        ],
                        "created_at": "2020-11-20 19:05:26",
                        "created_by": "admin",
                        "indicator_alias": "1d最大价格",
                        "scheduling_type": "batch",
                        "updated_at": "2020-11-20 19:05:26",
                        "filter_formula": "",
                        "parent_indicator_name": null,
                        "scheduling_content": {
                            "window_type": "fixed",
                            "count_freq": 1,
                            "dependency_config_type": "unified",
                            "schedule_period": "day",
                            "fixed_delay": 0,
                            "unified_config": {
                                "window_size": 1,
                                "dependency_rule": "all_finished",
                                "window_size_period": "day"
                            },
                            "advanced": {
                                "recovery_enable": false,
                                "recovery_times": 3,
                                "recovery_interval": "60m"
                            }
                        },
                        "updated_by": null,
                        "aggregation_fields": [
                        ],
                        "project_id": 3,
                        "indicator_name": "max_price_1d"
                    }
                ],
                "fields": [
                ],
                "model_relation": [
                ],
                "calculation_atoms": [
                ]
            },
            "description": "创建模型的描述",
            "tags": [
            ],
            "table_alias": "",
            "created_by": "admin",
            "publish_status": "published",
            "model_alias": "创建模型的中文名",
            "active_status": "active",
            "updated_at": "2020-11-20 19:05:26",
            "table_name": "",
            "latest_version": null,
            "model_type": "fact_table",
            "step_id": 5,
            "created_at": "2020-11-20 19:05:26",
            "project_id": 3,
            "model_name": "fact_model_name",
            "updated_by": null
        },
        "version_log": "v1.0.0",
        "version_id": "%s"
    }'''
        % (MODEL_ID, CALCULATION_ATOM_NAME, VERSION_ID)
    )
    # Join/extension metadata attached to each released field.
    fields_extra_infos = {
        'price': {},
        'channel_id': {
            'is_join_field': True,
            'is_extended_field': False,
            'is_generated_field': False,
            'join_field_name': None,
        },
        'channel_name': {
            'is_join_field': False,
            'is_extended_field': True,
            'is_generated_field': False,
            'join_field_name': 'channel_id',
        },
        'time': {},
    }
    for table_field_info in master_table_fields_params:
        table_field_info.update(fields_extra_infos.get(table_field_info.get('field_name'), {}))
    datamodel_release_dict['model_id'] = fact_model_id
    datamodel_release_dict['model_content']['model_detail']['fields'] = master_table_fields_params
    datamodel_release_dict['model_content']['model_detail']['calculation_atoms'] = [calc_atom_params]
    datamodel_release_dict['model_content']['model_detail']['indicators'] = [indicator_params]
    datamodel_release_dict['model_content']['model_detail']['model_relation'] = [dimension_relation_params]
    datamodel_release_dict['created_by'] = BK_USERNAME
    datamodel_release_object = DmmModelRelease(**datamodel_release_dict)
    datamodel_release_object.save()
    # Point the fact model at its newly released version.
    model_obj.latest_version = datamodel_release_object
    model_obj.save()
@pytest.fixture(scope='class')
def patch_application_console_build():
    """Stub Console.build so it always returns a fixed SQL statement."""
    fake_build = mock.Mock(return_value='SELECT * FROM unittest')
    with mock.patch('datamanage.pro.datamodel.application.jobs.console.Console.build', fake_build):
        yield
@pytest.fixture(scope='class')
def patch_sql_validate():
    """Stub SQLVerifier.check_res_schema to always report a passing validation."""
    ok_result = (True, {'source_columns': {}, 'condition_columns': []})
    fake_check = mock.Mock(return_value=ok_result)
    with mock.patch('datamanage.pro.datamodel.handlers.verifier.SQLVerifier.check_res_schema', fake_check):
        yield
@pytest.fixture(scope='function')
def patch_source_columns():
    """Stub get_sql_source_columns_mapping with a fixed price->source mapping."""
    fake_mapping = mock.Mock(return_value={'fields': {'price': ['source_field1']}})
    with mock.patch(
        target='datamanage.pro.datamodel.dmm.model_instance_manager.get_sql_source_columns_mapping', new=fake_mapping
    ):
        yield
@pytest.fixture(scope='function')
def patch_flow_create():
    """Stub DataflowApi.flows.create with a canned successful response."""
    response_payload = {
        'data': {
            'flow_id': FLOW_ID,
        },
        'result': True,
        'code': '1500200',
        'message': 'ok',
        'errors': {},
    }
    fake_create = mock.Mock(return_value=DataResponse(response_payload))
    with mock.patch(
        target='datamanage.pro.datamodel.dmm.model_instance_flow_manager.DataflowApi.flows.create', new=fake_create
    ):
        yield
@pytest.fixture(scope='function')
def patch_flow_nodes_create():
    """Stub DataflowApi.flows.create_by_nodes with a canned flow + node ids."""
    response_payload = {
        'data': {
            'flow_id': FLOW_ID,
            'node_ids': [1, 2, 3, 4, 5],
        },
        'result': True,
        'code': '1500200',
        'message': 'ok',
        'errors': {},
    }
    fake_create_by_nodes = mock.Mock(return_value=DataResponse(response_payload))
    with mock.patch(
        target='datamanage.pro.datamodel.views.application_model_views.DataflowApi.flows.create_by_nodes',
        new=fake_create_by_nodes,
    ):
        yield
@pytest.fixture(scope='function')
def patch_rt():
    """Stub MetaApi.result_tables.retrieve: echo back the requested result_table_id."""

    def fake_retrieve(params, *args, **kwargs):
        # Echo the requested id so callers see a consistent retrieval result.
        table_id = params.get('result_table_id')
        payload = {
            'data': {
                'result_table_id': table_id,
                'result_table_name': '',
                'result_table_name_alias': '',
            },
            'result': True,
            'code': '1500200',
            'message': 'ok',
            'errors': {},
        }
        return DataResponse(payload)

    patched_retrieve = mock.MagicMock(side_effect=fake_retrieve)
    with mock.patch(
        target='datamanage.pro.datamodel.views.application_model_views.MetaApi.result_tables.retrieve',
        new=patched_retrieve,
    ):
        yield
@pytest.fixture(scope='function')
def patch_rt_fields():
    """Stub MetaApi.result_tables.fields with fixed schemas for two source tables."""
    result_table_fields = {
        '591_source_table1': [
            {'field_name': 'price', 'field_type': 'long'},
            {'field_name': 'channel_id', 'field_type': 'string'},
        ],
        '591_source_dim_table1': [
            {'field_name': 'channel_id', 'field_type': 'string'},
            {'field_name': 'channel_name', 'field_type': 'string'},
        ],
    }

    def fake_fields(params, *args, **kwargs):
        # Unknown tables fall back to an empty field list.
        table_id = params.get('result_table_id')
        payload = {
            'data': result_table_fields.get(table_id, []),
            'result': True,
            'code': '1500200',
            'message': 'ok',
            'errors': {},
        }
        return DataResponse(payload)

    patched_fields = mock.MagicMock(side_effect=fake_fields)
    with mock.patch(
        target='datamanage.pro.datamodel.dmm.model_instance_manager.MetaApi.result_tables.fields',
        new=patched_fields,
    ):
        yield
@pytest.fixture(scope='class')
def init_model_info():
    """Create only the basic fact-model record (no fields, atoms or indicators)."""
    from datamanage.pro.datamodel.models.datamodel import DmmModelInfo

    model_params = json.loads(
        '''{
        "model_id": %s,
        "model_name": "fact_model_name",
        "model_alias": "创建模型的中文名",
        "model_type": "fact_table",
        "description": "创建模型的描述",
        "project_id": %s
    }'''
        % (MODEL_ID, PROJECT_ID)
    )
    model_params['created_at'] = datetime.now()
    model_params['created_by'] = BK_USERNAME
    DmmModelInfo(**model_params).save()
@pytest.fixture(scope='class')
def init_master_table():
    """Populate the staged master-table fields (price, channel_id, _time_) for MODEL_ID."""
    from datamanage.pro.datamodel.models.datamodel import (
        DmmModelFieldStage,
    )

    staged_field_list = json.loads(
        '''
    [
        {
            "field_name": "price",
            "field_alias": "道具价格",
            "field_index": 1,
            "field_type": "long",
            "field_category": "measure",
            "is_primary_key": false,
            "description": "道具价格",
            "field_constraint_content": null,
            "field_clean_content": {
                "clean_option": "SQL",
                "clean_content": "price as price"
            },
            "source_model_id": null,
            "source_field_name": null
        },
        {
            "field_name": "channel_id",
            "field_alias": "渠道号",
            "field_index": 2,
            "field_type": "string",
            "field_category": "dimension",
            "is_primary_key": false,
            "description": "渠道号",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        },
        {
            "field_name": "_time_",
            "field_alias": "时间字段",
            "field_index": 3,
            "field_type": "timestamp",
            "field_category": "dimension",
            "is_primary_key": false,
            "description": "平台内置时间字段,数据入库后将转换为可查询字段,比如 dtEventTime/dtEventTimeStamp/localtime",
            "field_constraint_content": [],
            "field_clean_content": null,
            "source_model_id": null,
            "source_field_name": null
        }
    ]'''
    )
    for staged_field in staged_field_list:
        staged_field['model_id'] = MODEL_ID
        staged_field['created_by'] = BK_USERNAME
        DmmModelFieldStage(**staged_field).save()
@pytest.fixture(scope='class')
def init_calculation_atom():
"""构建统计口径"""
from datamanage.pro.datamodel.models.indicator import DmmModelCalculationAtom
calc_atom_params = json.loads(
'''
{
"model_id": %s,
"project_id": %s,
"calculation_atom_name": "%s",
"calculation_atom_alias": "创建统计口径的中文名",
"description": "创建统计口径的描述",
"field_type": "long",
"calculation_content": {
"option": "TABLE",
"content": {
"calculation_field": "price",
"calculation_function": "max",
| |
net.add_lane("emser", "emexr",
StraightLane(np.array([491, -196]), np.array([501, -202]), line_types=[n, n], forbidden=True))
net.add_lane("emsel", "emwxl",
StraightLane(np.array([487, -196]), np.array([467, -210]), line_types=[n, n], forbidden=True))
net.add_lane("emsel", "emnxl",
StraightLane(np.array([487, -196]), np.array([491, -220]), line_types=[n, n], forbidden=True))
net.add_lane("emeer", "emnxr",
StraightLane(np.array([501, -214]), np.array([495, -220]), line_types=[n, n], forbidden=True))
net.add_lane("emeel", "emsxl",
StraightLane(np.array([501, -210]), np.array([483, -196]), line_types=[n, n], forbidden=True))
net.add_lane("emeel", "emwxl",
StraightLane(np.array([501, -210]), np.array([467, -210]), line_types=[n, n], forbidden=True))
"""
straight road of east
"""
net.add_lane("inter_em_2", "interse4",
StraightLane(np.array([475, -146]), np.array([475, -62]), line_types=[s, c]))
net.add_lane("inter_em_2", "interse4",
StraightLane(np.array([479, -146]), np.array([479, -62]), line_types=[s, s]))
net.add_lane("inter_em_2", "interse4",
StraightLane(np.array([483, -146]), np.array([483, -62]), line_types=[c, s]))
net.add_lane("inter_se_4", "interem2",
StraightLane(np.array([487, -62]), np.array([487, -146]), line_types=[c, s]))
net.add_lane("inter_se_4", "interem2",
StraightLane(np.array([491, -62]), np.array([491, -146]), line_types=[s, c]))
"""
crossroad of southeast
"""
net.add_lane("interse4", "sener",
StraightLane(np.array([475, -62]), np.array([475, -12]), line_types=[c, c], forbidden=True))
net.add_lane("interse4", "senem",
StraightLane(np.array([479, -62]), np.array([479, -12]), line_types=[c, c], forbidden=True))
net.add_lane("interse4", "senel",
StraightLane(np.array([483, -62]), np.array([483, -12]), line_types=[c, c], forbidden=True))
net.add_lane("senxl", "inter_se_4",
StraightLane(np.array([487, -12]), np.array([487, -62]), line_types=[c, s]))
net.add_lane("senxr", "inter_se_4",
StraightLane(np.array([491, -12]), np.array([491, -62]), line_types=[s, c]))
net.add_lane("sesxr", "inter_se_2",
StraightLane(np.array([475, 12]), np.array([475, 62]), line_types=[s, c]))
net.add_lane("sesxl", "inter_se_2",
StraightLane(np.array([479, 12]), np.array([479, 62]), line_types=[c, s]))
net.add_lane("inter_se_2", "sesx",
StraightLane(np.array([475, 62]), np.array([475, 112]), line_types=[s, c]))
net.add_lane("inter_se_2", "sesx",
StraightLane(np.array([479, 62]), np.array([479, 112]), line_types=[c, s]))
net.add_lane("interse2", "sesel",
StraightLane(np.array([483, 62]), np.array([483, 12]), line_types=[c, c], forbidden=True))
net.add_lane("interse2", "seser",
StraightLane(np.array([487, 62]), np.array([487, 12]), line_types=[c, c], forbidden=True))
net.add_lane("sese", "interse2",
StraightLane(np.array([483, 112]), np.array([483, 62]), line_types=[c, s]))
net.add_lane("sese", "interse2",
StraightLane(np.array([487, 112]), np.array([487, 62]), line_types=[s, c]))
net.add_lane("interse1", "sewel",
StraightLane(np.array([418, 0]), np.array([468, 0]), line_types=[c, c], forbidden=True))
net.add_lane("interse1", "sewer",
StraightLane(np.array([418, 4]), np.array([468, 4]), line_types=[c, c], forbidden=True))
net.add_lane("sewxr", "inter_se_1",
StraightLane(np.array([468, -4]), np.array([418, -4]), line_types=[c, s]))
net.add_lane("sewxl", "inter_se_1",
StraightLane(np.array([468, -8]), np.array([418, -8]), line_types=[s, c]))
net.add_lane("seexl", "inter_se_3",
StraightLane(np.array([496, 0]), np.array([546, 0]), line_types=[c, s]))
net.add_lane("seexr", "inter_se_3",
StraightLane(np.array([496, 4]), np.array([546, 4]), line_types=[s, c]))
net.add_lane("inter_se_3", "seex",
StraightLane(np.array([546, 0]), np.array([596, 0]), line_types=[c, s]))
net.add_lane("inter_se_3", "seex",
StraightLane(np.array([546, 4]), np.array([596, 4]), line_types=[s, c]))
net.add_lane("seee", "interse3",
StraightLane(np.array([596, -4]), np.array([546, -4]), line_types=[c, s]))
net.add_lane("seee", "interse3",
StraightLane(np.array([596, -8]), np.array([546, -8]), line_types=[s, c]))
net.add_lane("interse3", "seeel",
StraightLane(np.array([546, -4]), np.array([496, -4]), line_types=[c, c], forbidden=True))
net.add_lane("interse3", "seeer",
StraightLane(np.array([546, -8]), np.array([496, -8]), line_types=[c, c], forbidden=True))
# bellow: fulfill the turning lanes for vehicles to turn
net.add_lane("sewer", "sesxr",
StraightLane(np.array([468, 4]), np.array([475, 12]), line_types=[n, n], forbidden=True))
net.add_lane("sewel", "senxl",
StraightLane(np.array([468, 0]), np.array([487, -12]), line_types=[n, n], forbidden=True))
net.add_lane("sewel", "seexl",
StraightLane(np.array([468, 0]), np.array([496, 0]), line_types=[n, n], forbidden=True))
net.add_lane("seser", "seexr",
StraightLane(np.array([487, 12]), np.array([496, 4]), line_types=[n, n], forbidden=True))
net.add_lane("sesel", "sewxl",
StraightLane(np.array([483, 12]), np.array([468, -8]), line_types=[n, n], forbidden=True))
net.add_lane("sesel", "senxl",
StraightLane(np.array([483, 12]), np.array([487, -12]), line_types=[n, n], forbidden=True))
net.add_lane("seeer", "senxr",
StraightLane(np.array([496, -8]), np.array([491, -12]), line_types=[n, n], forbidden=True))
net.add_lane("seeel", "sesxl",
StraightLane(np.array([496, -4]), np.array([479, 12]), line_types=[n, n], forbidden=True))
net.add_lane("seeel", "sewxl",
StraightLane(np.array([496, -4]), np.array([468, -8]), line_types=[n, n], forbidden=True))
net.add_lane("sener", "sewxr",
StraightLane(np.array([475, -12]), np.array([468, -4]), line_types=[n, n], forbidden=True))
net.add_lane("senem", "sesxl",
StraightLane(np.array([479, -12]), np.array([479, 12]), line_types=[n, n], forbidden=True))
net.add_lane("senel", "seexl",
StraightLane(np.array([483, -12]), np.array([496, 0]), line_types=[n, n], forbidden=True))
"""
straight road of south
"""
net.add_lane("intersw_3", "intersm_1",
StraightLane(np.array([228, 0]), np.array([258, 0]), line_types=[c, s]))
net.add_lane("intersw_3", "intersm_1",
StraightLane(np.array([228, 4]), np.array([258, 4]), line_types=[s, c]))
net.add_lane("intersm1", "intersw3",
StraightLane(np.array([258, -4]), np.array([228, -4]), line_types=[c, s]))
net.add_lane("intersm1", "intersw3",
StraightLane(np.array([258, -8]), np.array([228, -8]), line_types=[s, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([125, -12]), np.array([120, -12]), line_types=[n, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([120, -12]), np.array([115, -8]), line_types=[n, c]))
net.add_lane("inter_sm_1", "inter_sm_2",
StraightLane(np.array([258, 0]), np.array([288, 0]), line_types=[c, s]))
net.add_lane("inter_sm_1", "inter_sm_2",
StraightLane(np.array([258, 4]), np.array([288, 4]), line_types=[s, c]))
net.add_lane("intersm2", "intersm1",
StraightLane(np.array([288, -4]), np.array([258, -4]), line_types=[c, s]))
net.add_lane("intersm2", "intersm1",
StraightLane(np.array([288, -8]), np.array([258, -8]), line_types=[s, s]))
net.add_lane("intersm2", "intersm1",
StraightLane(np.array([288, -12]), np.array([258, -12]), line_types=[s, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([155, -8]), np.array([152, -12]), line_types=[n, c]))
net.add_lane("inter_sm_2", "inter_sm_3",
StraightLane(np.array([288, 0]), np.array([318, 0]), line_types=[c, s]))
net.add_lane("inter_sm_2", "inter_sm_3",
StraightLane(np.array([288, 4]), np.array([318, 4]), line_types=[s, c]))
net.add_lane("intersm3", "intersm2",
StraightLane(np.array([318, -4]), np.array([288, -4]), line_types=[c, s]))
net.add_lane("intersm3", "intersm2",
StraightLane(np.array([318, -8]), np.array([288, -8]), line_types=[s, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([170, -12]), np.array([165, -12]), line_types=[n, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([165, -12]), np.array([160, -8]), line_types=[n, c]))
net.add_lane("inter_sm_3", "inter_sm_4",
StraightLane(np.array([318, 0]), np.array([348, 0]), line_types=[c, s]))
net.add_lane("inter_sm_3", "inter_sm_4",
StraightLane(np.array([318, 4]), np.array([348, 4]), line_types=[s, c]))
net.add_lane("intersm4", "intersm3",
StraightLane(np.array([348, -4]), np.array([318, -4]), line_types=[c, s]))
net.add_lane("intersm4", "intersm3",
StraightLane(np.array([348, -8]), np.array([318, -8]), line_types=[s, s]))
net.add_lane("intersm4", "intersm3",
StraightLane(np.array([348, -12]), np.array([318, -12]), line_types=[s, c]))
net.add_lane("inter_sm_4", "inter_sm_5",
StraightLane(np.array([348, 0]), np.array([378, 0]), line_types=[c, s]))
net.add_lane("inter_sm_4", "inter_sm_5",
StraightLane(np.array([348, 4]), np.array([378, 4]), line_types=[s, c]))
net.add_lane("intersm5", "intersm4",
StraightLane(np.array([378, -4]), np.array([348, -4]), line_types=[c, s]))
net.add_lane("intersm5", "intersm4",
StraightLane(np.array([378, -8]), np.array([348, -8]), line_types=[s, c]))
net.add_lane("inter_sm_5", "interse1",
StraightLane(np.array([378, 0]), np.array([418, 0]), line_types=[c, s]))
net.add_lane("inter_sm_5", "interse1",
StraightLane(np.array([378, 4]), np.array([418, 4]), line_types=[s, c]))
# net.add_lane(" ", " ",
# StraightLane(np.array([199, -8]), np.array([195, -12]), line_types=[n, c]))
net.add_lane("inter_se_1", "intersm5",
StraightLane(np.array([418, -4]), np.array([378, -4]), line_types=[c, s]))
net.add_lane("inter_se_1", "intersm5",
StraightLane(np.array([418, -8]), np.array([378, -8]), line_types=[s, s]))
net.add_lane("inter_se_1", "intersm5",
StraightLane(np.array([418, -12]), np.array([378, -12]), line_types=[s, c]))
road = Road(network=net, np_random=self.np_random)
green_time = 5
red_time = 8
green_flash_time = 2
yellow_time = 1
"""
southwest crossroad traffic lights
"""
self.traffic_lights["red_sw"] = [
RedLight(road, [150, 0], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [150, 4], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [167, 8], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [171, 8], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [178, -8], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [178, -4], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [159, -12], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [163, -12], red_time, green_time, green_flash_time, yellow_time, 0),
]
"""
west middle crossroad traffic lights
"""
self.traffic_lights["red_wm"] = [
RedLight(road, [150, -216], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [150, -220], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [167, -212], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [171, -212], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [180, -228], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [180, -224], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [159, -232], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [163, -232], red_time, green_time, green_flash_time, yellow_time, 0)
]
"""
northwest crossroad traffic light
"""
self.traffic_lights["red_nw"] = [
RedLight(road, [150, -428], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [150, -432], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [167, -424], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [171, -424], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [180, -448], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [180, -444], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [159, -452], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [163, -452], red_time, green_time, green_flash_time, yellow_time, 0)
]
"""
northeast crossroad traffic light
"""
self.traffic_lights["red_ne"] = [
RedLight(road, [471, -440], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [471, -436], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [471, -432], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [471, -428], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [471, -424], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [503, -448], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [503, -444], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [479, -456], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [483, -456], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [491, -420], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [495, -420], red_time, green_time, green_flash_time, yellow_time, 0)
]
"""
east middle crossroad traffic light
"""
self.traffic_lights["red_em"] = [
RedLight(road, [467, -202], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [467, -206], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [501, -214], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [501, -210], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [475, -220], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [479, -220], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [483, -220], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [487, -220], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [491, -196], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [487, -196], red_time, green_time, green_flash_time, yellow_time, 0)
]
"""
southeast crossroad traffic light
"""
self.traffic_lights["red_se"] = [
RedLight(road, [468, 0], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [468, 4], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [496, -8], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [496, -4], red_time, green_time, green_flash_time, yellow_time, 1),
RedLight(road, [475, -12], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [479, -12], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [483, -12], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [483, 12], red_time, green_time, green_flash_time, yellow_time, 0),
RedLight(road, [487, 12], red_time, green_time, green_flash_time, yellow_time, 0)
]
self.road = road
# for lane_index in road.network.LANES:
# _from, _to, _id = lane_index
# _before = None
# next_to = None
# lane = self.road.network.get_lane(lane_index)
# try:
# next_to = list(self.road.network.graph[_to].keys())[
# np.random.randint(len(self.road.network.graph[_to]))]
# if len(self.road.network.graph[_from][_to]) <= len(self.road.network.graph[_to][next_to]):
# next_id = _id
# | |
the parameter list
Returns
dict: Dictionary containing featurizer specific metadata as a subdict under the keys
['ECFPSpecific','AutoencoderSpecific']
"""
feat_metadata = {}
# MJT: I changed params.featurizer in this instance to self.feat_type to be syntactically consistent
if self.feat_type == 'ecfp':
ecfp_params = dict(ecfp_radius = params.ecfp_radius,
ecfp_size = params.ecfp_size)
feat_metadata['ECFPSpecific'] = ecfp_params
elif self.feat_type == 'graphconv':
# No graph conv specific params at present
pass
elif self.feat_type == 'molvae':
# TODO: If the parameter name for the model file changes to 'autoencoder_model_key', change it below.
mol_vae_params = {'autoencoder_model_key': params.mol_vae_model_file}
feat_metadata['AutoencoderSpecific'] = mol_vae_params
return feat_metadata
# ****************************************************************************************
class PersistentFeaturization(Featurization):
    """Featurization subclass for feature sets that are persisted to storage.

    Intended for featurizers whose features are expensive to compute or map
    (CPU- or memory-intensive), such as precomputed descriptor tables.
    DescriptorFeaturization is currently the only concrete subclass; others
    (e.g. UMAPDescriptorFeaturization) are planned.
    """

    def __init__(self, params):
        """Initialize a PersistentFeaturization object.

        Subclasses typically load their persistent data here (for example, a
        table of descriptors).

        Args:
            params (Namespace): Parameters used to instantiate the featurizer.
        """
        super().__init__(params)

    # ****************************************************************************************
    def extract_prefeaturized_data(self, merged_dset_df, model_dataset):
        """Attempt to extract prefeaturized data for the given dataset.

        Args:
            merged_dset_df (DataFrame): Dataset merged with the featurizers.
            model_dataset (ModelDataset): Object containing the dataset to be featurized.

        Raises:
            NotImplementedError: There is no generic implementation; each
                persistent featurizer subclass must provide its own.
        """
        # TODO: Is it possible to implement this generically for all persistent featurizers?
        raise NotImplementedError

    # ****************************************************************************************
    def featurize_data(self, dset_df, model_dataset):
        """Perform featurization on the given dataset.

        Args:
            dset_df (DataFrame): Table of data to be featurized. Must contain at
                minimum the compound ID and assay value columns; some featurizers
                also require a SMILES string column.
            model_dataset (ModelDataset): Object containing the dataset to be featurized.

        Returns:
            Tuple of (features, ids, vals, attr):
                features (np.array): Feature matrix.
                ids (pd.DataFrame): Compound IDs, or SMILES strings if needed for splitting.
                attr (pd.DataFrame): SMILES strings indexed by compound IDs.
                vals (np.array): Array of response values.

        Raises:
            NotImplementedError: There is no generic implementation; each
                persistent featurizer subclass must provide its own.
        """
        raise NotImplementedError

    # ****************************************************************************************
    def create_feature_transformer(self, dataset):
        """Return DeepChem transformers for feature scaling and centering.

        Args:
            dataset (deepchem.Dataset): Featurized dataset.

        Returns:
            list: Always empty -- no feature transformation is fit for
                persistent featurizations. (NOTE(review): the original left a
                TODO asking why; presumably the stored descriptors are already
                scaled upstream -- confirm before changing.)
        """
        return []
# ****************************************************************************************
class DescriptorFeaturization(PersistentFeaturization):
"""Subclass for featurizers that map sets of (usually) precomputed descriptors to compound IDs; the resulting merged
dataset is persisted to the filesystem or datastore.
Attributes:
Set in __init_:
feat_type (str): Type of featurizer, set in super.(__init__)
descriptor_type (str): The type of descriptor
descriptor_key (str): The path to the descriptor featurization matrix if it saved to a file,
or the key to the file in the Datastore
descriptor_base (str/path): The base path to the descriptor featurization matrix
precomp_descr_table (pd.DataFrame): initialized as an empty DataFrame, will be overridden to contain the
full descriptor table
Class attributes:
supported_descriptor_types
all_desc_col
"""
supported_descriptor_types = []
desc_type_cols = {}
desc_type_scaled = {}
desc_type_source = {}
# ****************************************************************************************
# (ksm): Made this a class method. A DescriptorFeaturization instance only supports
# one descriptor_type, so making the list of supported descriptor types an instance attribute
# was misleading.
@classmethod
def load_descriptor_spec(cls, desc_spec_bucket, desc_spec_key) :
"""Read a descriptor specification table from the datastore or the filesystem.
The table is a CSV file with the following columns:
descr_type: A string specifying a descriptor source/program and a subset of descriptor columns
source: Name of the program/package that generates the descriptors
scaled: Binary indicator for whether subset of descriptor values are scaled by molecule's atom count
descriptors: A semicolon separated list of descriptor columns.
The values in the table are used to set class variables desc_type_cols, desc_type_source and desc_type_scaled.
Args:
desc_spec_bucket : bucket where descriptor spec is located
desc_spec_key: data store key, or full file path to locate descriptor spec object
Returns:
None
Side effects:
Sets the following class variables:
cls.desc_type_cols -> map from decriptor types to their associated descriptor column names
cls.desc_type_source -> map from decriptor types to the program/package that generates them
cls.desc_type_scaled -> map from decriptor types to boolean indicators of whether some descriptor
values are scaled.
cls.supported_descriptor_types -> the list of available descriptor types
"""
try:
ds_client = dsf.config_client()
except Exception as e:
print('Exception when trying to connect to the datastore:')
print(e)
ds_client = None
cls.desc_type_cols = {}
cls.desc_type_scaled = {}
cls.desc_type_source = {}
# If a datastore client is not detected or a datastore bucket is not specified
# assume that the ds_key is a full path pointer to a file on the file system
if ds_client == None or desc_spec_bucket == '':
desc_spec_df = pd.read_csv(desc_spec_key, index_col=False)
else :
# Try the descriptor_spec_key parameter first, then fall back to package file
try:
desc_spec_df = dsf.retrieve_dataset_by_datasetkey(desc_spec_key, desc_spec_bucket, ds_client)
except:
script_dir = os.path.dirname(os.path.realpath(__file__))
desc_spec_key_fallback = script_dir+'/../data/descriptor_sets_sources_by_descr_type.csv'
desc_spec_df = dsf.retrieve_dataset_by_datasetkey(desc_spec_key_fallback, desc_spec_bucket, ds_client)
for desc_type, source, scaled, descriptors in zip(desc_spec_df.descr_type.values,
desc_spec_df.source.values,
desc_spec_df.scaled.values,
desc_spec_df.descriptors.values):
cls.desc_type_cols[desc_type] = descriptors.split(';')
cls.desc_type_source[desc_type] = source
cls.desc_type_scaled[desc_type] = bool(scaled)
cls.supported_descriptor_types = list(cls.desc_type_source.keys())
def __init__(self, params):
"""Initializes a DescriptorFeaturization object. This is a good place to load data used by the featurizer,
such as a table of descriptors.
Args:
params (Namespace): Contains parameters to be used to instantiate a featurizer.
Side effects:
Sets the following attributes of DescriptorFeaturization:
feat_type (str): Type of featurizer, set in __init__
descriptor_type (str): The type of descriptor
descriptor_key (str): The path to the precomputed descriptor table if it is saved to a file, or
the key to the file in the Datastore
descriptor_base (str/path): The base name of the precomputed descriptor table file, without the
directory and extension
precomp_descr_table (pd.DataFrame): The precomputed descriptor table itself. Initialized as an empty
DataFrame, will be replaced later on first call to featurize_data().
desc_id_col (str): Name of the column in precomp_descr_table containing compound IDs
desc_smiles_col (str): Name of the column in precomp_descr_table, if any, containing compound SMILES
"""
super().__init__(params)
cls = self.__class__
# JEA: load mapping between descriptor types and lists of descriptors
if len(cls.supported_descriptor_types) == 0:
# Try the descriptor_spec_key parameter first, then fall back to package file
try:
cls.load_descriptor_spec(params.descriptor_spec_bucket, params.descriptor_spec_key)
except:
script_dir = os.path.dirname(os.path.realpath(__file__))
desc_spec_key_fallback = script_dir+'/../data/descriptor_sets_sources_by_descr_type.csv'
cls.load_descriptor_spec(params.descriptor_spec_bucket, desc_spec_key_fallback)
if not params.descriptor_type in cls.supported_descriptor_types:
raise ValueError("Unsupported descriptor type %s" % params.descriptor_type)
self.descriptor_type = params.descriptor_type
self.descriptor_key = params.descriptor_key
if self.descriptor_key is not None:
self.descriptor_base = os.path.splitext(os.path.basename(params.descriptor_key))[0]
else:
self.descriptor_base = None
self.desc_id_col = None
self.desc_smiles_col = None
# Load an empty descriptor table. We'll load the real table later the first time we need it.
self.precomp_descr_table = pd.DataFrame()
# ****************************************************************************************
def __str__(self):
"""Returns a human-readable description of this Featurization object.
Returns:
(str): Describes the featurization type
"""
return "DescriptorFeaturization with %s descriptors" % self.descriptor_type
# ****************************************************************************************
def extract_prefeaturized_data(self, merged_dset_df, model_dataset):
"""Attempts to retrieve prefeaturized data for the given dataset.
Args:
merged_dset_df (pd.DataFrame): dataset merged with the featurizers
model_dataset (ModelDataset): Object containing the dataset to be featurized
# TODO: Remove model_dataset call once params.response_cols is properly set
Returns:
Tuple of (features, ids, vals, attr).
features (np.array): Feature matrix.
ids (pd.DataFrame): compound IDs or SMILES strings if needed for splitting.
attr (pd.DataFrame): dataframe containing SMILES strings indexed by compound IDs.
vals (np.array): array of response values.
"""
model_dataset.check_task_columns(merged_dset_df)
user_specified_features = self.get_feature_columns()
featurizer_obj = dc.feat.UserDefinedFeaturizer(user_specified_features)
features = dc.data.data_loader.get_user_specified_features(merged_dset_df, featurizer=featurizer_obj,
verbose=False)
features = features.astype(float)
ids = merged_dset_df[model_dataset.params.id_col]
vals = merged_dset_df[model_dataset.params.response_cols].values
attr = get_dataset_attributes(merged_dset_df, model_dataset.params)
return features, ids, vals, attr
# ****************************************************************************************
def load_descriptor_table(self, params):
"""
Load the table of precomputed feature values for the descriptor type specified in params, from
the datastore_key or path specified by params.descriptor_key and params.descriptor_bucket. Will try
to load the table | |
'''
TODO:
- avg training loss
- baseline? ie what is the prediction
'''
from tokenizers import CharBPETokenizer
import torch
import torch.nn as nn
from torchtext import data
from picotext.model import RNN_lm, RNN_tr
from picotext.utils import batchify, get_batch, repackage_hidden
from picotext.utils import load_pretrained_tokenizer, load_config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# TODO: should work w/o pretraining
'''
We trained the LM w/o padding, but for classification we will. Note that the RNN is length independent, i.e. it is a (stack of) hidden layers unrolled along the sequence. So for classification, we should be able to use padding.
'''
def train_fn():
    """Run one epoch of classifier training over ``train_iter``.

    Relies on module-level globals: ``model``, ``train_iter``, ``batch_size``,
    ``optimizer``, ``criterion``, ``clip``, ``log_interval`` and ``epoch``.
    Returns None; the running average loss is printed every ``log_interval``
    batches as a side effect.
    """
    # Turn on training mode which enables dropout.
    model.train() # defaults to train anyway, here to make this explicit
    total_loss, n = 0., 0  # NOTE(review): n is never used in this function
    hidden = model.init_hidden(batch_size)
    for i, batch in enumerate(train_iter):
        # print(batch.text[0].shape)
        # print(hidden.shape)
        if len(batch) != batch_size:
            # The overflow examples' batch is smaller, ignore. Otherwise creates
            # a RuntimeError (hidden state was sized for batch_size). Known
            # problem w/ data.BucketIterator():
            # https://github.com/pytorch/text/issues/640
            # https://github.com/pytorch/text/issues/438
            # stackoverflow.com/questions/54307824
            print('Damn')
            continue
        # print(batch.text[0].shape)
        # if batch.text[0].shape[-1] != 100:
        #     break
        # To overfit one batch do
        # ... in [next(enumerate(range(0, train_data.size(0) - 1, bptt)))]
        # Then revert
        # ... in enumerate(range(0, train_data.size(0) - 1, bptt))
        data_, targets = batch.text[0], batch.label
        # print(data, targets)
        # print(len(data[:, 0]))
        optimizer.zero_grad()
        # model.zero_grad()
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to start of the dataset.
        hidden = repackage_hidden(hidden)
        '''
        TODO: the vocab is built from the training set but a minimal one and so
        we can encounter words that are not present in the embedding lookup table
        IndexError: index out of range in self
        https://discuss.pytorch.org/t/embeddings-index-out-of-range-error/12582/4
        https://stackoverflow.com/questions/50747947/embedding-in-pytorch
        once we use the full data this should not be an issue
        '''
        #try:
        output, hidden = model(data_, hidden)
        #except IndexError:
        #    continue
        loss = criterion(output.squeeze(1), targets)
        loss.backward()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        total_loss += loss.item()
        # for p in model.parameters():
        #     p.data.add_(-lr, p.grad)
        # total_loss += loss.item()
        # Log the average loss over the last log_interval batches, then reset.
        if (i % log_interval == 0) and (i != 0):
            print(epoch, i, round(total_loss / log_interval, 4))
            total_loss = 0.
            # cur_loss = total_loss / log_interval
            # elapsed = time.time() - start_time
            # print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
            #         'loss {:5.2f} | ppl {:8.2f}'.format(
            #     epoch, batch, len(train_data) // bptt, lr,
            #     elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)))
            # total_loss = 0
            # start_time = time.time()
def evaluate():
    """Evaluate the classifier on ``dev_iter`` and print the average dev loss.

    Relies on module-level globals: ``model``, ``dev_iter``, ``batch_size``
    and ``criterion``. Returns None; results are printed as side effects.

    NOTE(review): ``total_acc`` is never accumulated (the line is commented
    out), so ``acc_avg`` is always 0; and the final accuracy print uses only
    the LAST batch's output, not the whole dev set -- confirm before relying
    on the printed accuracy.
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss, total_acc, n = 0., 0., 0
    hidden = model.init_hidden(batch_size)
    with torch.no_grad():
        for i, batch in enumerate(dev_iter):
            # Skip the smaller overflow batch; hidden state is sized for batch_size.
            if len(batch) != batch_size:
                print('Damn')
                continue
            data_, targets = batch.text[0], batch.label
            output, hidden = model(data_, hidden)
            hidden = repackage_hidden(hidden)
            # loss = criterion(output, targets)
            # Weight each batch's loss by its sequence length so the average
            # below is per-element rather than per-batch.
            total_loss += len(data_) * criterion(output.squeeze(1), targets).item()
            n += len(batch)
            #total_acc += binary_accuracy(output.squeeze(1), targets).item()
    loss_avg = round(total_loss / n, 4)
    acc_avg = round(total_acc / n, 4)
    print('Dev loss:', loss_avg)
    # print('Dev acc: ', acc_avg)
    print('Acc')
    # Accuracy of the last batch only (see NOTE in docstring).
    print(torch.sum(torch.round(output).T==targets).item()/len(targets))
def binary_accuracy(preds, y):
    """Compute the fraction of correct binary predictions in a batch.

    Predictions are rounded to the nearest integer (i.e. thresholded at 0.5)
    before comparison, so the result is a per-batch accuracy in [0, 1] --
    e.g. 8/10 correct returns 0.8, NOT 8.

    Args:
        preds: Tensor of predicted probabilities.
        y: Tensor of ground-truth labels (0./1.).

    Returns:
        Scalar tensor holding the accuracy.
    """
    hits = (torch.round(preds).T == y).float()
    return hits.sum() / len(hits)
# --- Tokenizer and hyperparameter setup -------------------------------------
# Load the BPE tokenizer that was used to train the language model, so the
# classifier uses the exact same numericalization.
tokenizer = load_pretrained_tokenizer(CharBPETokenizer, '/Users/phi/Dropbox/repos_git/picotext/picotext/tokenizers/uniref50.full.dayhoff.vocab30k.freq5')
ntokens = len(tokenizer.get_vocab())
print(f'Found {ntokens} tokens')
# Single output unit for binary classification (BCELoss below).
nclass = 1
# Instead of ntokens we pass in nclass here
# init_args =['GRU', nclass, emsize, nhid, nlayers, dropout, tied]
batch_size = 250
# Can be a list w/ sizes for train, dev, test -- but we'd need to rewrite
# train and evaluate fn
bptt = 30
clip = 0.5
log_interval = 25
# NOTE(review): lr is defined here but the optimizers below hard-code their
# own learning rates; this variable appears unused.
lr = 3e-4 #20
best_val_loss = None
epochs = 10
save = 'foo'
emsize = 100
nhid = 500
nlayers = 2
dropout = 0.5
tied = False
# NOTE(review): 'save' is assigned twice; this second assignment wins.
save = 'foo.model'
# Constructor arguments for the pretrained language model backbone (RNN_lm
# signature); passed to RNN_tr below.
init_args = {
    'rnn_type': 'GRU',
    'ntoken': ntokens,
    'ninp': emsize,
    'nhid': nhid,
    'nlayers': nlayers,
    'dropout': dropout,
    'tie_weights': tied,
}
# --- Data pipeline (torchtext) ----------------------------------------------
# TODO:
# Rewrite this entire thing: https://github.com/pytorch/text/issues/664
# We have to use the same numericalization as in the example before.
TEXT = data.Field(
    sequential=True,
    include_lengths=True,
    use_vocab=True,
    tokenize=lambda x : tokenizer.encode(x).tokens)
LABELS = data.LabelField(dtype=torch.float, is_target=True) # , is_target=True
NAMES = data.RawField(is_target=False)
# Fields are matched to columns left to right in the underlying table.
fields=[('name', NAMES), ('label', LABELS), ('text', TEXT)]
train, dev, test = data.TabularDataset.splits(
    path='/Users/phi/Dropbox/projects/picotext/journal/2020-05-23T1315/tmp/processed', format='CSV', fields=fields,
    train='train.csv', validation='dev.csv', test='test.csv')
TEXT.build_vocab() # We'll fill this w/ the tokenizer
# https://github.com/pytorch/text/issues/358
# TEXT.vocab.itos[1] ... '<pad>'
# TEXT.vocab.itos[0] ... '<unk>'
# TEXT.vocab.itos[:10]
# ['<unk>', '<pad>', 33, 1, 35, 43, 28, 32, 48, 45]
# Make sure the numericalisation is the same used in the tokenizer AND
# thus across models the numericalisation is the same.
TEXT.vocab.stoi = tokenizer.get_vocab()
# d = {k: v for k, v in sorted(tokenizer.get_vocab().items(), key=lambda item: item[1])}
'''
TODO: missing is the <pad> token
{'<unk>': 0,
'a': 1,
...
'f': 6,
'e</w>': 7,
...
'a</w>': 12,
'''
# Sort tokenizer dict by value, take keys, transfer to TEXT field.
TEXT.vocab.itos = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda item: item[1])]
# What's in the bag? -- TEXT.vocab.itos[:10]
# ['<unk>', 'a', 'b', 'c', 'd', 'e', 'f', 'e</w>', 'c</w>', 'd</w>']
LABELS.build_vocab(train)
# Sanity check: numericalisation must come from the tokenizer, not be random.
a = [k for k, v in sorted(TEXT.vocab.stoi.items(), key=lambda item: item[1])]
b = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda item: item[1])]
assert a == b
# https://github.com/pytorch/text/issues/641
train_iter, dev_iter, test_iter = data.BucketIterator.splits(
    (train, dev, test),
    batch_size=batch_size,
    # batch_sizes=(100, 100, 100),
    sort_key=lambda x: len(x.text),
    sort_within_batch=True, # this really allows length bucketing
    device=device)
# BucketIterator will reorder the samples on each iteration, i.e. calling the
# following line twice will result in two reorderings of the samples.
for i in train_iter: pass
# --- Model, loss, optimizer and training loop -------------------------------
# Load model
# https://pytorch.org/tutorials/beginner/saving_loading_models.html
pretrained_model = torch.load('/Users/phi/data_local/picotext/models/language.45.model', map_location=torch.device('cpu'))
model = RNN_tr(init_args, nclass, pretrained_model).to(device)
# OR load a new model w/ random weights
# model = RNN_tr(init_args, nclass).to(device)
# Freeze all layers
# https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html#initialize-and-reshape-the-networks
'''
for name, param in model.named_parameters():
    if not name in ['decoder.weight', 'decoder.bias']:
        param.requires_grad = False
    print(f'{param.requires_grad}\t{name}')
'''
# TODO: thaw layers iteratively
# optimizer_ft.add_param_group?
# Binary cross-entropy; assumes the model output is already a probability
# (sigmoid applied inside the model) -- TODO confirm, else use BCEWithLogitsLoss.
criterion = nn.BCELoss().to(device)
# WithLogits?
# https://discuss.pytorch.org/t/understanding-nllloss-function/23702
# https://discuss.pytorch.org/t/cross-entropy-with-one-hot-targets/13580/4
optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
# optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# TODO: scheduler
# https://github.com/davidtvs/pytorch-lr-finder/issues/49
from torch_lr_finder import LRFinder
# NOTE(review): this rebinds 'optimizer' -- the training loop below runs with
# this lr=1e-7, weight_decay=1e-2 optimizer, not the lr=3e-4 one above.
# Looks like a leftover from LR-range-finding; confirm intent.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-7, weight_decay=1e-2)
lr_finder = LRFinder(model, optimizer, criterion, device="cpu")
lr_finder.range_test(train_iter, end_lr=100, num_iter=100)
lr_finder.plot() # to inspect the loss-learning rate graph
lr_finder.reset() # to reset the model and optimizer to their initial state
# Cyclical learning rates
# https://www.jeremyjordan.me/nn-learning-rate/
# https://pytorch.org/docs/stable/optim.html
# https://github.com/bckenstler/CLR
# scheduler = torch.optim.lr_scheduler.CyclicLR(
#     optimizer, base_lr=0.001, max_lr=0.1)
# scheduler.step() # instead of optimizer.step()
import pdb, traceback, sys
# Main loop: one training pass plus one dev-set evaluation per epoch.
for epoch in range(1, epochs+1):
    # try:
    train_fn()
    evaluate()
    # except RuntimeError:
    #     extype, value, tb = sys.exc_info()
    #     traceback.print_exc()
    #     pdb.post_mortem(tb)
'''
~/miniconda3/envs/picotext/lib/python3.7/site-packages/torch/nn/modules/rnn.py in check_hidden_size(self, hx, expected_hidden_size, msg)
185 # type: (Tensor, Tuple[int, int, int], str) -> None
186 if hx.size() != expected_hidden_size:
--> 187 raise RuntimeError(msg.format(expected_hidden_size, tuple(hx.size())))
188
189 def check_forward_args(self, input, hidden, batch_sizes):
RuntimeError: Expected hidden size (2, 85, 100), got (2, 100, 100)
'''
# Now transfer these weights and train classifier
# https://discuss.pytorch.org/t/does-deepcopying-optimizer-of-one-model-works-across-the-model-or-should-i-create-new-optimizer-every-time/14359
# https://discuss.pytorch.org/t/transfer-learning-of-weights-to-one-model-to-another/23962
# https://discuss.pytorch.org/t/copy-weights-only-from-a-networks-parameters/5841/2?u=ptrblck
'''
so really we just copy weights, freeze stuff, get a new optimizer and go
http://seba1511.net/tutorials/beginner/transfer_learning_tutorial.html#finetuning-the-convnet
'''
'''
# Load pretrained weights
model2 = RNN_lm('GRU', ntokens, emsize, nhid, nlayers, dropout, tied).to(device)
model2.load_state_dict(model.state_dict()) # <All keys matched successfully>
# Change output layer
insize = model.decoder.in_features
model2.decoder = nn.Linear(insize, 1)
model2.forward = lambda x: print(x)
def foo(x):
return x
model2.forward = foo
# https://discuss.pytorch.org/t/are-there-any-recommended-methods-to-clone-a-model/483/11
import copy
m2 = copy.deepcopy(model)
n_classes = 2
m2.decoder = nn.Linear(model.decoder.in_features, n_classes)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return 'whoop'
# decoded = decoded.view(-1, self.ntoken)
# return F.log_softmax(decoded, dim=1), hidden
m2.forward = forward
m2(m2, batch, hidden)
'''
'''
# Observe that all parameters are being optimized
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)
'''
# batch, targets = get_batch(train_data, 0)
# emb = model2.encoder(batch)
# hidden = model.init_hidden(batch_size)
# output, hidden = model2.rnn(emb, hidden)
# model2.decoder(output)[-1].shape
# https://discuss.pytorch.org/t/how-to-modify-a-pretrained-model/60509
# https://discuss.pytorch.org/t/modify-forward-of-pretrained-model/52530
# https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
# https://heartbeat.fritz.ai/transfer-learning-with-pytorch-cfcb69016c72
# https://brsoff.github.io/tutorials/beginner/transfer_learning_tutorial.html
# https://github.com/pytorch/tutorials/blob/master/beginner_source/transfer_learning_tutorial.py
# https://github.com/pytorch/tutorials/blob/master/beginner_source/transfer_learning_tutorial.py
# https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html
# https://www.youtube.com/watch?v=K0j9AqcFsiw
# Python Pytorch Tutorials # 1 Transfer Learning : DataLoaders Pytorch
'''
See "Freezing the convolutional layers & replacing the fully connected layers with a custom classifier" -- https://heartbeat.fritz.ai/transfer-learning-with-pytorch-cfcb69016c72
They call it "Reshaping" the model:
> Now to the most interesting part. Here is where we handle the reshaping of each network. Note, this is not an automatic procedure and is unique to | |
# -*- coding: utf-8 -*-
"""Developer convenience functions for ibs (detections).
TODO: need to split up into sub modules:
consistency_checks
feasibility_fixes
move the export stuff to dbio
then there are also convineience functions that need to be ordered at least
within this file
"""
import logging
from os.path import exists, expanduser, join, abspath
import numpy as np
import utool as ut
import cv2
from wbia.control import controller_inject
from wbia.other.detectfuncs import (
general_parse_gt,
general_get_imageset_gids,
localizer_parse_pred,
general_overlap,
)
from wbia.other.detectcore import (
nms,
classifier_visualize_training_localizations,
_bootstrap_mine,
)
# Inject utool functions
(print, rrr, profile) = ut.inject2(__name__, '[other.detectgrave]')
logger = logging.getLogger('wbia')
CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator(
__name__
)
@register_ibs_method
def bootstrap_pca_train(
    ibs, dims=64, pca_limit=500000, ann_batch=50, output_path=None, **kwargs
):
    """Train a PCA projection plus an Annoy nearest-neighbor index over
    localization feature vectors of the TRAIN_SET imageset.

    Fits a StandardScaler and an IncrementalPCA (``dims`` components) on at
    most ``pca_limit`` ResNet feature vectors, then feeds the PCA-projected
    vectors of every image (gathered ``ann_batch`` images at a time to bound
    memory) into an Annoy index.

    Args:
        ibs: wbia controller instance.
        dims (int): number of PCA components == ANN item-vector length.
        pca_limit (int): max number of vectors used to fit scaler/PCA.
        ann_batch (int): images per ANN-building round.
        output_path (str): destination directory; defaults to
            ~/code/wbia/models.

    Returns:
        str: ``output_path`` containing ``forest.pca`` (pickled tuple of
        PCA model, scaler, and an index -> (gid, offset) manifest) and
        ``forest.ann`` (the saved Annoy index).
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.decomposition import IncrementalPCA
    from annoy import AnnoyIndex
    import random

    def _get_data(depc, gid_list, limit=None, shuffle=False):
        """Stack localization feature vectors for ``gid_list``.

        Returns ``(total, data_list, index_list)`` where ``index_list[i]``
        is the ``(gid, offset)`` provenance of row ``i`` of ``data_list``.
        When ``limit`` is given, at most ``limit`` rows are returned.
        """
        gid_list_ = gid_list[:]
        if shuffle:
            random.shuffle(gid_list_)
        config = {
            'algo': '_COMBINED',
            'features': True,
            'feature2_algo': 'resnet',
        }
        total = 0
        features_list = []
        index_list = []
        gid_iter = ut.ProgIter(gid_list_, lbl='collect feature vectors', bs=True)
        for gid in gid_iter:
            if limit is not None and total >= limit:
                break
            feature_list = depc.get_property(
                'localizations_features', gid, 'vector', config=config
            )
            total += len(feature_list)
            index_list += [(gid, offset) for offset in range(len(feature_list))]
            features_list.append(feature_list)
        logger.info('\nUsed %d images to mine %d features' % (len(features_list), total))
        data_list = np.vstack(features_list)
        # BUGFIX: guard against limit=None.  The ANN rounds below call
        # _get_data without a limit, and `len(data_list) > None` raises a
        # TypeError on Python 3.
        if limit is not None and len(data_list) > limit:
            data_list = data_list[:limit]
            index_list = index_list[:limit]
        assert len(data_list) == len(index_list)
        features_list = None  # release the un-stacked copies early
        return total, data_list, index_list

    gid_list = general_get_imageset_gids(ibs, 'TRAIN_SET', **kwargs)

    # Gather (up to pca_limit, shuffled) feature vectors for model fitting
    depc = ibs.depc_image
    total, data_list, index_list = _get_data(depc, gid_list, pca_limit, True)
    logger.info(data_list.shape)

    # Normalize data
    logger.info('Fit Scaler')
    scaler = StandardScaler()
    scaler.fit(data_list)
    data_list = scaler.transform(data_list)

    # Fit PCA on the normalized vectors
    logger.info('Fit PCA')
    pca_model = IncrementalPCA(n_components=dims)
    pca_model.fit(data_list)
    pca_quality = pca_model.explained_variance_ratio_.sum() * 100.0
    logger.info('PCA Variance Quality: %0.04f %%' % (pca_quality,))

    # Add every image's PCA-projected vectors to the ANN index, in batches
    index = 0
    ann_model = AnnoyIndex(dims)  # Length of item vector that will be indexed
    ann_rounds = int(np.ceil(float(len(gid_list)) / ann_batch))
    manifest_dict = {}
    for ann_round in range(ann_rounds):
        start_index = ann_round * ann_batch
        stop_index = (ann_round + 1) * ann_batch
        assert start_index < len(gid_list)
        stop_index = min(stop_index, len(gid_list))
        logger.info('Slicing index range: [%r, %r)' % (start_index, stop_index))
        # Slice gids and get feature data
        gid_list_ = gid_list[start_index:stop_index]
        total, data_list, index_list = _get_data(depc, gid_list_)
        # Scale, then project down into the PCA space
        data_list = scaler.transform(data_list)
        data_list_ = pca_model.transform(data_list)
        zipped = zip(index_list, data_list_)
        data_iter = ut.ProgIter(zipped, lbl='add vectors to ANN model', bs=True)
        for (gid, offset), feature in data_iter:
            ann_model.add_item(index, feature)
            manifest_dict[index] = (
                gid,
                offset,
            )
            index += 1

    # Build forest: roughly one tree per 100k vectors, but always at least
    # one.  BUGFIX: the old `index // 100000` requested 0 trees whenever
    # fewer than 100k vectors were indexed.
    trees = max(1, index // 100000)
    logger.info('Build ANN model using %d feature vectors and %d trees' % (index, trees))
    ann_model.build(trees)

    # Persist the scaler/PCA/manifest tuple and the ANN forest
    if output_path is None:
        output_path = abspath(expanduser(join('~', 'code', 'wbia', 'models')))
    scaler_filename = 'forest.pca'
    scaler_filepath = join(output_path, scaler_filename)
    logger.info('Saving scaler model to: %r' % (scaler_filepath,))
    model_tup = (
        pca_model,
        scaler,
        manifest_dict,
    )
    ut.save_cPkl(scaler_filepath, model_tup)
    forest_filename = 'forest.ann'
    forest_filepath = join(output_path, forest_filename)
    logger.info('Saving ANN model to: %r' % (forest_filepath,))
    ann_model.save(forest_filepath)
    return output_path
@register_ibs_method
def bootstrap_pca_test(
    ibs,
    dims=64,
    pca_limit=500000,
    ann_batch=50,
    model_path=None,
    output_path=None,
    neighbors=1000,
    nms_thresh=0.5,
    min_confidence=0.3,
    **kwargs,
):
    """Qualitatively test a PCA + ANN model trained by bootstrap_pca_train.

    For the best- and worst-overlapping prediction of each TRAIN_SET image,
    retrieves its ``neighbors`` approximate nearest neighbors from the ANN
    index, NMS-suppresses duplicates per neighbor image, and writes a
    horizontal strip of query + neighbor chips to ``output_path`` as PNGs.

    Args:
        ibs: wbia controller instance.
        dims (int): ANN item-vector length; must match the trained model.
        pca_limit (int): unused; kept for signature parity with
            bootstrap_pca_train.
        ann_batch (int): unused; kept for signature parity.
        model_path (str): directory holding forest.pca / forest.ann;
            defaults to ~/code/wbia/models.
        output_path (str): destination for the PNGs; defaults to
            ~/Desktop/output-ann.
        neighbors (int): number of ANN neighbors to retrieve per query.
        nms_thresh (float): IoU threshold used for NMS.
        min_confidence (float): chips at/above this confidence get a green
            border, below a red one.
    """
    from annoy import AnnoyIndex
    import random

    if output_path is None:
        output_path = abspath(expanduser(join('~', 'Desktop', 'output-ann')))
    ut.ensuredir(output_path)

    gid_list = general_get_imageset_gids(ibs, 'TRAIN_SET', **kwargs)
    random.shuffle(gid_list)

    # Load the pickled (pca_model, scaler, manifest) tuple and the ANN forest
    if model_path is None:
        model_path = abspath(expanduser(join('~', 'code', 'wbia', 'models')))
    scaler_filepath = join(model_path, 'forest.pca')
    logger.info('Loading scaler model from: %r' % (scaler_filepath,))
    pca_model, scaler, manifest_dict = ut.load_cPkl(scaler_filepath)
    forest_filepath = join(model_path, 'forest.ann')
    logger.info('Loading ANN model from: %r' % (forest_filepath,))
    ann_model = AnnoyIndex(dims)
    ann_model.load(forest_filepath)

    config = {
        'algo': '_COMBINED',
        'features': True,
        'feature2_algo': 'resnet',
        'classify': True,
        'classifier_algo': 'svm',
        'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.image.zebra.pkl',
    }
    logger.info('\tGather Ground-Truth')
    gt_dict = general_parse_gt(ibs, test_gid_list=gid_list, **config)
    logger.info('\tGather Predictions')
    pred_dict = localizer_parse_pred(ibs, test_gid_list=gid_list, **config)

    for image_uuid in gt_dict:
        # Get the gt and prediction list
        gt_list = gt_dict[image_uuid]
        pred_list = pred_dict[image_uuid]

        # Rank predictions by their best overlap with any ground-truth box
        overlap = general_overlap(gt_list, pred_list)
        num_gt, num_pred = overlap.shape
        max_overlap = np.max(overlap, axis=0)
        index_list = np.argsort(max_overlap)

        example_limit = 1
        worst_idx_list = index_list[:example_limit]
        best_idx_list = index_list[-1 * example_limit :]
        logger.info('Worst ovelap: %r' % (overlap[:, worst_idx_list],))
        logger.info('Best ovelap: %r' % (overlap[:, best_idx_list],))

        for idx_list in [best_idx_list, worst_idx_list]:
            example_list = ut.take(pred_list, idx_list)
            warpkw = dict(interpolation=cv2.INTER_LANCZOS4)
            for example, offset in zip(example_list, idx_list):
                gid = example['gid']
                # Project the query feature into PCA space and query the ANN
                feature_list = np.array([example['feature']])
                data_list = scaler.transform(feature_list)
                data_list_ = pca_model.transform(data_list)[0]
                neighbor_index_list = ann_model.get_nns_by_vector(data_list_, neighbors)
                # De-duplicate neighbors that map to the same (gid, offset)
                neighbor_manifest_list = list(
                    set(
                        manifest_dict[neighbor_index]
                        for neighbor_index in neighbor_index_list
                    )
                )
                neighbor_gid_list_ = ut.take_column(neighbor_manifest_list, 0)
                neighbor_gid_list_ = [gid] + neighbor_gid_list_
                neighbor_uuid_list_ = ibs.get_image_uuids(neighbor_gid_list_)
                neighbor_offset_list_ = ut.take_column(neighbor_manifest_list, 1)
                neighbor_offset_list_ = [offset] + neighbor_offset_list_

                neighbor_gid_set_ = list(set(neighbor_gid_list_))
                neighbor_image_list = ibs.get_images(neighbor_gid_set_)
                neighbor_image_dict = {
                    gid_: image
                    for gid_, image in zip(neighbor_gid_set_, neighbor_image_list)
                }

                neighbor_pred_dict = localizer_parse_pred(
                    ibs, test_gid_list=neighbor_gid_set_, **config
                )

                # Group neighbor predictions by their source image
                neighbor_dict = {}
                zipped = zip(
                    neighbor_gid_list_, neighbor_uuid_list_, neighbor_offset_list_
                )
                for neighbor_gid, neighbor_uuid, neighbor_offset in zipped:
                    if neighbor_gid not in neighbor_dict:
                        neighbor_dict[neighbor_gid] = []
                    neighbor_pred = neighbor_pred_dict[neighbor_uuid][neighbor_offset]
                    neighbor_dict[neighbor_gid].append(neighbor_pred)

                # Extract the query chip first (coords are normalized [0, 1])
                chip_list = []
                query_image = ibs.get_images(gid)
                height, width = query_image.shape[:2]
                xtl = int(example['xtl'] * width)
                ytl = int(example['ytl'] * height)
                xbr = int(example['xbr'] * width)
                ybr = int(example['ybr'] * height)
                try:
                    chip = query_image[ytl:ybr, xtl:xbr, :]
                    chip = cv2.resize(chip, (192, 192), **warpkw)
                    chip_list.append(chip)
                except Exception:
                    # degenerate/out-of-bounds boxes: best-effort, skip chip
                    pass
                # Thin black separator between the query and its neighbors
                chip_list.append(np.zeros((192, 10, 3)))

                for neighbor_gid in neighbor_dict:
                    neighbor_list = neighbor_dict[neighbor_gid]
                    # Compile (xtl, ytl, xbr, ybr) coords + confidences for NMS
                    coord_list = []
                    confs_list = []
                    for neighbor in neighbor_list:
                        coord_list.append(
                            [
                                neighbor['xtl'],
                                neighbor['ytl'],
                                neighbor['xbr'],
                                neighbor['ybr'],
                            ]
                        )
                        confs_list.append(neighbor['confidence'])
                    coord_list = np.vstack(coord_list)
                    confs_list = np.array(confs_list)
                    # Perform NMS
                    keep_indices_set = set(nms(coord_list, confs_list, nms_thresh))
                    neighbor_list_ = [
                        neighbor
                        for index, neighbor in enumerate(neighbor_list)
                        if index in keep_indices_set
                    ]
                    neighbor_image = neighbor_image_dict[neighbor_gid]
                    height, width = neighbor_image.shape[:2]
                    for neighbor_ in neighbor_list_:
                        xtl = int(neighbor_['xtl'] * width)
                        ytl = int(neighbor_['ytl'] * height)
                        xbr = int(neighbor_['xbr'] * width)
                        ybr = int(neighbor_['ybr'] * height)
                        # BUGFIX: use the confidence of the chip being drawn.
                        # The original read `neighbor['confidence']` -- the
                        # stale loop variable left over from the NMS-prep
                        # loop above -- so border colors were wrong.
                        conf = neighbor_['confidence']
                        try:
                            chip = neighbor_image[ytl:ybr, xtl:xbr, :]
                            chip = cv2.resize(chip, (192, 192), **warpkw)
                            color = (0, 255, 0) if conf >= min_confidence else (0, 0, 255)
                            cv2.rectangle(chip, (0, 0), (192, 192), color, 10)
                            chip_list.append(chip)
                        except Exception:
                            pass

                # Only write a canvas when there are enough chips to show
                min_chips = 16
                if len(chip_list) < min_chips:
                    continue
                chip_list = chip_list[:min_chips]
                canvas = np.hstack(chip_list)
                output_filepath = join(output_path, 'neighbors_%d_%d.png' % (gid, offset))
                cv2.imwrite(output_filepath, canvas)
@register_ibs_method
def bootstrap(
ibs,
species_list=['zebra'],
N=10,
rounds=20,
scheme=2,
ensemble=9,
output_path=None,
precompute=True,
precompute_test=True,
recompute=False,
visualize=True,
C=1.0,
kernel='rbf',
**kwargs,
):
from sklearn import svm, preprocessing
# Establish variables
kernel = str(kernel.lower())
species_list = [species.lower() for species in species_list]
species_list_str = '.'.join(species_list)
assert scheme in [1, 2], 'Invalid scheme'
if output_path is None:
# species_list_str = '+'.join(species_list)
# args = (N, rounds, scheme, species_list_str, )
# output_path_ = 'models-bootstrap-%s-%s-%s-%s' % args
output_path_ = 'models-bootstrap'
output_path = abspath(expanduser(join('~', 'code', 'wbia', output_path_)))
logger.info('Using output_path = %r' % (output_path,))
if recompute:
ut.delete(output_path)
ut.ensuredir(output_path)
# Get the test images for later
depc = ibs.depc_image
test_gid_list = general_get_imageset_gids(ibs, 'TEST_SET', **kwargs)
wic_model_filepath = ibs.classifier_train_image_svm(
species_list, output_path=output_path, dryrun=True
)
is_wic_model_trained = exists(wic_model_filepath)
######################################################################################
# Step 1: train whole-image classifier
# this will compute and cache any ResNet features that
# haven't been computed
if not is_wic_model_trained:
wic_model_filepath = ibs.classifier_train_image_svm(
species_list, output_path=output_path
)
# Load model pickle
model_tup = ut.load_cPkl(wic_model_filepath)
model, scaler = model_tup
######################################################################################
# Step 2: sort all test images based on whole image classifier
# establish a review ordering based on classification probability
# Get scores
vals = get_classifier_svm_data_labels(ibs, 'TRAIN_SET', species_list)
train_gid_set, data_list, label_list = vals
# Normalize data
data_list = scaler.transform(data_list)
# score_list_ = model.decision_function(data_list) # NOQA
score_list_ = model.predict_proba(data_list)
score_list_ = score_list_[:, 1]
# Sort gids by scores (initial ranking)
comb_list = sorted(list(zip(score_list_, train_gid_set)), reverse=True)
sorted_gid_list = [comb[1] for comb in comb_list]
config = {
'algo': '_COMBINED',
'species_set': set(species_list),
'features': True,
'feature2_algo': 'resnet',
'classify': True,
'classifier_algo': 'svm',
| |
""" Cisco_IOS_XR_infra_xtc_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR infra\-xtc package operational data.
This module contains definitions
for the following management objects\:
pce\-lsp\-data\: PCE LSP's data
pce\-peer\: pce peer
pce\-topology\: pce topology
pce\: pce
Copyright (c) 2013\-2017 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class LspSetup(Enum):
    """
    LspSetup (Enum Class)

    LSP setup type

    .. data:: setup_rsvp = 0

        LSP is established using RSVP-TE

    .. data:: setup_sr = 1

        LSP is established using SR-TE

    .. data:: setup_unknown = 2

        Unknown LSP establishment method

    """

    # NOTE(review): appears to be ydk auto-generated code -- numeric values
    # must stay in sync with the Cisco-IOS-XR-infra-xtc-oper YANG model.
    setup_rsvp = Enum.YLeaf(0, "setup-rsvp")
    setup_sr = Enum.YLeaf(1, "setup-sr")
    setup_unknown = Enum.YLeaf(2, "setup-unknown")
class LspState(Enum):
    """
    LspState (Enum Class)

    LSP state (up/down)

    .. data:: lsp_down = 0

        LSP is down

    .. data:: lsp_up = 1

        LSP is up

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    lsp_down = Enum.YLeaf(0, "lsp-down")
    lsp_up = Enum.YLeaf(1, "lsp-up")
class PceAfId(Enum):
    """
    PceAfId (Enum Class)

    Pce address-family id

    .. data:: none = 0

        None

    .. data:: ipv4 = 1

        IPv4

    .. data:: ipv6 = 2

        IPv6

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    none = Enum.YLeaf(0, "none")
    ipv4 = Enum.YLeaf(1, "ipv4")
    ipv6 = Enum.YLeaf(2, "ipv6")
class PceAsso(Enum):
    """
    PceAsso (Enum Class)

    Pce association (disjointness) type

    .. data:: unknown = 0

        Unknown type

    .. data:: link = 1

        LINK

    .. data:: node = 2

        NODE

    .. data:: srlg = 3

        SRLG

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    unknown = Enum.YLeaf(0, "unknown")
    link = Enum.YLeaf(1, "link")
    node = Enum.YLeaf(2, "node")
    srlg = Enum.YLeaf(3, "srlg")
class PceCspfRc(Enum):
    """
    PceCspfRc (Enum Class)

    PCE CSPF (constrained shortest-path-first) result code

    .. data:: pce_cspf_not_set = 0

        Not set

    .. data:: pce_cspf_src_not_found = 1

        Source not found

    .. data:: pce_cspf_dst_not_found = 2

        Destination not found

    .. data:: pce_cspf_second_src_not_found = 3

        Second source not found

    .. data:: pce_cspf_second_dst_not_found = 4

        Second destination not found

    .. data:: pce_cspf_no_mem = 5

        No memory

    .. data:: pce_cspf_ex_path_not_resolved = 6

        Second path not resolved

    .. data:: pce_cspf_no_path = 7

        No path

    .. data:: pce_cspf_sp_success = 8

        Shortest path success

    .. data:: pce_cspf_error = 9

        Error

    .. data:: pce_cspf_fallback_srlg_node_node = 10

        Fallback from SRLG-NODE to NODE

    .. data:: pce_cspf_fallback_srlg_node_link = 11

        Fallback from SRLG-NODE to LINK

    .. data:: pce_cspf_fallback_srlg_node_sp = 12

        Fallback from SRLG-NODE to SP

    .. data:: pce_cspf_fallback_node_link = 13

        Fallback from NODE to LINK

    .. data:: pce_cspf_fallback_link_sp = 14

        Fallback from LINK to SP

    .. data:: pce_cspf_fallback_node_sp = 15

        Fallback from NODE to SP

    .. data:: pce_cspf_fallback_srlg_link = 16

        Fallback from SRLG to LINK

    .. data:: pce_cspf_fallback_srlg_sp = 17

        Fallback from SRLG to SP

    .. data:: pce_cspf_dp_success = 18

        Disjoint path success

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    pce_cspf_not_set = Enum.YLeaf(0, "pce-cspf-not-set")
    pce_cspf_src_not_found = Enum.YLeaf(1, "pce-cspf-src-not-found")
    pce_cspf_dst_not_found = Enum.YLeaf(2, "pce-cspf-dst-not-found")
    pce_cspf_second_src_not_found = Enum.YLeaf(3, "pce-cspf-second-src-not-found")
    pce_cspf_second_dst_not_found = Enum.YLeaf(4, "pce-cspf-second-dst-not-found")
    pce_cspf_no_mem = Enum.YLeaf(5, "pce-cspf-no-mem")
    pce_cspf_ex_path_not_resolved = Enum.YLeaf(6, "pce-cspf-ex-path-not-resolved")
    pce_cspf_no_path = Enum.YLeaf(7, "pce-cspf-no-path")
    pce_cspf_sp_success = Enum.YLeaf(8, "pce-cspf-sp-success")
    pce_cspf_error = Enum.YLeaf(9, "pce-cspf-error")
    pce_cspf_fallback_srlg_node_node = Enum.YLeaf(10, "pce-cspf-fallback-srlg-node-node")
    pce_cspf_fallback_srlg_node_link = Enum.YLeaf(11, "pce-cspf-fallback-srlg-node-link")
    pce_cspf_fallback_srlg_node_sp = Enum.YLeaf(12, "pce-cspf-fallback-srlg-node-sp")
    pce_cspf_fallback_node_link = Enum.YLeaf(13, "pce-cspf-fallback-node-link")
    pce_cspf_fallback_link_sp = Enum.YLeaf(14, "pce-cspf-fallback-link-sp")
    pce_cspf_fallback_node_sp = Enum.YLeaf(15, "pce-cspf-fallback-node-sp")
    pce_cspf_fallback_srlg_link = Enum.YLeaf(16, "pce-cspf-fallback-srlg-link")
    pce_cspf_fallback_srlg_sp = Enum.YLeaf(17, "pce-cspf-fallback-srlg-sp")
    pce_cspf_dp_success = Enum.YLeaf(18, "pce-cspf-dp-success")
class PceHeadendSwap(Enum):
    """
    PceHeadendSwap (Enum Class)

    PCE Headends Swap Code

    .. data:: pcehs_none = 0

        Headends not swapped

    .. data:: pcehs_plain = 1

        Headends swapped

    .. data:: pcehs_rwi = 2

        Headends swapped with increment

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    pcehs_none = Enum.YLeaf(0, "pcehs-none")
    pcehs_plain = Enum.YLeaf(1, "pcehs-plain")
    pcehs_rwi = Enum.YLeaf(2, "pcehs-rwi")
class PceIgpInfoId(Enum):
    """
    PceIgpInfoId (Enum Class)

    IGP IDs (note: values start at 1, not 0)

    .. data:: isis = 1

        ISIS

    .. data:: ospf = 2

        OSPF

    .. data:: bgp = 3

        BGP

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    isis = Enum.YLeaf(1, "isis")
    ospf = Enum.YLeaf(2, "ospf")
    bgp = Enum.YLeaf(3, "bgp")
class PceProto(Enum):
    """
    PceProto (Enum Class)

    PCE peer protocol

    .. data:: pcep = 0

        PCE protocol

    .. data:: netconf = 1

        Netconf protocol

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    pcep = Enum.YLeaf(0, "pcep")
    netconf = Enum.YLeaf(1, "netconf")
class PceRro(Enum):
    """
    PceRro (Enum Class)

    PCE RRO (Record Route Object) sub-object type

    .. data:: rro_type_ipv4_address = 0

        IPv4 Address

    .. data:: rro_type_mpls_label = 1

        MPLS Label

    .. data:: rro_type_sripv4_node_sid = 2

        Segment Routing IPv4 Node SID

    .. data:: rro_type_sripv4_adjacency_sid = 3

        Segment Routing IPv4 Adjacency SID

    .. data:: rro_type_sr_nai_null = 4

        Segment Routing with NAI null

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    rro_type_ipv4_address = Enum.YLeaf(0, "rro-type-ipv4-address")
    rro_type_mpls_label = Enum.YLeaf(1, "rro-type-mpls-label")
    rro_type_sripv4_node_sid = Enum.YLeaf(2, "rro-type-sripv4-node-sid")
    rro_type_sripv4_adjacency_sid = Enum.YLeaf(3, "rro-type-sripv4-adjacency-sid")
    rro_type_sr_nai_null = Enum.YLeaf(4, "rro-type-sr-nai-null")
class PceSrSid(Enum):
    """
    PceSrSid (Enum Class)

    PCE SR SID type

    .. data:: ipv4_node_sid = 0

        IPv4 Node SID

    .. data:: ipv4_adjacency_sid = 1

        IPv4 Adjacency SID

    .. data:: unknown_sid = 2

        Unknown SID

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    ipv4_node_sid = Enum.YLeaf(0, "ipv4-node-sid")
    ipv4_adjacency_sid = Enum.YLeaf(1, "ipv4-adjacency-sid")
    unknown_sid = Enum.YLeaf(2, "unknown-sid")
class PcepLspState(Enum):
    """
    PcepLspState (Enum Class)

    PCEP LSP operational state

    .. data:: lsp_down = 0

        LSP is down

    .. data:: lsp_up = 1

        LSP is up

    .. data:: lsp_active = 2

        LSP is active (carrying traffic)

    .. data:: lsp_going_down = 3

        LSP is going down

    .. data:: lsp_being_signaled = 4

        LSP is being signaled

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    lsp_down = Enum.YLeaf(0, "lsp-down")
    lsp_up = Enum.YLeaf(1, "lsp-up")
    lsp_active = Enum.YLeaf(2, "lsp-active")
    lsp_going_down = Enum.YLeaf(3, "lsp-going-down")
    lsp_being_signaled = Enum.YLeaf(4, "lsp-being-signaled")
class PcepState(Enum):
    """
    PcepState (Enum Class)

    PCEP session state

    .. data:: tcp_close = 0

        TCP close

    .. data:: tcp_listen = 1

        TCP listen

    .. data:: tcp_connect = 2

        TCP connect

    .. data:: pcep_closed = 3

        PCEP closed

    .. data:: pcep_opening = 4

        PCEP opening

    .. data:: pcep_open = 5

        PCEP open

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.
    tcp_close = Enum.YLeaf(0, "tcp-close")
    tcp_listen = Enum.YLeaf(1, "tcp-listen")
    tcp_connect = Enum.YLeaf(2, "tcp-connect")
    pcep_closed = Enum.YLeaf(3, "pcep-closed")
    pcep_opening = Enum.YLeaf(4, "pcep-opening")
    pcep_open = Enum.YLeaf(5, "pcep-open")
class Sid(Enum):
    """
    Sid (Enum Class)

    SID Types (note: values start at 1, not 0)

    .. data:: sr_protected_adj_sid = 1

        Protected Adjacency SID

    .. data:: sr_unprotected_adj_sid = 2

        Unprotected Adjacency SID

    .. data:: sr_bgp_egress_peer_engineering_sid = 3

        BGP egress peer engineering SID

    .. data:: sr_reqular_prefix_sid = 4

        Regular prefix SID

    .. data:: sr_strict_prefix_sid = 5

        Strict prefix SID

    """

    # NOTE(review): ydk auto-generated enum; keep values in sync with the
    # YANG model.  "reqular" below is a typo carried over from the model's
    # identifier -- do not "fix" it, the YANG string must match.
    sr_protected_adj_sid = Enum.YLeaf(1, "sr-protected-adj-sid")
    sr_unprotected_adj_sid = Enum.YLeaf(2, "sr-unprotected-adj-sid")
    sr_bgp_egress_peer_engineering_sid = Enum.YLeaf(3, "sr-bgp-egress-peer-engineering-sid")
    sr_reqular_prefix_sid = Enum.YLeaf(4, "sr-reqular-prefix-sid")
    sr_strict_prefix_sid = Enum.YLeaf(5, "sr-strict-prefix-sid")
class PceLspData(Entity):
"""
PCE LSP's data
.. attribute:: tunnel_infos
Tunnel database in XTC
**type**\: :py:class:`TunnelInfos <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelInfos>`
.. attribute:: lsp_summary
LSP summary database in XTC
**type**\: :py:class:`LspSummary <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.LspSummary>`
.. attribute:: tunnel_detail_infos
Detailed tunnel database in XTC
**type**\: :py:class:`TunnelDetailInfos <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelDetailInfos>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData, self).__init__()
self._top_entity = None
self.yang_name = "pce-lsp-data"
self.yang_parent_name = "Cisco-IOS-XR-infra-xtc-oper"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("tunnel-infos", ("tunnel_infos", PceLspData.TunnelInfos)), ("lsp-summary", ("lsp_summary", PceLspData.LspSummary)), ("tunnel-detail-infos", ("tunnel_detail_infos", PceLspData.TunnelDetailInfos))])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict()
self.tunnel_infos = PceLspData.TunnelInfos()
self.tunnel_infos.parent = self
self._children_name_map["tunnel_infos"] = "tunnel-infos"
self._children_yang_names.add("tunnel-infos")
self.lsp_summary = PceLspData.LspSummary()
self.lsp_summary.parent = self
self._children_name_map["lsp_summary"] = "lsp-summary"
self._children_yang_names.add("lsp-summary")
self.tunnel_detail_infos = PceLspData.TunnelDetailInfos()
self.tunnel_detail_infos.parent = self
self._children_name_map["tunnel_detail_infos"] = "tunnel-detail-infos"
self._children_yang_names.add("tunnel-detail-infos")
self._segment_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data"
class TunnelInfos(Entity):
"""
Tunnel database in XTC
.. attribute:: tunnel_info
Tunnel information
**type**\: list of :py:class:`TunnelInfo <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelInfos.TunnelInfo>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelInfos, self).__init__()
self.yang_name = "tunnel-infos"
self.yang_parent_name = "pce-lsp-data"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("tunnel-info", ("tunnel_info", PceLspData.TunnelInfos.TunnelInfo))])
self._leafs = OrderedDict()
self.tunnel_info = YList(self)
self._segment_path = lambda: "tunnel-infos"
self._absolute_path = lambda: "Cisco-IOS-XR-infra-xtc-oper:pce-lsp-data/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(PceLspData.TunnelInfos, [], name, value)
class TunnelInfo(Entity):
"""
Tunnel information
.. attribute:: peer_address (key)
Peer Address
**type**\: union of the below types:
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
**type**\: str
**pattern:** ((\:\|[0\-9a\-fA\-F]{0,4})\:)([0\-9a\-fA\-F]{0,4}\:){0,5}((([0\-9a\-fA\-F]{0,4}\:)?(\:\|[0\-9a\-fA\-F]{0,4}))\|(((25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])\\.){3}(25[0\-5]\|2[0\-4][0\-9]\|[01]?[0\-9]?[0\-9])))(%[\\p{N}\\p{L}]+)?
.. attribute:: plsp_id (key)
PCEP LSP ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: tunnel_name (key)
Tunnel name
**type**\: str
**pattern:** [\\w\\\-\\.\:,\_@#%$\\+=\\\|;]+
.. attribute:: pcc_address
PCC address
**type**\: str
**pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
.. attribute:: tunnel_name_xr
Tunnel Name
**type**\: str
.. attribute:: brief_lsp_information
Brief LSP information
**type**\: list of :py:class:`BriefLspInformation <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceLspData.TunnelInfos.TunnelInfo.BriefLspInformation>`
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceLspData.TunnelInfos.TunnelInfo, self).__init__()
self.yang_name = "tunnel-info"
self.yang_parent_name = "tunnel-infos"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['peer_address','plsp_id','tunnel_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("brief-lsp-information", ("brief_lsp_information", PceLspData.TunnelInfos.TunnelInfo.BriefLspInformation))])
self._leafs | |
-m.b1018 + m.b1019 - m.b1139 <= 0)
m.e1827 = Constraint(expr= -m.b1018 - m.b1019 + m.b1020 - m.b1140 <= 0)
m.e1828 = Constraint(expr= m.b1021 - m.b1141 <= 0)
m.e1829 = Constraint(expr= -m.b1021 + m.b1022 - m.b1142 <= 0)
m.e1830 = Constraint(expr= -m.b1021 - m.b1022 + m.b1023 - m.b1143 <= 0)
m.e1831 = Constraint(expr= m.b1024 - m.b1144 <= 0)
m.e1832 = Constraint(expr= -m.b1024 + m.b1025 - m.b1145 <= 0)
m.e1833 = Constraint(expr= -m.b1024 - m.b1025 + m.b1026 - m.b1146 <= 0)
m.e1834 = Constraint(expr= m.b907 + m.b910 == 1)
m.e1835 = Constraint(expr= m.b908 + m.b911 == 1)
m.e1836 = Constraint(expr= m.b909 + m.b912 == 1)
m.e1837 = Constraint(expr= -m.b913 + m.b922 + m.b925 >= 0)
m.e1838 = Constraint(expr= -m.b914 + m.b923 + m.b926 >= 0)
m.e1839 = Constraint(expr= -m.b915 + m.b924 + m.b927 >= 0)
m.e1840 = Constraint(expr= -m.b922 + m.b940 >= 0)
m.e1841 = Constraint(expr= -m.b923 + m.b941 >= 0)
m.e1842 = Constraint(expr= -m.b924 + m.b942 >= 0)
m.e1843 = Constraint(expr= -m.b925 + m.b943 >= 0)
m.e1844 = Constraint(expr= -m.b926 + m.b944 >= 0)
m.e1845 = Constraint(expr= -m.b927 + m.b945 >= 0)
m.e1846 = Constraint(expr= -m.b916 + m.b928 >= 0)
m.e1847 = Constraint(expr= -m.b917 + m.b929 >= 0)
m.e1848 = Constraint(expr= -m.b918 + m.b930 >= 0)
m.e1849 = Constraint(expr= -m.b928 + m.b946 + m.b949 >= 0)
m.e1850 = Constraint(expr= -m.b929 + m.b947 + m.b950 >= 0)
m.e1851 = Constraint(expr= -m.b930 + m.b948 + m.b951 >= 0)
m.e1852 = Constraint(expr= -m.b919 + m.b931 + m.b934 + m.b937 >= 0)
m.e1853 = Constraint(expr= -m.b920 + m.b932 + m.b935 + m.b938 >= 0)
m.e1854 = Constraint(expr= -m.b921 + m.b933 + m.b936 + m.b939 >= 0)
m.e1855 = Constraint(expr= -m.b931 + m.b949 >= 0)
m.e1856 = Constraint(expr= -m.b932 + m.b950 >= 0)
m.e1857 = Constraint(expr= -m.b933 + m.b951 >= 0)
m.e1858 = Constraint(expr= -m.b934 + m.b952 + m.b955 >= 0)
m.e1859 = Constraint(expr= -m.b935 + m.b953 + m.b956 >= 0)
m.e1860 = Constraint(expr= -m.b936 + m.b954 + m.b957 >= 0)
m.e1861 = Constraint(expr= -m.b937 + m.b958 + m.b961 + m.b964 >= 0)
m.e1862 = Constraint(expr= -m.b938 + m.b959 + m.b962 + m.b965 >= 0)
m.e1863 = Constraint(expr= -m.b939 + m.b960 + m.b963 + m.b966 >= 0)
m.e1864 = Constraint(expr= m.b913 - m.b922 >= 0)
m.e1865 = Constraint(expr= m.b914 - m.b923 >= 0)
m.e1866 = Constraint(expr= m.b915 - m.b924 >= 0)
m.e1867 = Constraint(expr= m.b913 - m.b925 >= 0)
m.e1868 = Constraint(expr= m.b914 - m.b926 >= 0)
m.e1869 = Constraint(expr= m.b915 - m.b927 >= 0)
m.e1870 = Constraint(expr= m.b916 - m.b928 >= 0)
m.e1871 = Constraint(expr= m.b917 - m.b929 >= 0)
m.e1872 = Constraint(expr= m.b918 - m.b930 >= 0)
m.e1873 = Constraint(expr= m.b919 - m.b931 >= 0)
m.e1874 = Constraint(expr= m.b920 - m.b932 >= 0)
m.e1875 = Constraint(expr= m.b921 - m.b933 >= 0)
m.e1876 = Constraint(expr= m.b919 - m.b934 >= 0)
m.e1877 = Constraint(expr= m.b920 - m.b935 >= 0)
m.e1878 = Constraint(expr= m.b921 - m.b936 >= 0)
m.e1879 = Constraint(expr= m.b919 - m.b937 >= 0)
m.e1880 = Constraint(expr= m.b920 - m.b938 >= 0)
m.e1881 = Constraint(expr= m.b921 - m.b939 >= 0)
m.e1882 = Constraint(expr= m.b922 - m.b940 >= 0)
m.e1883 = Constraint(expr= m.b923 - m.b941 >= 0)
m.e1884 = Constraint(expr= m.b924 - m.b942 >= 0)
m.e1885 = Constraint(expr= m.b925 - m.b943 >= 0)
m.e1886 = Constraint(expr= m.b926 - m.b944 >= 0)
m.e1887 = Constraint(expr= m.b927 - m.b945 >= 0)
m.e1888 = Constraint(expr= m.b928 - m.b946 >= 0)
m.e1889 = Constraint(expr= m.b929 - m.b947 >= 0)
m.e1890 = Constraint(expr= m.b930 - m.b948 >= 0)
m.e1891 = Constraint(expr= m.b928 - m.b949 >= 0)
m.e1892 = Constraint(expr= m.b929 - m.b950 >= 0)
m.e1893 = Constraint(expr= m.b930 - m.b951 >= 0)
m.e1894 = Constraint(expr= m.b934 - m.b952 >= 0)
m.e1895 = Constraint(expr= m.b935 - m.b953 >= 0)
m.e1896 = Constraint(expr= m.b936 - m.b954 >= 0)
m.e1897 = Constraint(expr= m.b934 - m.b955 >= 0)
m.e1898 = Constraint(expr= m.b935 - m.b956 >= 0)
m.e1899 = Constraint(expr= m.b936 - m.b957 >= 0)
m.e1900 = Constraint(expr= m.b937 - m.b958 >= 0)
m.e1901 = Constraint(expr= m.b938 - m.b959 >= 0)
m.e1902 = Constraint(expr= m.b939 - m.b960 >= 0)
m.e1903 = Constraint(expr= m.b937 - m.b961 >= 0)
m.e1904 = Constraint(expr= m.b938 - m.b962 >= 0)
m.e1905 = Constraint(expr= m.b939 - m.b963 >= 0)
m.e1906 = Constraint(expr= m.b937 - m.b964 >= 0)
m.e1907 = Constraint(expr= m.b938 - m.b965 >= 0)
m.e1908 = Constraint(expr= m.b939 - m.b966 >= 0)
m.e1909 = Constraint(expr= -m.b964 + m.b967 + m.b970 >= 0)
m.e1910 = Constraint(expr= -m.b965 + m.b968 + m.b971 >= 0)
m.e1911 = Constraint(expr= -m.b966 + m.b969 + m.b972 >= 0)
m.e1912 = Constraint(expr= -m.b973 + m.b982 + m.b985 >= 0)
m.e1913 = Constraint(expr= -m.b974 + m.b983 + m.b986 >= 0)
m.e1914 = Constraint(expr= -m.b975 + m.b984 + m.b987 >= 0)
m.e1915 = Constraint(expr= -m.b982 + m.b1000 >= 0)
m.e1916 = Constraint(expr= -m.b983 + m.b1001 >= 0)
m.e1917 = Constraint(expr= -m.b984 + m.b1002 >= 0)
m.e1918 = Constraint(expr= -m.b985 + m.b1003 >= 0)
m.e1919 = Constraint(expr= -m.b986 + m.b1004 >= 0)
m.e1920 = Constraint(expr= -m.b987 + m.b1005 >= 0)
m.e1921 = Constraint(expr= -m.b976 + m.b988 >= 0)
m.e1922 = Constraint(expr= -m.b977 + m.b989 >= 0)
m.e1923 = Constraint(expr= -m.b978 + m.b990 >= 0)
m.e1924 = Constraint(expr= -m.b988 + m.b1006 + m.b1009 >= 0)
m.e1925 = Constraint(expr= -m.b989 + m.b1007 + m.b1010 >= 0)
m.e1926 = Constraint(expr= -m.b990 + m.b1008 + m.b1011 >= 0)
m.e1927 = Constraint(expr= -m.b979 + m.b991 + m.b994 + m.b997 >= 0)
m.e1928 = Constraint(expr= -m.b980 + m.b992 + m.b995 + m.b998 >= 0)
m.e1929 = Constraint(expr= -m.b981 + m.b993 + m.b996 + m.b999 >= 0)
m.e1930 = Constraint(expr= -m.b991 + m.b1009 >= 0)
m.e1931 = Constraint(expr= -m.b992 + m.b1010 >= 0)
m.e1932 = Constraint(expr= -m.b993 + m.b1011 >= 0)
m.e1933 = Constraint(expr= -m.b994 + m.b1012 + m.b1015 >= 0)
m.e1934 = Constraint(expr= -m.b995 + m.b1013 + m.b1016 >= 0)
m.e1935 = Constraint(expr= -m.b996 + m.b1014 + m.b1017 >= 0)
m.e1936 = Constraint(expr= -m.b997 + m.b1018 + m.b1021 + m.b1024 >= 0)
m.e1937 = Constraint(expr= -m.b998 + m.b1019 + m.b1022 + m.b1025 >= 0)
m.e1938 = Constraint(expr= -m.b999 + m.b1020 + m.b1023 + m.b1026 >= 0)
m.e1939 = Constraint(expr= m.b973 - m.b982 >= 0)
m.e1940 = Constraint(expr= m.b974 - m.b983 >= 0)
m.e1941 = Constraint(expr= m.b975 - m.b984 >= 0)
m.e1942 = Constraint(expr= m.b973 - m.b985 >= 0)
m.e1943 = Constraint(expr= m.b974 - m.b986 >= 0)
m.e1944 = Constraint(expr= m.b975 - m.b987 >= 0)
m.e1945 = Constraint(expr= m.b982 - m.b1000 >= 0)
m.e1946 = Constraint(expr= m.b983 - m.b1001 >= 0)
m.e1947 = Constraint(expr= m.b984 - m.b1002 >= 0)
m.e1948 = Constraint(expr= m.b985 - m.b1003 >= 0)
m.e1949 = Constraint(expr= m.b986 - m.b1004 >= 0)
m.e1950 = Constraint(expr= m.b987 - m.b1005 >= 0)
m.e1951 = Constraint(expr= m.b976 - m.b988 >= 0)
m.e1952 = Constraint(expr= m.b977 - m.b989 >= 0)
m.e1953 = Constraint(expr= m.b978 - m.b990 >= 0)
m.e1954 = Constraint(expr= m.b988 - m.b1006 >= 0)
m.e1955 = Constraint(expr= m.b989 - m.b1007 >= 0)
m.e1956 = Constraint(expr= m.b990 - m.b1008 >= 0)
m.e1957 = Constraint(expr= m.b988 - m.b1009 >= 0)
m.e1958 = Constraint(expr= m.b989 - m.b1010 >= 0)
m.e1959 = Constraint(expr= m.b990 - m.b1011 >= 0)
m.e1960 = Constraint(expr= m.b979 - m.b991 >= 0)
m.e1961 = Constraint(expr= m.b980 - m.b992 >= 0)
m.e1962 = Constraint(expr= m.b981 - m.b993 >= 0)
m.e1963 = Constraint(expr= m.b979 - m.b994 >= 0)
m.e1964 = Constraint(expr= m.b980 - m.b995 >= 0)
m.e1965 = Constraint(expr= m.b981 - m.b996 >= 0)
m.e1966 = Constraint(expr= m.b979 - m.b997 >= 0)
m.e1967 = Constraint(expr= m.b980 - m.b998 >= 0)
m.e1968 = Constraint(expr= m.b981 - m.b999 >= 0)
m.e1969 = Constraint(expr= m.b994 - m.b1012 >= 0)
m.e1970 = Constraint(expr= m.b995 - m.b1013 >= 0)
m.e1971 = Constraint(expr= m.b996 - m.b1014 >= 0)
m.e1972 = Constraint(expr= m.b994 - m.b1015 >= 0)
m.e1973 = Constraint(expr= m.b995 - m.b1016 >= 0)
m.e1974 = Constraint(expr= m.b996 - m.b1017 >= 0)
m.e1975 = Constraint(expr= m.b997 - m.b1018 >= 0)
m.e1976 = Constraint(expr= m.b998 - m.b1019 >= 0)
m.e1977 = Constraint(expr= m.b999 - m.b1020 >= 0)
m.e1978 = Constraint(expr= m.b997 - m.b1021 >= 0)
m.e1979 = Constraint(expr= m.b998 - m.b1022 >= 0)
m.e1980 = Constraint(expr= m.b999 - m.b1023 >= 0)
m.e1981 = Constraint(expr= m.b997 - m.b1024 >= 0)
m.e1982 = Constraint(expr= m.b998 - m.b1025 >= 0)
m.e1983 = Constraint(expr= m.b999 - m.b1026 >= 0)
m.e1984 = Constraint(expr= m.b907 + m.b910 - m.b913 >= 0)
m.e1985 = Constraint(expr= m.b908 + m.b911 - m.b914 >= 0)
m.e1986 = Constraint(expr= m.b909 + m.b912 - m.b915 >= 0)
m.e1987 = Constraint(expr= m.b907 + m.b910 - m.b916 >= 0)
m.e1988 = Constraint(expr= m.b908 + m.b911 - m.b917 >= 0)
m.e1989 = Constraint(expr= m.b909 + m.b912 - m.b918 >= 0)
m.e1990 = Constraint(expr= m.b907 + m.b910 - m.b919 >= 0)
m.e1991 = Constraint(expr= m.b908 + m.b911 | |
<gh_stars>0
# Copyright 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import configparser as ConfigParser
except ImportError:
# Python 2 fallback
import ConfigParser
import argparse
import array
import fcntl
import os
import re
import shutil
import stat
import sys
import tarfile
import termios
from pty import _read as pty_read
from pty import _copy as pty_copy
import pty
import threading
import tty
try:
from collections import OrderedDict
except ImportError:
# Python 2.6 fallback
from ordereddict import OrderedDict
from os import path
from subprocess import Popen, PIPE
from tempfile import mkdtemp
# Regex for splitting "NAME=value" entries from environment listings.
ENV_MATCH = re.compile(r'([_A-Z0-9]+)=(.*)')
# Fallback manifest values used when zvsh.cfg does not override them.
DEFAULT_MANIFEST = {
    'Version': '20130611',
    'Memory': '%d' % (4 * 1024 * 1024 * 1024),  # 4 GiB, rendered as a string
    'Node': 1,
    'Timeout': 50
}
# Default per-channel I/O limits (operation counts and byte totals),
# kept as strings because they are substituted into manifest text.
DEFAULT_LIMITS = {
    'reads': str(1024 * 1024 * 1024 * 4),
    'rbytes': str(1024 * 1024 * 1024 * 4),
    'writes': str(1024 * 1024 * 1024 * 4),
    'wbytes': str(1024 * 1024 * 1024 * 4)
}
# Channel line layout: uri,alias,access_type,etag,gets,get_size,puts,put_size
CHANNEL_SEQ_READ_TEMPLATE = 'Channel = %s,%s,0,0,%s,%s,0,0'
CHANNEL_SEQ_WRITE_TEMPLATE = 'Channel = %s,%s,0,0,0,0,%s,%s'
CHANNEL_RANDOM_RW_TEMPLATE = 'Channel = %s,%s,3,0,%s,%s,%s,%s'
CHANNEL_RANDOM_RO_TEMPLATE = 'Channel = %s,%s,3,0,%s,%s,0,0'
# gdb command script used when debugging a ZeroVM session; %s is the
# path of the untrusted executable whose symbols are loaded.
DEBUG_TEMPLATE = '''set confirm off
b CreateSession
r
b main
add-symbol-file %s 0x440a00020000
shell clear
c
d br
'''
# Skeleton of the nvram ini file consumed by ZRT inside ZeroVM.
NVRAM_TEMPLATE = """\
[args]
args = %(args)s
[fstab]
%(fstab)s
[mapping]
%(mapping)s"""
CHANNEL_MAPPING_TEMPLATE = "channel=/dev/%s,mode=%s\n"
# Skeleton of the ZeroVM manifest file.
MANIFEST_TEMPLATE = """\
Node = %(node)s
Version = %(version)s
Timeout = %(timeout)s
Memory = %(memory)s
Program = %(program)s
%(channels)s"""
MANIFEST_DEFAULTS = dict(
    version='20130611',
    memory=4294967296,  # 4 GiB
    node=1,
    timeout=50,
)
# Default channel quotas: 2**32 operations / bytes in each direction.
GETS_DEFAULT = 4294967296
GET_SIZE_DEFAULT_BYTES = 4294967296
PUTS_DEFAULT = 4294967296
PUT_SIZE_DEFAULT_BYTES = 4294967296
# Channel access types (see Channel docstring).
SEQ_READ_SEQ_WRITE = 0
RND_READ_SEQ_WRITE = 1
SEQ_READ_RND_WRITE = 2
RND_READ_RND_WRITE = 3
# Defaults for --zvm-image arguments lacking mount point / access mode.
_DEFAULT_MOUNT_DIR = '/'
_DEFAULT_MOUNT_ACCESS = 'ro'
# External binaries and their invocation flags.
ZEROVM_EXECUTABLE = 'zerovm'
ZEROVM_OPTIONS = '-PQ'
DEBUG_EXECUTABLE = 'zerovm-dbg'
DEBUG_OPTIONS = '-sPQ'
GDB = 'x86_64-nacl-gdb'
class Channel(object):
    """
    A single manifest channel: maps a host resource into the ZeroVM
    filesystem together with its access type and I/O quotas.

    :param uri:
        Path to a local file, pipe, character device, tcp socket or host ID.
    :param alias:
        Path where this channel will be visible inside ZeroVM.
    :param access_type:
        One of:

        * 0: sequential read / sequential write
        * 1: random read / sequential write
        * 2: sequential read / random write
        * 3: random read / random write
    :param etag:
        etag switch; can be in the range 0..1 (default: 0).
    :param gets:
        Maximum number of reads from this channel (default: 4294967296).
    :param get_size:
        Maximum total bytes to read from this channel (default: 4294967296).
    :param puts:
        Maximum number of writes to this channel (default: 4294967296).
    :param put_size:
        Maximum total bytes to write to this channel (default: 4294967296).
    """

    def __init__(self, uri, alias, access_type,
                 etag=0,
                 gets=GETS_DEFAULT,
                 get_size=GET_SIZE_DEFAULT_BYTES,
                 puts=PUTS_DEFAULT,
                 put_size=PUT_SIZE_DEFAULT_BYTES):
        self.uri = uri
        self.alias = alias
        self.access_type = access_type
        self.etag = etag
        self.gets = gets
        self.get_size = get_size
        self.puts = puts
        self.put_size = put_size

    def __str__(self):
        # Manifest line layout:
        # Channel = uri,alias,type,etag,gets,get_size,puts,put_size
        fields = (self.uri, self.alias, self.access_type, self.etag,
                  self.gets, self.get_size, self.puts, self.put_size)
        return 'Channel = ' + ','.join(str(field) for field in fields)

    def __repr__(self):
        return '<%s>' % self
class Manifest(object):
    """
    Object representation of a ZeroVM manifest, with helpers and sane
    defaults for rendering manifest files.
    """

    DEFAULT_NODE = 1

    def __init__(self, version, timeout, memory, program, node=DEFAULT_NODE,
                 etag=0, channels=None):
        self.version = version
        self.timeout = timeout
        self.memory = memory
        self.program = program
        self.node = node
        self.etag = etag
        # Keep the caller's list object (callers append to it later);
        # only substitute a fresh list when nothing was given.
        if channels is None:
            channels = []
        self.channels = channels

    @classmethod
    def default_manifest(cls, basedir, program):
        """Build a manifest with the standard stdio + nvram channels."""
        stdio_channels = [
            Channel('/dev/stdin', '/dev/stdin', SEQ_READ_SEQ_WRITE, puts=0,
                    put_size=0),
            Channel(path.join(basedir, 'stdout.%s' % cls.DEFAULT_NODE),
                    '/dev/stdout', SEQ_READ_SEQ_WRITE, gets=0, get_size=0),
            Channel(path.join(basedir, 'stderr.%s' % cls.DEFAULT_NODE),
                    '/dev/stderr', SEQ_READ_SEQ_WRITE, gets=0, get_size=0),
            Channel(path.join(basedir, 'nvram.%s' % cls.DEFAULT_NODE),
                    '/dev/nvram', RND_READ_RND_WRITE),
        ]
        return Manifest(MANIFEST_DEFAULTS['version'],
                        MANIFEST_DEFAULTS['timeout'],
                        MANIFEST_DEFAULTS['memory'],
                        program,
                        channels=stdio_channels)

    def dumps(self):
        """
        Render the manifest text; requires at least one channel.
        """
        if not self.channels:
            raise RuntimeError("Manifest must have at least 1 channel.")
        channel_lines = '\n'.join(str(chan) for chan in self.channels)
        return MANIFEST_TEMPLATE % dict(
            node=self.node,
            version=self.version,
            timeout=self.timeout,
            memory='%s,%s' % (self.memory, self.etag),
            program=self.program,
            channels=channel_lines,
        )
class NVRAM(object):
    """
    Builder for the nvram ini file that ZRT parses inside ZeroVM.

    :param program_args:
        A `list` of the command args to be run inside ZeroVM. In the case of a
        Python application, this would be something like:

        ['python', '-c', 'print "hello, world"']
    :param processed_images:
        A `list` 3-tuples containing (image path, mount point, access). See
        :func:`_process_images` for more details.
    :param env:
        Optional. `dict` of environment settings from zvsh.cfg.
    :param int debug_verbosity:
        Optional. Debug verbosity level, in the range 0..4.
    """

    def __init__(self, program_args, processed_images, env=None,
                 debug_verbosity=None):
        # TODO(larsbutler): What about the [debug] and [env] sections?
        self.program_args = program_args
        self.processed_images = processed_images
        self.env = env
        self.debug_verbosity = debug_verbosity

    def dumps(self):
        """
        Generate the text for an nvram file.
        """
        nvram_text = NVRAM_TEMPLATE
        fstab_channels = []
        # One [fstab] entry per image. Device names are 1-based and must
        # match the channel aliases produced by create_manifest().
        for i, (zvm_image, mount_point, access) in enumerate(
                self.processed_images, start=1):
            device = '/dev/%s.%s' % (i, path.basename(zvm_image))
            fstab_channel = (
                'channel=%(device)s,mountpoint=%(mount_point)s,'
                'access=%(access)s,removable=no'
                % dict(device=device, mount_point=mount_point, access=access)
            )
            fstab_channels.append(fstab_channel)
        mapping = ''
        # Map stdio streams to character devices only when this process is
        # attached to a tty, so pipes keep normal (sequential) semantics.
        if sys.stdin.isatty():
            mapping += 'channel=/dev/stdin,mode=char\n'
        if sys.stdout.isatty():
            mapping += 'channel=/dev/stdout,mode=char\n'
        if sys.stderr.isatty():
            mapping += 'channel=/dev/stderr,mode=char\n'
        # When ZRT presents a program with its argv, it parses the
        # nvram file. This parser is very simple. It will choke on ','
        # (it treats comma the same as newline) and it will split the
        # command line on ' '. We must therefore escape each argument
        # individually before joining them.
        args = ' '.join(map(_nvram_escape, self.program_args))
        # NOTE(review): NVRAM_TEMPLATE has no %(env)s placeholder; the extra
        # 'env' key is harmless to the % operator when given a dict.
        nvram_text %= dict(
            args=args,
            fstab='\n'.join(fstab_channels),
            mapping=mapping,
            env='\n'.join([]),
        )
        # [env] and [debug] sections are appended only when configured.
        if self.env is not None:
            nvram_text += '[env]\n'
            for k, v in self.env.items():
                nvram_text += 'name=%s,value=%s\n' % (k, _nvram_escape(v))
        if self.debug_verbosity is not None:
            nvram_text += '[debug]\nverbosity=%s\n' % self.debug_verbosity
        return nvram_text
def _nvram_escape(value):
r"""Escape value for inclusion as a value in a nvram file.
The ini-file parser in ZRT is very simple. One quirk is that it
handles ',' the same as '\n', which means that a value like
greeting = Hello, World
will be cut-off after "Hello".
Values also need protection in other ways:
* When "args" are loaded, the value is split on ' ' and each
argument found is then unescaped. This means that each arg need
to have ' ' escaped.
* When a "value" is loaded in [env], it is unescaped. It must
therefore also be escaped.
This function escapes '\\', '"', ',', ' ', and '\n'. These are the
characters that conf_parser::unescape_string_copy_to_dest is
documented to handle and they are sufficient to handle the above
use cases.
>>> _nvram_escape('foo, bar')
'foo\\x2c\\x20bar'
>>> _nvram_escape('new\nline')
'new\\x0aline'
"""
for c in '\\", \n':
value = value.replace(c, '\\x%02x' % ord(c))
return value
def _process_images(zvm_images):
"""
Process a list of the --zvm-image arguments and split them into the
`path,mount_point,access_type` components. This returns a generator of
3-tuples.
`mount_point` and `access_type` are optional and will default to `/` and
`ro`, respectively.
Example:
>>> list(_process_images(['/home/user1/foo.tar',
... '/home/user1/bar.tar,/var/lib',
... '/home/user1/baz.tar,/usr/lib,rw']))
[('/home/user1/foo.tar', '/', 'ro'), \
('/home/user1/bar.tar', '/var/lib', 'ro'), \
('/home/user1/baz.tar', '/usr/lib', 'rw')]
"""
for image in zvm_images:
image_split = image.split(',')
# mount_dir and access_type are optional,
# so defaults are provided:
mount_dir = _DEFAULT_MOUNT_DIR
access_type = _DEFAULT_MOUNT_ACCESS
if len(image_split) == 1:
path = image_split[0]
elif len(image_split) == 2:
path, mount_dir = image_split
elif len(image_split) == 3:
path, mount_dir, access_type = image_split
yield path, mount_dir, access_type
def create_manifest(working_dir, program_path, manifest_cfg, tar_files,
                    limits_cfg):
    """
    Build a :class:`Manifest` for `program_path`, overriding the defaults
    with `manifest_cfg` and attaching one random-access channel per tar
    image with the limits from `limits_cfg`.

    :param manifest_cfg:
        `dict` containing the following keys:

        * Node
        * Version
        * Timeout
        * Memory
    :param limits_cfg:
        `dict` containing the following keys:

        * reads
        * rbytes
        * writes
        * wbytes
    """
    manifest = Manifest.default_manifest(working_dir, program_path)
    manifest.node = manifest_cfg['Node']
    manifest.version = manifest_cfg['Version']
    manifest.timeout = manifest_cfg['Timeout']
    manifest.memory = manifest_cfg['Memory']
    # Device aliases are 1-based and mirror the naming in NVRAM.dumps().
    for index, tar_file in enumerate(tar_files, start=1):
        device_alias = '/dev/%s.%s' % (index, path.basename(tar_file))
        manifest.channels.append(Channel(
            tar_file, device_alias, access_type=RND_READ_RND_WRITE,
            gets=limits_cfg['reads'],
            get_size=limits_cfg['rbytes'],
            puts=limits_cfg['writes'],
            put_size=limits_cfg['wbytes'],
        ))
    return manifest
def _get_runtime_file_paths(working_dir, node):
"""
Generate the runtime files paths for boot, manifest, nvram, stdout, and
stderr files, and return them as a `OrderedDict` with the following
structure:
>>> _get_runtime_file_paths('/home/user1', 1)
OrderedDict([('boot', '/home/user1/boot.1'), \
('manifest', '/home/user1/manifest.1'), \
('nvram', '/home/user1/nvram.1'), \
('stdout', '/home/user1/stdout.1'), \
('stderr', '/home/user1/stderr.1')])
Note that that paths are created by simply joining `working_dir`, so
relatve file paths can be used as well:
>>> _get_runtime_file_paths('foo/', 1)
| |
varps = []
a, b, varp = pylab.hist(diff,bins=arange(-0.2,0.2,0.016))
#print a,b,varp
varps.append(varp[0])
diff_cut = []
for d in range(len(diff)):
if abs(d) < 0.25:
diff_cut.append(diff[d])
list = scipy.array(diff_cut)
mu = list.mean()
median = scipy.median(diff_cut)
sigma = list.std()
print 'mu', mu
print 'sigma', sigma
sigma = 0.06
print ' len(z)=',len(z) , ' len(diff)=',len(diff)
reject = []
for line in results:
diff_val = (line[0] - line[1] - median)/(1 + line[1])
if abs(diff_val)>3*sigma: reject.append(line[2])
print reject
from scipy import stats
fit_a, fit_b, fit_varp = pylab.hist(diff_cut,bins=arange(-0.2,0.2,0.016))
pdf = scipy.stats.norm.pdf(fit_b, mu, sigma)
print 'pdf', pdf
height = scipy.array(a).max()
print pdf
pylab.plot(fit_b,len(diff_cut)*pdf/pdf.sum(),'r')
pylab.xlabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.ylabel("Number of Galaxies")
pylab.show()
pylab.savefig(name + 'RedshiftErrors.ps')
pylab.clf()
import scipy, numpy
from scipy import optimize
A = numpy.hstack((scipy.array(z)[:,numpy.newaxis],numpy.ones(len(z))[:,numpy.newaxis]))
#print A
#print scipy.shape(A)
#print scipy.shape(scipy.array(diff))
#(m,b), resids, rank, s = scipy.linalg.basic.lstsq(A,scipy.array(diff))
#pylab.plot(z,m*z+b,label='best-fit')
pylab.scatter(z_spec,z)
pylab.plot(scipy.array([0,1]),scipy.array([0,1]),color='red')
pylab.xlim(0,1)
pylab.ylim(0,1)
#pylab.ylabel("(PhotZ - SpecZ)/(1 + SpecZ)")
pylab.xlabel("PhotZ")
pylab.show()
pylab.savefig(name + 'RedshiftScatter.ps')
pylab.clf()
return reject
def get_cluster_z(file):
    """Estimate the cluster redshift as the mode of a 0.01-wide histogram
    of the catalog's Z column, clamped to [0, 1.5].

    NOTE(review): assumes the LDAC catalog has a 'Z' column — confirm
    against the producer of these files.
    """
    import ldac, numpy
    catalog = ldac.openObjectFile(file)
    counts = numpy.zeros(151)
    for redshift in catalog['Z']:
        bin_index = int(redshift * 100.)
        # Clamp out-of-range redshifts into the first/last bin.
        bin_index = min(max(bin_index, 0), 150)
        counts[bin_index] = counts[bin_index] + 1
    # argmax returns the first maximal bin, matching the original scan.
    Z = float(numpy.argmax(counts)) / 100.
    print(Z)
    return Z
def join_cats(cs, outputfile):
    """
    Horizontally join the columns of several FITS catalogs into a single
    STDTAB binary table and write it to `outputfile` (overwriting it).

    :param cs:
        Sequence whose items are either catalog paths or
        (path, table_name) pairs; the table name defaults to 'STDTAB'.
    :param outputfile:
        Path of the joined output catalog.

    Duplicate 'SeqNr' columns after the first are renamed 'SeqNr_<n>' so
    the joined table has unique column names.
    """
    tables = {}
    i = 0
    cols = []
    seqnr = 0
    for c in cs:
        # BUG FIX: the old `len(c) == 2` test also matched 2-character
        # path strings; only real (path, table) pairs are unpacked now.
        if isinstance(c, (tuple, list)) and len(c) == 2:
            TAB = c[1]
            c = c[0]
        else:
            TAB = 'STDTAB'
        i += 1
        print(c)
        tables[str(i)] = pyfits.open(c)
        for column in tables[str(i)][TAB].columns:
            if column.name == 'SeqNr':
                if not seqnr:
                    # Keep the first SeqNr column untouched.
                    seqnr += 1
                else:
                    # Rename later duplicates: SeqNr_1, SeqNr_2, ...
                    column.name = column.name + '_' + str(seqnr)
                    seqnr += 1
            cols.append(column)
    print(len(cols))
    hdu = pyfits.PrimaryHDU()
    hduSTDTAB = pyfits.BinTableHDU.from_columns(cols)
    hdulist = pyfits.HDUList([hdu])
    hdulist.append(hduSTDTAB)
    hdulist[1].header['EXTNAME'] = 'STDTAB'
    import os
    # Remove any stale output directly instead of shelling out to `rm`:
    # the old `os.system('rm ' + outputfile)` spawned a shell needlessly
    # and broke on paths containing spaces or shell metacharacters.
    try:
        os.remove(outputfile)
    except OSError:
        pass
    print(outputfile)
    hdulist.writeto(outputfile)
def parse(file,filters,constantFilter, columns,cluster):
import re
#filters = re.split('\,',filters[:-1])
filter_off = {}
filter_off_wild = {}
if True:
print file
f = open(file).readlines()
import string
for line in f:
if string.find(line,'SHIFTS') != -1:
shifts = line
res = re.split('\s+',shifts.replace(',',''))[2:-1]
shifts_v = res
break
print res
for i in range(len(filters)):
filter_off[filters[i]] = res[i]
filter_off_wild[filters[i].replace('-1-','%').replace('-2-','%').replace('-3-','%')] = res[i]
res_fix = []
''' now apply same offsets to chips from the same filter '''
for i in range(len(filters)):
zo = float(res[i])
if zo == 0:
zo = filter_off_wild[filters[i].replace('-1-','%').replace('-2-','%').replace('-3-','%')]
print zo
res_fix.append(str(zo))
print res_fix
print filter_off
import photometry_db
photometry_db.initConnection()
''' save to database '''
for filt in filters:
''' now loop over apertures '''
print cluster, filt, float(filter_off[filter])
slrZP = photometry_db.registerLePhareZP(cluster, filt, constantFilter, float(filter_off[filter]))
#print shifts, res
print columns
raw = open(columns,'r').readlines()
i = -1
filen = columns.replace('.replace','')
out = open(filen,'w')
for line in raw:
if string.find(line,'AB')!=-1:
i += 1
if i < len(res):
''' sign on shifts is opposite !!! '''
#line = line.replace('REPLACE',str(-1.*float(res[i])))
line = line.replace('REPLACE',str(0))
line = line.replace('\n','')
if len(line) > 0:
out.write(line + '\n')
out.close()
return res_fix
#shifts_v = res = ['0.66','0','0','-0.095','0.228','0.23','0','0','0.36','-0.15','0.002','0.244373']
def apply_shifts(file, filters, columns):
    """
    Fill the REPLACE placeholders in a ``.replace`` columns file with
    zero shifts and write the result to the same path minus the
    ``.replace`` suffix. Empty lines are dropped.

    :param file: unused (kept for signature compatibility).
    :param filters: list of filters; one zero shift is returned per filter.
    :param columns: path of the '.replace' template file.
    :return: the list of (zero) shift strings.
    """
    shifts_v = res = (['0'] * 20)[0:len(filters)]
    print(columns)
    template_lines = open(columns, 'r').readlines()
    output_name = columns.replace('.replace', '')
    output = open(output_name, 'w')
    index = -1
    for template_line in template_lines:
        # Lines containing 'AB' are magnitude columns; each consumes one
        # shift value in order.
        if template_line.find('AB') != -1:
            index += 1
            if index < len(res):
                template_line = template_line.replace('REPLACE', res[index])
        template_line = template_line.replace('\n', '')
        if len(template_line) > 0:
            output.write(template_line + '\n')
    output.close()
    return shifts_v
def parseeazy(catalog, n):
    """
    Convert an EAZY ascii photo-z catalog into an LDAC STDTAB table at
    ``<catalog>.tab``: column names are prefixed with ``EAZY_``, the id
    column becomes SeqNr, and an EAZY_NUMBER key set to `n` is added.
    """
    from utilities import run
    import os
    import re
    lines = open(catalog, 'r').readlines()
    sntmp = open('sntmp', 'w')
    keys = []
    # Column names come from the first '# ' header line; stop scanning at
    # the first non-comment line.
    for line in lines:
        if line[0:2] == '# ':
            header_tokens = re.split(r'\s+', line[:-1])
            print(header_tokens)
            for token in header_tokens[1:]:
                keys.append('EAZY_' + token)
            break
        if line[0] != '#':
            break
    print(keys)
    tempconf = '/tmp/' + os.environ['USER'] + 'photoz.conf'
    conflist = open(tempconf, 'w')
    for key in keys:
        # The id column becomes the LDAC SeqNr; everything else is a double.
        if key == 'EAZY_id':
            conflist.write('COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
        else:
            conflist.write('COL_NAME = ' + key + '\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
    conflist.close()
    tempcat = '/tmp/' + os.environ['USER'] + 'zs.cat'
    run('asctoldac -i ' + catalog + ' -o ' + catalog + '.temp.tab' + ' -c ' + tempconf + ' -t STDTAB', [tempcat])
    command = 'ldacaddkey -i ' + catalog + '.temp.tab -o ' + catalog + '.tab -t STDTAB -k EAZY_NUMBER ' + str(n) + ' FLOAT "" '
    print(command)
    os.system(command)
    print(catalog + '.tab')
def parsebpz(catalog,n):
'''this adds BPZ_NUMBER on the end, but it's always =0 currently (see /u/ki/awright/data/MACS1226+21/PHOTOMETRY_W-C-RC_aper/all_bpzAPER1CWWSB_capak.list1_0.bpz.tab.txt)'''
import os,re
from utilities import run
f = open(catalog,'r').readlines()
sntmp = open(os.environ['USER'] + 'sntmp','w')
keys = []
for line in f:
if line[0:2] == '# ':
res2 = re.split('\s+',line[:-1])
print res2
keys.append('BPZ_' + res2[2])
if line[0] != '#':
break
tempconf = '/tmp/' + os.environ['USER'] + 'photoz.conf'
conflist = open(tempconf,'w')
for key in keys:
if key == 'BPZ_ID' :
conflist.write('COL_NAME = SeqNr\nCOL_TTYPE = LONG\nCOL_HTYPE = INT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
else:
conflist.write('COL_NAME = ' + key + '\nCOL_TTYPE = DOUBLE\nCOL_HTYPE = FLOAT\nCOL_COMM = ""\nCOL_UNIT = ""\nCOL_DEPTH = 1\n#\n')
conflist.close()
tempcat = '/tmp/' + os.environ['USER'] + 'zs.cat'
run('asctoldac -i ' + catalog + ' -o ' + catalog + '.temp.tab' + ' -c ' + tempconf + ' -t STDTAB',[tempcat] )
command = 'ldacaddkey -i ' + catalog + '.temp.tab -o ' + catalog + '.tab -t STDTAB -k BPZ_NUMBER ' + str(n) + ' FLOAT "" '
print ' command=',command
os.system(command)
print catalog + '.tab'
print 'here'
def get_filters(cat,tab='STDTAB',SPECTRA=None):
    """
    Scan the column names of a FITS catalog and return the filter names
    judged usable for photo-z fitting.

    NOTE(review): Python 2 code -- relies on `string.find`,
    `len(filter(...))` and bare `reduce`, none of which work unchanged
    on Python 3.

    :param cat: path of the FITS catalog; some per-cluster exclusions key
        off this name (A370, HDFN, A383).
    :param tab: table extension name (default 'STDTAB').
    :param SPECTRA: optional SED template list name; certain lists
        restrict which filter bands are acceptable.
    :return: list of filter-name strings.
    """
    import string
    dict = {}
    p = pyfits.open(cat)
    #print p[tab].columns
    for column in p[tab].columns:
        import re
        # Column names look like '<instrument>-<run>-<band>...-MAG...';
        # split on '-' to inspect the pieces.
        res = re.split('-',column.name)
        #if len(res) > 1 and (string.find(column.name,'SUBARU') != -1 or string.find(column.name,'MEGA')!=-1 or string.find(column.name,'WIR')!=-1) and string.find(column.name,'1-u') == -1 and string.find(column.name,'SUBARU-9') == -1:
        ''' 1423 u-band image is bad '''
        use = False
        # Candidate columns: composite MAG columns, excluding W-J-U, FWHM,
        # COADD and double-dash names.
        if len(res) > 1 and string.find(column.name,'W-J-U') == -1 and string.find(column.name,'FWHM')==-1 and string.find(column.name,'COADD')==-1 and string.find(column.name,'MAG')!=-1 and string.find(column.name,'--')==-1:
            # Template-list-specific band whitelists (py2: len(filter(...))
            # is truthy when any listed band matches).
            if SPECTRA == 'CWWSB_capak_ubvriz.list':
                use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['-u','W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']]))
            elif SPECTRA == 'CWWSB_capak_u.list':
                use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-B','W-J-V','W-C-RC','W-C-IC','W-S-Z+']]))
            elif SPECTRA == 'CWWSB_capak_ub.list':
                use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-V','W-C-RC','W-S-I+','W-C-IC','W-S-Z+']]))
            elif SPECTRA == 'CWWSB_capak_uz.list':
                use = len(filter(lambda x:x,[string.find(column.name,f)!=-1 for f in ['W-J-B','W-J-V','W-C-RC','W-C-IC']]))
            else:
                use = True
        # Instrument/run exclusions (presumably early or bad runs --
        # TODO confirm the run-number semantics).
        if string.find(column.name,'SUBARU') != -1 and (string.find(column.name,'10') == -1 and string.find(column.name,'9') == -1) and string.find(column.name,'8')==-1:
            use = False
        if string.find(column.name,'MEGAPRIME') != -1 and (string.find(column.name,'1') == -1 and string.find(column.name,'0') == -1):
            use = False
        # Per-cluster exclusions keyed off the catalog filename.
        if string.find(cat,'A370') != -1 and (string.find(column.name,'W-S-I+') != -1 or string.find(column.name,'8') != -1):
            use = False
        if string.find(cat, 'HDFN') != -1 and (string.find(column.name,'SUBARU-9') != -1 or string.find(column.name,'W-S-I+')!= -1 or string.find(column.name,'-2-') != -1): # or string.find(column.name,'u') != -1):
            use = False
        #if string.find(cat,'HDFN') != -1 and (string.find(column.name,'W-S-Z+') != -1):
        #    use = False
        if string.find(cat,'A383') != -1 and (string.find(column.name,'u') != -1): # or string.find(column.name,'W-J-V') != -1):
            use = False
        #string.find(column.name,'SUBARU-9') != -1 or
        ''' remove WHT data, and u-band data '''
        if string.find(column.name,'WH') != -1 or string.find(column.name,'u') != -1 or string.find(column.name,'-U') != -1: # or string.find(column.name,'B') != -1: # or (string.find(column.name,'B') != -1 and string.find(column.name,'9') != -1): # is False:
            use = False
        #if string.find(column.name,'W-S-I+') != -1: # or string.find(column.name,'B') != -1: # or (string.find(column.name,'B') != -1 and string.find(column.name,'9') != -1): # is False:
        #    use = False
        if False: #string.find(cat,'HDFN') != -1 and (string.find(column.name,'W-J-B') != -1 and string.find(column.name,'9') != -1):
            use = False
        #if string.find(cat,'HDFN') != -1 and string.find(column.name,'W-S-Z') != -1:
        #    use = False
        ''' throw out early data '''
        #if string.find(column.name,'SUBARU') != -1 and (string.find(column.name,'9') != -1 or string.find(column.name,'8')!=-1):
        #    use = False
        # and string.find(column.name,'1-u') == -1: # and string.find(column.name,'W-J-B') == -1 : #or string.find(column.name,'MEGA')!=-1 or string.find(column.name,'WIR')!=-1): # and string.find(column.name,'1-u') == -1: # and string.find(column.name,'SUBARU-9') == -1: # and string.find(column.name,'10_1') == -1: #
        # and string.find(column.name,'1-u') == -1
        if use:
            try:
                # Columns whose last '-' component is an integer are
                # skipped; otherwise res[1:] rejoined is the filter name.
                dummy = int(res[-1])
            except:
                filt = reduce(lambda x,y: x+'-'+y,res[1:])
                # dict is used as an ordered-agnostic set of filter names.
                dict[filt] = 'yes'
                if False: #string.find(filt,'WHT') != -1:
                    print column.name, res, filt
        #print res, filter, column
    filters = dict.keys()
    print filters
    return filters
def figure_out_slr_chip(filters,catalog,tab='STDTAB',magtype='APER1'):
#magtype='APER1'
print magtype, 'magtype'
import string
print catalog
table = pyfits.open(catalog)[tab].data
stdfilts = {}
| |
<gh_stars>0
import abc
import inspect
import logging
import time
import typing
from d3m import exceptions, types, utils
from d3m.metadata import base as metadata_base, hyperparams, params, problem
# Public API of this module.
__all__ = (
    'Inputs', 'Outputs', 'Params', 'Hyperparams', 'CallResult', 'MultiCallResult', 'DockerContainer',
    'PrimitiveBase', 'ContinueFitMixin', 'SamplingCompositionalityMixin',
    'ProbabilisticCompositionalityMixin', 'Gradients',
    'GradientCompositionalityMixin', 'LossFunctionMixin',
    'NeuralNetworkModuleMixin', 'NeuralNetworkObjectMixin',
    'singleton', 'inputs_across_samples',
)
# Type variables for the container types primitives consume and produce.
Inputs = typing.TypeVar('Inputs', bound=typing.Union[types.Container])  # type: ignore
Outputs = typing.TypeVar('Outputs', bound=typing.Union[types.Container])  # type: ignore
# This type parameter is optional and can be set to None.
# See "TransformerPrimitiveBase" for an example.
Params = typing.TypeVar('Params', bound=params.Params)
Hyperparams = typing.TypeVar('Hyperparams', bound=hyperparams.Hyperparams)
# Type variable for neural network module types.
Module = typing.TypeVar('Module')
# Generic value type wrapped by CallResult.
T = typing.TypeVar('T')
# All base classes (primitive interfaces) should have docstrings starting with this language.
# This allows us to validate that primitives have changed their descriptions/docstrings to something different.
DEFAULT_DESCRIPTION = "A base class for primitives"
class CallResult(typing.Generic[T]):
    """
    Wrapper around the value returned by a primitive method call, carrying
    metadata about the call itself (as opposed to metadata about the value,
    which lives in the ``metadata`` attribute of the value).

    For a ``produce`` call, ``has_finished`` being ``True`` means the last
    call produced the final outputs: a call with more time or more
    iterations cannot produce different outputs.

    For a ``fit`` call, ``has_finished`` being ``True`` means the primitive
    is fully fitted on the current training data and further ``fit`` calls
    are unnecessary and change nothing. ``False`` means more iterations can
    be done (though not necessarily that they are beneficial).

    If the primitive iterates internally, ``iterations_done`` reports how
    many of those iterations the last call performed; primitives without
    internal iterations use ``None``.

    Methods returning call metadata should wrap their value in this class.

    Parameters
    ----------
    value:
        The value itself of the method call.
    has_finished:
        Set to ``True`` if it is not reasonable to call the method again anymore.
    iterations_done:
        How many iterations have been done during a method call, if any.
    """

    def __init__(self, value: T, has_finished: bool = True, iterations_done: int = None) -> None:
        self.iterations_done = iterations_done
        self.has_finished = has_finished
        self.value = value
class MultiCallResult:
    """
    The ``multi_produce`` counterpart of `CallResult`.

    Precise typing is not possible here: the result type would be a
    dependent type determined by the ``produce_methods`` argument and the
    output types of the corresponding produce methods, which standard
    Python typing cannot (yet) express.

    Parameters
    ----------
    values:
        A dict of values mapping between produce method names and their value outputs.
    has_finished:
        Set to ``True`` if it is not reasonable to call the method again anymore.
    iterations_done:
        How many iterations have been done during a method call, if any.
    """

    def __init__(self, values: typing.Dict, has_finished: bool = True, iterations_done: int = None) -> None:
        self.iterations_done = iterations_done
        self.has_finished = has_finished
        self.values = values
class PrimitiveBaseMeta(utils.GenericMetaclass):
    """
    A metaclass which provides the primitive instance to metadata so that primitive
    metadata can be automatically generated.
    """

    def __new__(mcls, class_name, bases, namespace, **kwargs):  # type: ignore
        cls = super().__new__(mcls, class_name, bases, namespace, **kwargs)
        # Abstract classes (the interfaces themselves) are not required to
        # carry concrete primitive metadata.
        if inspect.isabstract(cls):
            return cls
        if not isinstance(cls.metadata, metadata_base.PrimitiveMetadata):
            raise TypeError("'metadata' attribute is not an instance of PrimitiveMetadata.")
        # We are creating a class-level logger so that it can be used both from class and instance methods.
        # "python_path" is a required metadata value, but we leave metadata validation to later.
        python_path = cls.metadata.query().get('python_path', None)
        if python_path is not None:
            cls.logger = logging.getLogger(python_path)
        cls.metadata.contribute_to_class(cls)
        return cls

    def __repr__(cls) -> str:
        # Prefer the primitive's registered python_path as its repr,
        # falling back to the default class repr when metadata is absent.
        if getattr(cls, 'metadata', None) is not None:
            return cls.metadata.query().get('python_path', super().__repr__())
        else:
            return super().__repr__()
class DockerContainer(typing.NamedTuple):
    """
    Connection details for a running Docker container: where the container
    can be reached and how its image-exposed ports map to real ports.

    Attributes
    ----------
    address:
        An address at which the Docker container is available.
    ports:
        Mapping between image's exposed ports and real ports. E.g.,
        ``{'80/tcp': 80}``.
    """

    address: str
    ports: typing.Dict[str, int]
class PrimitiveBase(typing.Generic[Inputs, Outputs, Params, Hyperparams], metaclass=PrimitiveBaseMeta):
"""
A base class for primitives.
Class is parameterized using four type variables, ``Inputs``, ``Outputs``, ``Params``,
and ``Hyperparams``.
``Params`` has to be a subclass of `d3m.metadata.params.Params` and should define
all fields and their types for parameters which the primitive is fitting.
``Hyperparams`` has to be a subclass of a `d3m.metadata.hyperparams.Hyperparams`.
Hyper-parameters are those primitive's parameters which primitive is not fitting and
generally do not change during a life-time of a primitive.
``Params`` and ``Hyperparams`` have to be picklable and copyable. See `pickle`,
`copy`, and `copyreg` Python modules for more information.
In this context we use term method arguments to mean both formal parameters and
actual parameters of a method. We do this to not confuse method parameters with
primitive parameters (``Params``).
All arguments to all methods are keyword-only. No ``*args`` or ``**kwargs`` should
ever be used in any method.
Standardized interface use few public attributes and no other public attributes are
allowed to assure future compatibility. For your attributes use the convention that
private symbols should start with ``_``.
Primitives can have methods which are not part of standardized interface classes:
* Additional "produce" methods which are prefixed with ``produce_`` and have
the same semantics as ``produce`` but potentially return different output
container types instead of ``Outputs`` (in such primitive ``Outputs`` is seen as
primary output type, but the primitive also has secondary output types).
They should return ``CallResult`` and have ``timeout`` and ``iterations`` arguments.
* Private methods prefixed with ``_``.
No other public additional methods are allowed. If this represents a problem for you,
open an issue. (The rationale is that for other methods an automatic system will not
understand the semantics of the method.)
Method arguments which start with ``_`` are seen as private and can be used for arguments
useful for debugging and testing, but they should not be used by (or even known to) a
caller during normal execution. Such arguments have to be optional (have a default value)
so that the method can be called without the knowledge of the argument.
All arguments to all methods and all hyper-parameters together are seen as arguments to
the primitive as a whole. They are identified by their names. This means that any argument
name must have the same type and semantics across all methods, effectively be the same argument.
If a method argument matches in name a hyper-parameter, it has to match it in type and semantics
as well. Such method argument overrides a hyper-parameter for a method call. All this is necessary
so that callers can have easier time determine what values to pass to arguments and that it is
easier to describe what all values are inputs to a primitive as a whole (set of all
arguments).
To recap, subclasses can extend arguments of standard methods with explicit typed keyword
arguments used for the method call, or define new "produce" methods with arbitrary explicit
typed keyword arguments. There are multiple kinds of such arguments allowed:
* An (additional) input argument of any container type and not necessary of ``Inputs``
(in such primitive ``Inputs`` is seen as primary input type, but the primitive also has
secondary input types).
* An argument which is overriding a hyper-parameter for the duration of the call.
It should match a hyper-parameter in name and type. It should be a required argument
(no default value) which the caller has to supply (or with a default value of a
hyper-parameter, or with the same hyper-parameter as it was passed to the constructor,
or with some other value). This is meant just for fine-control by a caller during fitting
or producing, e.g., for a threshold or learning rate, and is not reasonable for most
hyper-parameters.
* An (additional) value argument which is one of standard data types, but not a container type.
In this case a caller will try to satisfy the input by creating part of a pipeline which
ends with a primitive with singleton produce method and extract the singleton value and
pass it without a container. This kind of an argument is | |
* phase * qm)
# Calculate polarisation with incident neutron
sf[n] = np.dot(sfm, incident_polarisation_vector)
return sf
def sf_magnetic_xray(q, r, moment, magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
    """
    Calculate the non-resonant magnetic component of the structure factor
    :param q: [n,3] array of hkl reflections
    :param r: [m,3] array of atomic positions in r.l.u.
    :param moment: [m,3] array of magnetic moment direction in orthogonal basis
    :param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
    :param occ: [m,1] array of atomic occupancies
    :param debyewaller: [n,m] array of thermal factors for each atom and reflection
    :param kwargs: additional options[*unused]
    :return sf: [n] complex array of structure factors

    f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
    B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
    - ignore orbital moment L
    - fD = magnetic form factor
    - S = spin moment
    - k_i, k_o = wavevector in, out
    - e_i, e_o = polarisation in, out
    From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
    Book: "X-ray Scattering and Absorption by Magnetic Materials" by Lovesey and Collins. Ch 2. Eqn.2.21+1
    No orbital component assumed
    magnetic moments assumed to be in the same reference frame as the polarisation
    """
    phase = phase_factor_qr(q, r)
    # BUG FIX: the np.float / np.complex aliases were removed in NumPy 1.24
    # (they were plain aliases of the builtins); use the builtin types.
    moment = np.asarray(moment, dtype=float).reshape((-1, 3))
    if occ is None:
        occ = np.ones(phase.shape[1])
    # Flatten so both [m] and [m,1] occupancy arrays broadcast against the
    # [n,m] per-(reflection, atom) factors.
    occ = np.asarray(occ, dtype=float).reshape(-1)
    if debyewaller is None:
        debyewaller = np.ones(phase.shape)
    if magnetic_formfactor is None:
        magnetic_formfactor = np.ones(phase.shape)
    # Calculate structure factor
    _debug('sf_magnetic_xray(phase.shape=%s)' % (phase.shape,))
    # Weight per (reflection, atom), then sum the weighted moments:
    # [n,m] @ [m,3] -> [n,3] vector structure factor. This replaces the
    # original O(n*m) double Python loop with equivalent array algebra.
    weights = magnetic_formfactor * debyewaller * occ * phase
    sfm = weights.dot(moment)
    # Unpolarised average: mean of the projections onto the three
    # orthogonal polarisation directions, i.e. (x + y + z) / 3.
    sf = sfm.sum(axis=1) / 3
    return sf
def sf_magnetic_xray_polarised(q, r, moment, incident_polarisation_vector=(1, 0, 0),
                               magnetic_formfactor=None, occ=None, debyewaller=None, **kwargs):
    """
    Calculate the non-resonant magnetic component of the structure factor
    :param q: [n,3] array of hkl reflections
    :param r: [m,3] array of atomic positions in r.l.u.
    :param moment: [m,3] array of magnetic moment direction in orthogonal basis
    :param incident_polarisation_vector: [1,3] direction of incident polarisation
    :param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and reflection
    :param occ: [m,1] array of atomic occupancies
    :param debyewaller: [n,m] array of thermal factors for each atom and reflection
    :param kwargs: additional options[*unused]
    :return sf: [n] complex array of structure factors
    f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
        B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
    - ignore orbital moment L
    - fD = magnetic form factor
    - S = spin moment
    - k_i, k_o = wavevector in, out
    - e_i, e_o = polarisation in, out
    From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
    Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
    No orbital component assumed
    magnetic moments assumed to be in the same reference frame as the polarisation
    """
    phase = phase_factor_qr(q, r)
    # Fix: np.float/np.complex were deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float/complex are the documented replacements.
    moment = np.asarray(moment, dtype=float).reshape((-1, 3))
    if occ is None:
        occ = np.ones(phase.shape[1])
    if debyewaller is None:
        debyewaller = np.ones(phase.shape)
    if magnetic_formfactor is None:
        magnetic_formfactor = np.ones(phase.shape)
    # Calculate structure factor
    _debug('sf_magnetic_xray_polarised(phase.shape=%s)' % (phase.shape,))
    sf = np.zeros(len(q), dtype=complex)
    for n in range(len(q)):
        # Calculate vector structure factor
        sfm = np.array([0., 0., 0.])
        for m, mom in enumerate(moment):
            sfm = sfm + magnetic_formfactor[n, m] * debyewaller[n, m] * occ[m] * phase[n, m] * mom
        # Calculate polarisation with incident x-ray
        # The reference frame of the x-ray and the crystal are assumed to be the same
        # i.e. pol=[1,0,0] || mom=[1,0,0] || (1,0,0)
        sf[n] = np.dot(sfm, incident_polarisation_vector)
    return sf
def sf_magnetic_xray_beamline(q, r, moment, energy_kev, magnetic_formfactor=None, occ=None, debyewaller=None,
                              azi_ref_q=(1, 0, 0), psi=0, polarisation='s-p', **kwargs):
    """
    Calculate the non-resonant magnetic component of the structure factor
    :param q: [n,3] array of hkl reflections
    :param r: [m,3] array of atomic positions in r.l.u.
    :param moment: [m,3] array of magnetic moment direction in orthogonal basis
    :param energy_kev: float value of incident x-ray energy in keV
    :param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and reflection
    :param occ: [m,1] array of atomic occupancies
    :param debyewaller: [n,m] array of thermal factors for each atom and reflection
    :param azi_ref_q: [1,3] azimuthal reference, in cartesian basis (Q)
    :param psi: [p] array of azimuthal angles - the rotation out of the scattering plane.
    :param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
    :param kwargs: additional options[*unused]
    :return sf: [n, p] complex array of structure factors for different reflections and azimuths
    f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B]
        B = e_o X e_i + (k_o X e_o) * k_o.e_i - (k_i X e_i) * k_i.e_o - (k_o X e_o) X (k_i X e_i)
    - ignore orbital moment L
    - fD = magnetic form factor
    - S = spin moment
    - k_i, k_o = wavevector in, out
    - e_i, e_o = polarisation in, out
    From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
    Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
    No orbital component assumed
    magnetic moments assumed to be in the same reference frame as the polarisation
    """
    phase = phase_factor_qr(q, r)
    # Fix: np.float/np.complex were deprecated in NumPy 1.20 and removed in
    # 1.24; the builtin float/complex are the documented replacements.
    moment = np.asarray(moment, dtype=float).reshape((-1, 3))
    if occ is None:
        occ = np.ones(phase.shape[1])
    if debyewaller is None:
        debyewaller = np.ones(phase.shape)
    if magnetic_formfactor is None:
        magnetic_formfactor = np.ones(phase.shape)
    psi = np.asarray(psi, dtype=float).reshape([-1])
    npsi = len(psi)
    _debug('sf_magnetic_xray_beamline(phase.shape=%s, npsi=%d)' % (phase.shape, npsi))
    sf = np.zeros([len(q), npsi], dtype=complex)
    for psival in range(npsi):
        # Fix: use the azimuth for THIS iteration. Previously the whole psi
        # array was passed on every pass, so all columns of sf were computed
        # from the same geometry instead of one azimuth each.
        kin, kout, ein, eout = scatteringvectors(q, energy_kev, azi_ref_q, psi[psival], polarisation)
        # Magnetic form factor
        # f_non-res_mag = i.r0.(hw/mc^2).fD.[.5.L.A + S.B] #equ 2 Hill+McMorrow 1996
        # ignore orbital moment L
        fspin = np.zeros([len(q), len(r)], dtype=complex)
        for n in range(len(q)):
            B = np.cross(eout[n], ein[n]) + \
                np.cross(kout[n], eout[n]) * np.dot(kout[n], ein[n]) - \
                np.cross(kin[n], ein[n]) * np.dot(kin[n], eout[n]) - \
                np.cross(np.cross(kout[n], eout[n]), np.cross(kin[n], ein[n]))
            fspin[n, :] = 1j * magnetic_formfactor[n, :] * np.dot(moment, B)
        sf[:, psival] = np.sum(fspin * occ * debyewaller * phase, axis=1)
    if npsi == 1:
        # Single azimuth: return a flat [n] array for convenience
        return sf[:, 0]
    return sf
def sf_magnetic_xray_resonant(q, r, moment, energy_kev, occ=None, debyewaller=None, azi_ref_q=(1, 0, 0), psi=0,
polarisation='sp', f0=0, f1=1, f2=0, **kwargs):
"""
Calculate the non-resonant magnetic component of the structure factor
:param q: [n,3] array of hkl reflections
:param r: [m,3] array of atomic positions in r.l.u.
:param moment: [m,3] array of magnetic moment direction in orthogonal basis
:param energy_kev: float value of incident x-ray energy in keV
:param occ: [m,1] array of atomic occupancies
:param debyewaller: [n,m] array of thermal factors for each atom and reflection
:param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
:param psi: [p] array of azimthal angles - the rotation out of the scattering plane.
:param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
:param f0: float Flm value 0 (charge)
:param f1: float Flm value 1
:param f2: float Flm value 2
:param kwargs: additional options[*unused]
:return sf: [n, p] complex array of structure factors for different reflections and azimuths
f_res_mag = [(e'.e)F0 - i(e'xe).Z*F1 + (e'.Z)*(e.Z)*F2]
From Hill+McMorrow Acta Cryst. 1996 A52, 236-244 Equ. (2)
Book: "X-ray Scattering and Absorption by Magnetic Materials" by <NAME> Collins. Ch 2. Eqn.2.21+1
No orbital component assumed
magnetic moments assumed to be in the same reference frame as the polarisation
"""
phase = phase_factor_qr(q, r)
moment = fg.norm(moment).reshape((-1, 3))
z = fg.norm(moment) # z^n is a unit vector in the direction of the magnetic moment of the nth ion.
if occ is None:
occ = np.ones(phase.shape[1])
if debyewaller is None:
debyewaller = np.ones(phase.shape)
if debyewaller is None:
debyewaller = np.ones(phase.shape)
psi = np.asarray(psi, dtype=np.float).reshape([-1])
npsi = len(psi)
_debug('sf_magnetic_xray_resonant(phase.shape=%s, npsi=%d)' % (phase.shape, npsi))
sf = np.zeros([len(q), npsi], dtype=np.complex)
for psival in range(npsi):
kin, kout, | |
# lms/djangoapps/lms_xblock/apps.py
def descriptor_global_local_resource_url(block, uri):
    """
    See :meth:`xblock.runtime.Runtime.local_resource_url`.

    This module-level hook is deliberately left unimplemented: the hosting
    application must monkey-patch it before any studio_view local resource
    URL is requested.
    """
    raise NotImplementedError(
        "Applications must monkey-patch this function before using local_resource_url for studio_view"
    )
class MetricsMixin:
    """
    Mixin that times ``render`` and ``handle`` calls in the DescriptorSystem
    and ModuleSystem and logs the duration at DEBUG level.

    The wrapped method's return value and any exception are passed through
    unchanged. The previously-built ``status``/``tags`` locals were dead code
    (flagged lint-amnesty ``unused-variable`` — leftovers from a removed
    metrics emitter) and have been dropped; doing the attribute reads they
    required inside ``finally`` could also mask the original exception.
    """

    def render(self, block, view_name, context=None):
        """Delegate to ``super().render`` and log how long the view took."""
        start_time = time.time()
        try:
            return super().render(block, view_name, context=context)
        finally:
            # Runs on success and failure alike, so failed renders are timed too.
            duration = time.time() - start_time
            log.debug(
                "%.3fs - render %s.%s (%s)",
                duration,
                block.__class__.__name__,
                view_name,
                getattr(block, 'location', ''),
            )

    def handle(self, block, handler_name, request, suffix=''):
        """Delegate to ``super().handle`` and log how long the handler took."""
        start_time = time.time()
        try:
            return super().handle(block, handler_name, request, suffix=suffix)
        finally:
            duration = time.time() - start_time
            log.debug(
                "%.3fs - handle %s.%s (%s)",
                duration,
                block.__class__.__name__,
                handler_name,
                getattr(block, 'location', ''),
            )
class DescriptorSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
    """
    Base class for :class:`Runtime`s to be used with :class:`XModuleDescriptor`s
    """

    def __init__(
        self, load_item, resources_fs, error_tracker, get_policy=None, disabled_xblock_types=lambda: [], **kwargs
    ):
        """
        load_item: Takes a Location and returns an XModuleDescriptor
        resources_fs: A Filesystem object that contains all of the
            resources needed for the course
        error_tracker: A hook for tracking errors in loading the descriptor.
            Used for example to get a list of all non-fatal problems on course
            load, and display them to the user.
            See errortracker.py for more documentation
        get_policy: a function that takes a usage id and returns a dict of
            policy to apply.
        disabled_xblock_types: callable returning block types that should be
            loaded as the runtime's default class (see load_block_type).
        local_resource_url: an implementation of :meth:`xblock.runtime.Runtime.local_resource_url`
        """
        # Provide key reader/generator defaults before delegating to Runtime.
        kwargs.setdefault('id_reader', OpaqueKeyReader())
        kwargs.setdefault('id_generator', AsideKeyGenerator())
        super().__init__(**kwargs)
        # This is used by XModules to write out separate files during xml export
        self.export_fs = None
        self.load_item = load_item
        self.resources_fs = resources_fs
        self.error_tracker = error_tracker
        if get_policy:
            self.get_policy = get_policy
        else:
            # No policy supplied: every usage id maps to an empty policy dict.
            self.get_policy = lambda u: {}
        self.disabled_xblock_types = disabled_xblock_types

    def get_block(self, usage_id, for_parent=None):
        """See documentation for `xblock.runtime:Runtime.get_block`"""
        return self.load_item(usage_id, for_parent=for_parent)

    def load_block_type(self, block_type):
        """
        Returns a subclass of :class:`.XBlock` that corresponds to the specified `block_type`.
        Disabled block types fall back to the runtime's default class.
        """
        if block_type in self.disabled_xblock_types():
            return self.default_class
        return super().load_block_type(block_type)

    def get_field_provenance(self, xblock, field):
        """
        For the given xblock, return a dict for the field's current state:
        {
            'default_value': what json'd value will take effect if field is unset: either the field default or
            inherited value,
            'explicitly_set': boolean for whether the current value is set v default/inherited,
        }
        :param xblock:
        :param field:
        """
        # pylint: disable=protected-access
        # in runtime b/c runtime contains app-specific xblock behavior. Studio's the only app
        # which needs this level of introspection right now. runtime also is 'allowed' to know
        # about the kvs, dbmodel, etc.
        result = {}
        result['explicitly_set'] = xblock._field_data.has(xblock, field.name)
        try:
            result['default_value'] = xblock._field_data.default(xblock, field.name)
        except KeyError:
            # The field-data store has no default for this field; fall back
            # to the field's own declared default.
            result['default_value'] = field.to_json(field.default)
        return result

    def handler_url(self, block, handler_name, suffix='', query='', thirdparty=False):
        """See :meth:`xblock.runtime.Runtime.handler_url` — delegates to a module-level hook."""
        # Currently, Modulestore is responsible for instantiating DescriptorSystems
        # This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
        # that implements the correct handler url. So, for now, instead, we will reference a
        # global function that the application can override.
        return descriptor_global_handler_url(block, handler_name, suffix, query, thirdparty)

    def local_resource_url(self, block, uri):
        """
        See :meth:`xblock.runtime.Runtime:local_resource_url` for documentation.
        """
        # Currently, Modulestore is responsible for instantiating DescriptorSystems
        # This means that LMS/CMS don't have a way to define a subclass of DescriptorSystem
        # that implements the correct local_resource_url. So, for now, instead, we will reference a
        # global function that the application can override.
        return descriptor_global_local_resource_url(block, uri)

    def applicable_aside_types(self, block):
        """
        See :meth:`xblock.runtime.Runtime:applicable_aside_types` for documentation.
        """
        potential_set = set(super().applicable_aside_types(block))
        if getattr(block, 'xmodule_runtime', None) is not None:
            if hasattr(block.xmodule_runtime, 'applicable_aside_types'):
                # Only asides that both this runtime and the xmodule runtime
                # consider applicable survive.
                application_set = set(block.xmodule_runtime.applicable_aside_types(block))
                return list(potential_set.intersection(application_set))
        return list(potential_set)

    def resource_url(self, resource):
        """
        See :meth:`xblock.runtime.Runtime:resource_url` for documentation.
        """
        raise NotImplementedError("edX Platform doesn't currently implement XBlock resource urls")

    def add_block_as_child_node(self, block, node):
        """Serialize `block` as an XML child of `node`; add_xml_to_node sets the real tag."""
        child = etree.SubElement(node, "unknown")
        child.set('url_name', block.url_name)
        block.add_xml_to_node(child)

    def publish(self, block, event_type, event):  # lint-amnesty, pylint: disable=arguments-differ
        # A stub publish method that doesn't emit any events from XModuleDescriptors.
        pass

    def service(self, block, service_name):
        """
        Runtime-specific override for the XBlock service manager. If a service is not currently
        instantiated and is declared as a critical requirement, an attempt is made to load the
        module.
        Arguments:
            block (an XBlock): this block's class will be examined for service
                decorators.
            service_name (string): the name of the service requested.
        Returns:
            An object implementing the requested service, or None.
        """
        # getting the service from parent module. making sure of block service declarations.
        service = super().service(block=block, service_name=service_name)
        # Passing the block to service if it is callable e.g. ModuleI18nService. It is the responsibility of calling
        # service to handle the passing argument.
        if callable(service):
            return service(block)
        return service
class XMLParsingSystem(DescriptorSystem):  # lint-amnesty, pylint: disable=abstract-method, missing-class-docstring
    """DescriptorSystem that can build XBlocks (and their asides) from XML."""

    def __init__(self, process_xml, **kwargs):
        """
        process_xml: Takes an xml string, and returns a XModuleDescriptor
            created from that xml
        """
        super().__init__(**kwargs)
        self.process_xml = process_xml

    def _usage_id_from_node(self, node, parent_id, id_generator=None):
        """Create a new usage id from an XML dom node.
        Args:
            node (lxml.etree.Element): The DOM node to interpret.
            parent_id: The usage ID of the parent block
            id_generator (IdGenerator): The :class:`.IdGenerator` to use
                for creating ids
        Returns:
            UsageKey: the usage key for the new xblock
        """
        return self.xblock_from_node(node, parent_id, id_generator).scope_ids.usage_id

    def xblock_from_node(self, node, parent_id, id_generator=None):
        """
        Create an XBlock instance from XML data.
        Args:
            xml_data (string): A string containing valid xml.
            system (XMLParsingSystem): The :class:`.XMLParsingSystem` used to connect the block
                to the outside world.
            id_generator (IdGenerator): An :class:`~xblock.runtime.IdGenerator` that
                will be used to construct the usage_id and definition_id for the block.
        Returns:
            XBlock: The fully instantiated :class:`~xblock.core.XBlock`.
        """
        id_generator = id_generator or self.id_generator
        # leave next line commented out - useful for low-level debugging
        # log.debug('[_usage_id_from_node] tag=%s, class=%s' % (node.tag, xblock_class))
        block_type = node.tag
        # remove xblock-family from elements
        node.attrib.pop('xblock-family', None)
        url_name = node.get('url_name')  # difference from XBlock.runtime
        def_id = id_generator.create_definition(block_type, url_name)
        usage_id = id_generator.create_usage(def_id)
        keys = ScopeIds(None, block_type, def_id, usage_id)
        block_class = self.mixologist.mix(self.load_block_type(block_type))
        # Asides must be split out of the node BEFORE the block parses it.
        aside_children = self.parse_asides(node, def_id, usage_id, id_generator)
        asides_tags = [x.tag for x in aside_children]
        block = block_class.parse_xml(node, self, keys, id_generator)
        self._convert_reference_fields_to_keys(block)  # difference from XBlock.runtime
        block.parent = parent_id
        block.save()
        # Attach only the asides that were actually present in the XML.
        asides = self.get_asides(block)
        for asd in asides:
            if asd.scope_ids.block_type in asides_tags:
                block.add_aside(asd)
        return block

    def parse_asides(self, node, def_id, usage_id, id_generator):
        """pull the asides out of the xml payload and instantiate them"""
        aside_children = []
        for child in node.iterchildren():
            # get xblock-family from node
            xblock_family = child.attrib.pop('xblock-family', None)
            if xblock_family:
                xblock_family = self._family_id_to_superclass(xblock_family)
                if issubclass(xblock_family, XBlockAside):
                    aside_children.append(child)
        # now process them & remove them from the xml payload
        for child in aside_children:
            self._aside_from_xml(child, def_id, usage_id, id_generator)
            node.remove(child)
        return aside_children

    def _make_usage_key(self, course_key, value):
        """
        Makes value into a UsageKey inside the specified course.
        If value is already a UsageKey, returns that.
        """
        if isinstance(value, UsageKey):
            return value
        usage_key = UsageKey.from_string(value)
        return usage_key.map_into_course(course_key)

    def _convert_reference_fields_to_keys(self, xblock):
        """
        Find all fields of type reference and convert the payload into UsageKeys
        """
        course_key = xblock.scope_ids.usage_id.course_key
        for field in xblock.fields.values():
            if field.is_set_on(xblock):
                field_value = getattr(xblock, field.name)
                if field_value is None:
                    continue
                elif isinstance(field, Reference):
                    setattr(xblock, field.name, self._make_usage_key(course_key, field_value))
                elif isinstance(field, ReferenceList):
                    setattr(xblock, field.name, [self._make_usage_key(course_key, ele) for ele in field_value])
                elif isinstance(field, ReferenceValueDict):
                    # Convert values in place, then re-set so the field
                    # registers as dirty and persists the converted keys.
                    for key, subvalue in field_value.items():
                        assert isinstance(subvalue, str)
                        field_value[key] = self._make_usage_key(course_key, subvalue)
                    setattr(xblock, field.name, field_value)
class ModuleSystem(MetricsMixin, ConfigurableFragmentWrapper, Runtime):
"""
This is an abstraction such that x_modules can function independent
of the courseware (e.g. import into other types of courseware, LMS,
or if we want to have a sandbox server for user-contributed content)
ModuleSystem objects are passed to x_modules to provide access to system
functionality.
Note that these functions can be closures over e.g. a django request
and user, or other environment-specific info.
"""
def __init__(
self, static_url, track_function, get_module, render_template,
replace_urls, descriptor_runtime, user=None, filestore=None,
debug=False, hostname="", xqueue=None, publish=None, node_path="",
anonymous_student_id='', course_id=None,
cache=None, can_execute_unsafe_code=None, replace_course_urls=None,
replace_jump_to_id_urls=None, error_descriptor_class=None, get_real_user=None,
field_data=None, get_user_role=None, rebind_noauth_module_to_user=None,
| |
#Copyright 2014 Blackberry Limited
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#globalstatemanager/gsm.py
#Remember to use at least pysftp 0.2.2 —
#the pip install doesn't give you that version.
from pysftp import Connection
from os.path import dirname,join,basename,getsize,isfile
from os import listdir
from ConfigParser import RawConfigParser
from dulwich.repo import Repo
from django.core.management import execute_from_command_line
from django.db.models import Sum
from ssdfrontend.models import VG
from ssdfrontend.models import StorageHost
from ssdfrontend.models import LV
from ssdfrontend.models import Target
from ssdfrontend.models import Interface
from ssdfrontend.models import IPRange
from django.contrib.auth.models import User
from logging import getLogger
import utils.scstconf
import sys
from traceback import format_exc
from utils.configreader import ConfigReader
import socket
import ipaddress
reload (sys)
sys.setdefaultencoding("utf-8")
logger = getLogger(__name__)
class PollServer():
"""
This is the controller that calls/runs scripts on a Saturn server
as required by saturnring
"""
    def __init__(self,serverDNS):
        """
        The init script for the class
        :param serverDNS: DNS name of the Saturn storage server to manage;
            must match a StorageHost row in the database.
        """
        # Load the paths/credentials used for all subsequent SSH work.
        try:
            self.serverDNS = str(serverDNS)
            self.hostobject = StorageHost.objects.get(dnsname=self.serverDNS)
            BASE_DIR = dirname(dirname(__file__))
            config = ConfigReader()
            self.userName = config.get('saturnnode','user')
            self.keyFile = join(BASE_DIR,config.get('saturnring','privatekeyfile'))
            self.rembashpath = config.get('saturnnode','bashpath')
            self.rempypath = config.get('saturnnode','pythonpath')
            self.iscsiconfdir = join(BASE_DIR,config.get('saturnring','iscsiconfigdir'))
            self.remoteinstallLoc = config.get('saturnnode','install_location')
            self.localbashscripts = join(BASE_DIR,config.get('saturnring','bashscripts'))
        except:
            logger.critical("Error setting up configuration for server "+self.serverDNS)
            logger.critical(format_exc())
        # Open a persistent SSH/SFTP connection; on failure the string
        # sentinel "inError" is stored and checked by the other methods.
        try:
            self.srv = Connection(self.serverDNS,self.userName,self.keyFile)
        except:
            logger.critical("Failed SSH-exec connection on Saturn server %s; possible cause: %s" % (self.serverDNS,format_exc()) )
            self.srv="inError"
def CheckServer(self):
if self.srv == 'inError':
return -1
remotePath = join(self.remoteinstallLoc,'saturn-bashscripts')
cmdStr = " ".join([join(remotePath,'checkserver.sh'), '2> checkservererror.log'])
#logger.info("Executing %s on %s" %(cmdStr,self.serverDNS))
rtnStrList = self.Exec(cmdStr)
if (rtnStrList == -1):
return -2
else:
for aLine in rtnStrList:
if "FAILURE" in aLine:
logger.error(self.serverDNS + ": "+ str(rtnStrList))
return -3
return 0
    def InstallScripts(self):
        """
        Copy bash scripts from the saturnringserver into the saturn server via sftp
        :return: 1 on success, -1 on any failure (errors are logged, not raised).
        """
        rtnVal = -1
        try:
            if self.srv == "inError":
                raise Exception('Server SSH connection object inError')
            remotePath = join(self.remoteinstallLoc,'saturn-bashscripts')
            self.srv.execute (" ".join(['mkdir', '-p', remotePath]))
            self.srv.chdir(remotePath)
            # Upload every local script and make it executable on the server.
            locallist=listdir(self.localbashscripts)
            for localfile in locallist:
                self.srv.put(join(self.localbashscripts,localfile))
                self.srv.execute(" ".join(["chmod", "777",join(remotePath,localfile)]))
            #Update rc.local for luks reboot functionality
            luksopenscriptpath = join(remotePath,'luksonreboot.sh');
            self.srv.execute("sudo sed -i '/luksonreboot.sh/d' /etc/rc.local") #delete pre-existing line if any
            self.srv.execute("sudo sed -i '/^exit 0/i " + '/bin/bash ' + luksopenscriptpath +"' /etc/rc.local")
            logger.info("Installed scripts on "+ self.serverDNS)
            rtnVal = 1
        except:
            logger.error('Could not install scripts on '+self.serverDNS)
            logger.error(format_exc())
        finally:
            # NOTE(review): `return` inside `finally` would also swallow any
            # uncaught exception; harmless here since the bare except above
            # already catches everything.
            return rtnVal
def Exec(self,command):
"""
Helper function for executing a remote command over an SSH tunnel
"""
rtncmd = -1
if self.srv=="inError":
logger.error("There is no ssh connection object for server: %s" %(self.serverDNS,))
return -1
try:
#srv = Connection(self.serverDNS,self.userName,self.keyFile)
rtncmd=self.srv.execute(command)
#srv.close()
except:
logger.error("Failed SSH-exec command: %s on Saturn server %s" % (command, self.serverDNS))
logger.error(format_exc())
return rtncmd
def GetFile(self,remotePath,localPath):
"""
Get a file from the remote server.
return 1 on success, -1 on error
"""
try:
self.srv.get(remotePath,localPath)
#logger.info("Copying file %s from remote server %s to local path %s succeeded" %(remotePath,self.serverDNS,localPath))
return 1
except:
logger.error("Error copying file %s from remote server %s to local path %s" %(remotePath,self.serverDNS,localPath))
logger.error(format_exc())
return -1
    def PutKeyFile(self,keyfileName):
        """
        Copy over the keyfile to be used for creating the LUKs encrypted DM volumes
        :param keyfileName: name of the key file inside the local iSCSI config dir
        :return: remote path of the installed key file, or -1 on failure.
        """
        remoteKeyfileDir = join(self.remoteinstallLoc,'keys')
        try:
            self.Exec (" ".join(['mkdir','-p',remoteKeyfileDir]))
            self.srv.chdir(remoteKeyfileDir)
            self.srv.put(join(self.iscsiconfdir,keyfileName))
            self.remoteKeyfilePath = join(remoteKeyfileDir,keyfileName)
            # Confirm the upload landed: the remote test must echo the marker.
            rtnString = self.Exec ('test -f ' + self.remoteKeyfilePath + '&& echo "OK Putkeyfile" ')
            logger.info(rtnString)
            if "OK Putkeyfile" not in str(rtnString):
                raise ValueError("Putkey didnt install file")
        except ValueError:
            # NOTE(review): only the ValueError raised above is caught here;
            # chdir/put failures propagate to the caller — confirm intended.
            logger.error("Failed to put keyfile on Saturn server %s at location %s" %(self.serverDNS,join(remoteKeyfileDir,keyfileName)))
            logger.error(format_exc())
            return -1
        return self.remoteKeyfilePath
def DelKeyFile(self,keyfileName):
"""
Delete key file from saturn server
"""
remoteKeyfileDir = join(self.remoteinstallLoc,'keys')
self.srv.execute('rm '+ join(remoteKeyfileDir,keyfileName))
rtnString = self.Exec ('test ! -f ' + join(self.iscsiconfdir,keyfileName)+ ' && echo "OK Deleted keyfile"')
return rtnString
def ParseLVM(self,strList,delimitStr,paraList):
"""
Parse lvdisplay and vgdisplay strings and populate
dictionaries with relevant information
"""
rtnDict ={}
valueDict={}
for aLine in strList:
if (delimitStr in aLine):
if len(valueDict) == len(paraList):
rtnDict[valueDict[paraList[0]]]=valueDict
valueDict = {}
continue
else:
for anItem in paraList:
if anItem in aLine:
valueDict[anItem] = aLine.split(anItem)[1].strip()
if '%' in valueDict[anItem]:
valueDict[anItem] = float(valueDict[anItem][:-2])
continue
if '/' in valueDict[anItem]:
valueDict[anItem] = valueDict[anItem].split('/')[0]
if (('GiB' in valueDict[anItem]) and ('Size' in aLine)):
valueDict[anItem] = float(valueDict[anItem].split('GiB')[0])*1
continue
if (('MiB' in valueDict[anItem]) and ('Size' in aLine)):
valueDict[anItem] = float(valueDict[anItem].split('MiB')[0])*0.001
continue
continue
if len(valueDict) == len(paraList):
rtnDict[valueDict[paraList[0]]] = valueDict
#logger.info(rtnDict)
return rtnDict
def UpdateLVs(self,vgObject):
"""
Update LV information, called to monitor and update capacity.
"""
lvdict = self.GetLVs(vgObject.vguuid)
if "No LVs " in lvdict:
logger.info("There are no LVs in %s to run UpdateLVs on in Saturn host %s" %(vgObject.vguuid, self.serverDNS))
return 0
if lvdict == -1:
logger.error ("Could not run GetLVs (perhaps there are no LVs in this VG yet?)")
return -1
lvs = set(LV.objects.filter(vg=vgObject))
lvDict = {}
for eachlv in lvs:
lvDict[eachlv.lvname] = eachlv
for lvName,lvinfo in lvdict.iteritems():
if lvName in lvDict:
preexistLV=lvDict[lvName]
preexistLV.lvsize=lvinfo['LV Size']
preexistLV.save(update_fields=['lvsize'])
else:
logger.warn("Found orphan LV %s in VG %s on host %s" %(lvName,vgObject.vguuid,self.serverDNS))
def GetLVs(self,vguuid):
"""
Wrapper for parselvm (for LVs), actually populating the DB is done by the UpdateLV function
"""
execCmd = " ".join(['sudo','vgdisplay', '-c','|','grep',vguuid,'|','cut -d: -f1'])
vgname = self.Exec(execCmd)[0].strip()
if vgname == -1:
logger.error("Could not execute %s on %s " % (execCmd,self.serverDNS))
return -1
execCmd=" ".join(['sudo','lvdisplay','--units g',vgname])
lvStrList = self.Exec(execCmd)
if lvStrList ==[""]:
return "No LVs in %s" %(vguuid,)
if lvStrList == -1:
logger.error("Could not execute %s on %s " % (execCmd,self.serverDNS))
return -1
delimitStr = '--- Logical volume ---'
paraList=['LV Name','LV UUID','LV Size']
lvs = self.ParseLVM(lvStrList,delimitStr,paraList)
return lvs
    def GetVG(self): #Unit test this again
        """
        Wrapper for parseLVM (for VGs)+populating the DB
        :return: comma-separated string of VG UUIDs found on the server,
            -1 if vgdisplay could not be run, or 3 if a failing VG was not
            found in the database.
        """
        delimitStr = '--- Volume group ---'
        paraList = ['VG Name','VG Size','PE Size','Total PE', 'Free PE / Size', 'VG UUID']
        execCmd = " ".join(['sudo','vgdisplay','--units g'])
        vgStrList = self.Exec(execCmd)
        if vgStrList == -1:
            logger.error("Error in GetVG while executing %s on server %s " %(execCmd,self.serverDNS))
            return -1
        vgs = self.ParseLVM(vgStrList,delimitStr,paraList)
        #logger.info("VGStating on %s returns %s " % (self.serverDNS, str(vgs)) )
        rtnvguuidList = ""
        for vgname in vgs:
            try:
                # vgstats.sh prints three lines: max available GB, total GB,
                # and a 0/1 thin-provisioning flag.
                execCmd = " ".join(['sudo',join(self.remoteinstallLoc,'saturn-bashscripts/vgstats.sh'),vgname,' 2> error.log'])
                cmdStr = self.Exec(execCmd)
                maxavl = float(cmdStr[0].rstrip())
                totalGB = float(cmdStr[1].rstrip())
                isThin = bool(int(cmdStr[2].rstrip()))
            except:
                # Could not stat this VG: flag it in_error in the DB and stop.
                logger.warn("Unable to run VGscan, disabling VG on "+self.serverDNS)
                logger.warn(format_exc())
                try:
                    vg = VG.objects.get(vguuid=vgs[vgname]['VG UUID'])
                    vg.in_error = True
                    vg.save(update_fields=['in_error'])
                except:
                    logger.error("VG not found in DB: %s" % ( vgs[vgname]['VG UUID'],))
                return 3
            existingvgs = VG.objects.filter(vguuid=vgs[vgname]['VG UUID'])
            if len(existingvgs)==1:
                # Known VG: refresh its capacity numbers.
                existingvg = existingvgs[0]
                existingvg.in_error=False
                existingvg.CurrentAllocGB = totalGB-maxavl#Target.objects.filter(targethost=existingvg.vghost).aggregate(Sum('sizeinGB'))['sizeinGB__sum']
                existingvg.totalGB=totalGB
                existingvg.maxavlGB=maxavl
                existingvg.is_thin=isThin
                existingvg.vgsize = vgs[vgname]['VG Size']
                existingvg.save(update_fields=['totalGB','maxavlGB','vgsize','CurrentAllocGB','in_error','is_thin'])
                #logger.info( "Ran in existingVG loop")
            else:
                # Previously unseen VG: create a new DB row for it.
                logger.info("Found new VG, adding\n" + str(vgs[vgname]))
                myvg = VG(vghost=StorageHost.objects.get(dnsname=self.serverDNS),vgsize=vgs[vgname]['VG Size'],
                        vguuid=vgs[vgname]['VG UUID'],vgpesize=vgs[vgname]['PE Size'],
                        vgtotalpe=vgs[vgname]['Total PE'],
                        vgfreepe=vgs[vgname]['Free PE / Size'],
                        totalGB=totalGB,maxavlGB=maxavl, is_thin=isThin)
                myvg.save()#force_update=True)
            rtnvguuidList = rtnvguuidList+ ','+ vgs[vgname]['VG UUID']
        return rtnvguuidList[1:]
def GitSave(self,commentStr):
"""
Check in changes to config files into git repository
"""
try:
repo = Repo(self.iscsiconfdir)
filelist = [ f for f in listdir(self.iscsiconfdir) if isfile(join(self.iscsiconfdir,f)) ]
repo.stage(filelist)
repo.do_commit(commentStr)
return 1
except:
var = format_exc()
logger.error("During GitSave %s: Git save error: %s" % (commentStr,var))
return -1
    def CreateTarget(self,iqnTarget,iqnInit,sizeinGB,storageip1,storageip2,vguuid,isencrypted):
        """
        Create iSCSI target by running the createtarget script;
        and save latest scst.conf from the remote server (overwrite)
        :param iqnTarget: IQN of the iSCSI target to create
        :param iqnInit: IQN of the initiator allowed to connect
        :param sizeinGB: size of the backing LV in GB
        :param storageip1: first portal IP for the target
        :param storageip2: second portal IP for the target
        :param vguuid: UUID of the VG to carve the LV from
        :param isencrypted: '1' to create a LUKS-encrypted target
        :return: 1 on success, 0 when the script did not report SUCCESS,
            -1 on encrypted-key setup or remote execution failure.
        """
        #self.srv = Connection(self.serverDNS,self.userName,self.keyFile)
        if str(isencrypted) != '1':
            cmdStr = " ".join(['sudo',self.rembashpath,join(self.remoteinstallLoc,'saturn-bashscripts','createtarget.sh'),
                str(sizeinGB),iqnTarget,storageip1,storageip2,iqnInit,vguuid, '2> createtarget.sh-error.log'])
        else:
            try:
                # Encrypted target: the LUKS key must be on the server first.
                self.remotekeyfilelocation = self.PutKeyFile("cryptokey")
                cmdStr = " ".join(['sudo',self.rembashpath,join(self.remoteinstallLoc,'saturn-bashscripts','createencryptedtarget.sh'),
                    str(sizeinGB),iqnTarget,storageip1,storageip2,iqnInit,vguuid,self.remotekeyfilelocation,'2> createencryptedtarget.sh-error.log'])
                if self.remotekeyfilelocation == -1:
                    raise ValueError("Putkey failed")
            except:
                logger.error("Error setting up encrypted target: %s " %(iqnTarget,))
                logger.error(format_exc())
                return -1
        #srv.close()
        logger.info ("Launching createtarget with \n%s" %(cmdStr,))
        exStr=self.Exec(cmdStr)
        if exStr == -1:
            return -1
        commentStr = "Trying to create target %s " %( iqnTarget, )
        # Archive the updated SCST and LVM config files locally and commit
        # them to git; failures here are logged but do not fail the create.
        try:
            if self.GetFile('/temp/scst.conf',self.iscsiconfdir+self.serverDNS+'.scst.conf')==-1:
                raise Exception('Error getting scst.conf')
            if self.GetFile(join('/temp',vguuid),join(self.iscsiconfdir,self.serverDNS+'.'+vguuid+'.lvm'))==-1:
                raise Exception('Error getting LVM configuration file %s' %(vguuid+'.lvm',))
            if self.GitSave(commentStr) == -1:
                raise Exception('Error in GitSave')
        except:
            logger.warning('Unable to save updated config files on ring server')
            logger.warning(format_exc())
        logger.info("Execution report for %s: %s" %(cmdStr,"\t".join(exStr)))
        # The createtarget script prints SUCCESS on a clean run.
        if "SUCCESS" in str(exStr):
            logger.info("Returning successful createtarget run")
            return 1
        else:
            logger.error("Returning failed createtarget run:" + str(exStr))
            return 0
def GetTargetsState(self):
"""
Read targets to determine their latest state | |
"""Stochastic compartmental models."""
import logging
from pathlib import Path
import numpy as np
import pypfilt
from .model import Model
class SEEIIR(Model):
    """A stochastic SEEIIR compartment model."""
    # Per-particle state columns as (name, flag, minimum, maximum).
    # Columns 0-5 are unvaccinated (_U) compartments, 6-11 are vaccinated
    # (_V) compartments; the remainder are parameters and bookkeeping.
    # NOTE(review): the boolean flag presumably marks values that the
    # particle filter may smooth/perturb -- confirm against the pypfilt
    # Model API before relying on it.
    __info = [
        ("S_U", False, 0, 1),
        ("E1_U", False, 0, 1), ("E2_U", False, 0, 1),
        ("I1_U", False, 0, 1), ("I2_U", False, 0, 1),
        ("R_U", False, 0, 1),
        ("S_V", False, 0, 1),
        ("E1_V", False, 0, 1), ("E2_V", False, 0, 1),
        ("I1_V", False, 0, 1), ("I2_V", False, 0, 1),
        ("R_V", False, 0, 1),
        ("R0", False, 1.0, 2.0),
        ("sigma", True, 1/3, 2.0),
        ("gamma", True, 1/3, 1.0),
        ("t0", False, 0, 100),
        ("R0_ix", False, 0, 1e6),
        ("R0_val", False, 0, 100),
        ("R_bias", False, 1/3, 2.0),
        ("adjustment", False, 0, 1)]
    # Convenience indices into the state vector; these must stay in sync
    # with the ordering of __info above.
    ix_S_U = 0
    ix_E1_U = 1
    ix_E2_U = 2
    ix_I1_U = 3
    ix_I2_U = 4
    ix_R_U = 5
    ix_S_V = 6
    ix_E1_V = 7
    ix_E2_V = 8
    ix_I1_V = 9
    ix_I2_V = 10
    ix_R_V = 11
    ix_R0 = 12
    ix_sigma = 13
    ix_gamma = 14
    ix_t0 = 15
    ix_R0_ix = 16
    ix_R0_val = 17
    ix_R_bias = 18
    ix_adjustment = 19
    R0_order_map = np.arange(0, 1000, 1)
    # Transition index arrays; populated in init().
    sigma_transitions = None
    gamma_transitions = None
    vacc_transitions = None
    # Compartment masks; populated in init().
    comp_mask_all = None
    comp_mask_U = None
    # Number of true compartments (columns 0-11); the remaining columns
    # of the state vector are parameters, not compartments.
    n_compartments = 12
def __init__(self):
self.__R0_lookup = None
self.__external_lookup = None
self.__regularise_R0_ix = False
def state_size(self):
"""Return the size of the state vector."""
return len(self.__info)
    def population_size(self):
        """Return the model population size (set in ``init()``)."""
        return self.popn_size
    def init(self, ctx, vec):
        """Initialise a state vector.
        :param ctx: The simulation context.
        :param vec: An uninitialised state vector of correct dimensions (see
            :py:func:`~state_size`).
        """
        self.popn_size = ctx.params['model']['population_size']
        # Reset lookup tables; they are (re)attached below.
        self.__R0_lookup = None
        self.__external_lookup = None
        self.__vaccinations_lookup = None
        self.__regularise_R0_ix = ctx.params.get_chained(
            ['model', 'regularisation', 'R0_ix'], default=False)
        prior = ctx.params['model']['prior']
        rnd_size = vec[..., 0].shape
        rnd = ctx.component['random']['model']
        # Seed each particle with 10 unvaccinated infectious individuals.
        num_exps = 10.0
        vec[..., :] = 0
        vec[..., self.ix_S_U] = self.popn_size - num_exps
        vec[..., self.ix_I1_U] = num_exps
        # Draw model parameters from their priors.
        vec[..., self.ix_R0] = prior['R0'](rnd, size=rnd_size)
        vec[..., self.ix_sigma] = prior['sigma'](rnd, size=rnd_size)
        vec[..., self.ix_gamma] = prior['gamma'](rnd, size=rnd_size)
        vec[..., self.ix_t0] = prior['t0'](rnd, size=rnd_size)
        vec[..., self.ix_adjustment] = 1
        # (from, to) index pairs for incubation (sigma) progression, in
        # both the unvaccinated and vaccinated arms.
        sigma_transitions_ix = [(self.ix_E1_U, self.ix_E2_U), (self.ix_E2_U, self.ix_I1_U),
                                (self.ix_E1_V, self.ix_E2_V), (self.ix_E2_V, self.ix_I1_V)]
        self.sigma_transitions = np.moveaxis(np.array(sigma_transitions_ix), -1, 0)
        # (from, to) index pairs for recovery (gamma) progression.
        gamma_transitions_ix = [(self.ix_I1_U, self.ix_I2_U), (self.ix_I2_U, self.ix_R_U),
                                (self.ix_I1_V, self.ix_I2_V), (self.ix_I2_V, self.ix_R_V)]
        self.gamma_transitions = np.moveaxis(np.array(gamma_transitions_ix), -1, 0)
        # (from, to) index pairs moving each unvaccinated compartment to
        # its vaccinated counterpart.
        vacc_transitions_ix = [(self.ix_S_U, self.ix_S_V), (self.ix_E1_U, self.ix_E1_V), (self.ix_E2_U, self.ix_E2_V),
                               (self.ix_I1_U, self.ix_I1_V), (self.ix_I2_U, self.ix_I2_V), (self.ix_R_U, self.ix_R_V)]
        self.vacc_transitions = np.moveaxis(np.array(vacc_transitions_ix), -1, 0)
        # Mask selecting only the unvaccinated compartments.
        self.comp_mask_U = np.array([1,1,1,1,1,1,0,0,0,0,0,0])
        self.comp_mask_all = np.ones(self.n_compartments)
        self.load_samples_file(ctx, vec)
        self.load_lookup_tables(ctx)
        self.init_lookup_values(ctx, vec)
def sample_columns(self):
"""Identify parameters that can be saved and loaded."""
ix_tbl = {
'R0': self.ix_R0,
'sigma': self.ix_sigma,
'gamma': self.ix_gamma,
't0': self.ix_t0,
'R0_ix': self.ix_R0_ix,
'R_bias': self.ix_R_bias,
'adjustment': self.ix_adjustment
}
return ix_tbl
def load_samples_file(self, ctx, vec):
"""Load initial parameter values from an external data file."""
if 'prior_samples' not in ctx.params['model']:
return
logger = logging.getLogger(__name__)
samples = ctx.params['model']['prior_samples']
data_dir = Path(ctx.params['data_dir'])
data_file = data_dir / samples['file']
columns = [(name, np.float) for name in samples['columns']]
tbl = pypfilt.io.read_table(data_file, columns)
if tbl.shape != vec[..., 0].shape:
raise ValueError('Incompatible data shapes: {} and {}'.format(
vec[..., 0].shape, tbl.shape))
ix_tbl = self.sample_columns()
for name in samples['columns']:
if name not in ix_tbl:
raise ValueError('Unknown parameter {}'.format(name))
ix = ix_tbl[name]
vec[..., ix] = tbl[name]
# NOTE: warn if sampled values exceed the parameter bounds.
min_val = np.min(tbl[name])
max_val = np.max(tbl[name])
if min_val < ctx.params['model']['param_min'][ix]:
logger.warning('Sampled value for {} outside bounds'
.format(name))
elif max_val > ctx.params['model']['param_max'][ix]:
logger.warning('Sampled value for {} outside bounds'
.format(name))
# Clip the sampled values to enforce the parameter bounds.
# The alternative is to leave the sample values as provided, in
# which case they will only be clipped if post-regularisation is
# enabled and the particles are resampled.
vec[..., ix] = np.clip(vec[..., ix],
ctx.params['model']['param_min'][ix],
ctx.params['model']['param_max'][ix])
    def resume_from_cache(self, ctx):
        """
        A simulation will begin from a saved state, so the model must check
        whether any lookup tables are defined.
        :param ctx: The simulation context.
        """
        # Lookup tables live in ctx.component and are not part of the
        # cached state, so they must be re-attached here.
        self.load_lookup_tables(ctx)
def load_lookup_tables(self, ctx):
"""
Allow R0 and imported cases to be provided via lookup tables.
:param ctx: The simulation context.
"""
logger = logging.getLogger(__name__)
tables = ctx.component.get('lookup', {})
# Check for the R0 lookup table.
if 'R0' in tables:
self.__R0_lookup = tables['R0']
logger.info('Using lookup table for R0 with {} values'.format(
self.__R0_lookup.value_count()))
# Check for the external exposures lookup table.
exp_table = 'external_exposures'
if exp_table in tables:
self.__external_lookup = tables[exp_table]
logger.info(
'Using lookup table for external exposures with {} values'
.format(self.__external_lookup.value_count()))
vacc_table = 'vaccinations'
if vacc_table in tables:
self.__vaccinations_lookup = tables[vacc_table]
logger.info(
'Using lookup table for vaccinations with {} values'
.format(self.__vaccinations_lookup.value_count()))
def init_lookup_values(self, ctx, vec):
"""
Initialise the ``R0_ix`` values if an R0 lookup table is defined.
:param ctx: The simulation context.
:param vec: An uninitialised state vector of correct dimensions (see
:py:func:`~state_size`).
"""
if self.__R0_lookup is not None:
num_values = self.__R0_lookup.value_count()
if num_values > 1:
rnd = ctx.component['random']['model']
rnd_size = vec[..., 0].shape
vec[..., self.ix_R0_ix] = rnd.integers(num_values,
size=rnd_size)
else:
vec[..., self.ix_R0_ix] = 0
    def update(self, ctx, step_date, dt, is_fs, prev, curr):
        """Perform a single time-step.
        :param ctx: The simulation context.
        :param step_date: The date and time of the current time-step.
        :param dt: The time-step size (days).
        :param is_fs: Indicates whether this is a forecasting simulation.
        :param prev: The state before the time-step.
        :param curr: The state after the time-step (destructively updated).
        """
        rnd = ctx.component['random']['model']
        params = ctx.params
        # Update parameters and lookup tables that are defined in self.init()
        # and which will not exist if we are resuming from a cached state.
        self.popn_size = ctx.params['model']['population_size']
        # Extract each parameter.
        R0 = prev[..., self.ix_R0].copy()
        sigma = prev[..., self.ix_sigma].copy()
        gamma = prev[..., self.ix_gamma].copy()
        R0_ix = np.around(prev[..., self.ix_R0_ix]).astype(int)
        R_bias = prev[..., self.ix_R_bias].copy()
        adjustment = prev[..., self.ix_adjustment].copy()
        # Compartment counts only (columns 0..ix_R_V); this is a view of
        # ``prev``, not a copy.
        current_state = prev[:, 0:(self.ix_R_V + 1)]
        epoch = ctx.component['time'].to_scalar(ctx.params['epoch'])
        curr_t = ctx.component['time'].to_scalar(step_date)
        # Particles whose epidemic start time (t0) is still in the future
        # have their rates zeroed below.
        zero_mask = prev[..., self.ix_t0] > (curr_t - epoch)
        if self.__R0_lookup is not None:
            R0 = self.get_R0_from_lookup(ctx, step_date, is_fs, R0_ix, params)
        external_exp = self.get_external_exposure_from_lookup(step_date, R0.shape)
        vacc_rate, mean_vacc_Ei, mean_vacc_Et = self.get_vaccinations_from_lookup(step_date, R0.shape)
        n_pop = self.popn_size
        # Only calculate adj. factor for backcasts (expecting Reff to be held constant across forecasting period)
        if not is_fs:
            I_U, I_V = prev[..., self.ix_I1_U] + prev[..., self.ix_I2_U], prev[..., self.ix_I1_V] + prev[..., self.ix_I2_V]
            S_U, S_V = prev[..., self.ix_S_U], prev[..., self.ix_S_V]
            # Calculating our adjustment factor
            # Not factoring out the 1/n out for numerical stability
            adjustment = (I_U / n_pop + I_V / n_pop) / ((I_U / n_pop + (1 - mean_vacc_Et) * I_V / n_pop) * (S_U / n_pop + (1 - mean_vacc_Ei) * S_V / n_pop))
            # Guard against 0/0 and x/0 (e.g. fully-depleted susceptibles).
            adjustment = np.nan_to_num(adjustment, nan = 0, posinf = 0)
        R0[zero_mask] = 0
        sigma[zero_mask] = 0
        gamma[zero_mask] = 0
        # Transmission rate; R_bias enters as a power of two.
        beta = R0 * adjustment * np.exp2(R_bias) * gamma
        # Number of unvaccinated individuals per particle.
        n_U = np.dot(current_state, self.comp_mask_U)
        # Per-capita vaccination rate; zero where no-one is unvaccinated.
        vacc_increase = np.divide(vacc_rate, n_U, out=np.zeros_like(n_U), where = n_U != 0)
        compartment_out, compartment_in = self.get_compartment_change(current_state,
                                                sigma, gamma, beta, external_exp, vacc_increase,
                                                mean_vacc_Et, mean_vacc_Ei, n_pop,
                                                rnd, dt)
        curr[..., 0:(self.ix_R_V + 1)] = current_state + compartment_in - compartment_out # Flow between compartments
        curr[..., self.ix_R0:] = prev[..., self.ix_R0:] # Keep parameters fixed.
        curr[..., self.ix_R0_val] = R0 # Record the R0(t) values for each particle.
        curr[..., self.ix_adjustment] = adjustment # Keep track of adjustment to be used when forecasting.
def get_compartment_change(self, current_state, sigma, gamma, beta, external_exp, vacc_increase, mean_vacc_Et, mean_vacc_Ei, n_pop, rnd, dt):
lambda_inf = current_state[..., self.ix_I1_U] + current_state[..., self.ix_I2_U] + \
(1 - mean_vacc_Et) * (current_state[..., self.ix_I1_V] + current_state[..., self.ix_I2_V])
n_particles = sigma.shape[0]
# flow_rate defines the transition rates across each n_particle particle, from one compartment to another
flow_rate = np.zeros((n_particles , self.n_compartments, self.n_compartments))
flow_rate[:, self.ix_S_U, self.ix_E1_U] = (beta * lambda_inf + external_exp) / n_pop
flow_rate[:, self.ix_S_V, self.ix_E1_V] = beta * lambda_inf * (1 - mean_vacc_Ei) / n_pop
flow_rate[np.index_exp[:] + tuple(self.sigma_transitions)] = 2 * sigma[:,None]
flow_rate[np.index_exp[:] + tuple(self.gamma_transitions)] = 2 * gamma[:,None]
flow_rate[np.index_exp[:] + tuple(self.vacc_transitions)] = vacc_increase[:,None]
flow_rate = -np.expm1(-flow_rate * dt)
flow_rate = rnd.binomial(current_state[..., None].astype(int), flow_rate)
# Scale down our outflow if > our compartment counts
out_row_sums = np.sum(flow_rate, axis = 2)
excess_out = (out_row_sums > current_state)
# Calculate outflows as proportion of max. possible
as_proportions = flow_rate[excess_out, :].astype(float) / out_row_sums[excess_out, None]
# Scale down to fill max possible
flow_rate[excess_out, :] = np.around(as_proportions * current_state[excess_out, None]).astype(int)
| |
|
(y > ub_b) |
(z > ub_c)
)
if return_as_new_model:
return model.select(~s)
else: # usual
self.add_model_by_id( model.select(~s), 'model')
# Methods for sharpening and comparing maps, models and calculating FSC values
def get_rms_f_list(self,
map_id = 'map_manager',
d_min = None, # minimum resolution for calculations
n_bins = None,
resolution = None, # nominal resolution
):
''' Return list of rms amplitude by bins '''
assert d_min and n_bins
from cctbx.maptbx.segment_and_split_map import map_coeffs_to_fp
map_coeffs=self.get_map_manager_by_id(
map_id).map_as_fourier_coefficients(d_min = d_min)
f_array = get_map_coeffs_as_fp_phi(map_coeffs, n_bins = n_bins,
d_min = d_min).f_array
rms_f_list = flex.double()
sthol2_list = flex.double()
dsd = f_array.d_spacings().data()
n_bins_use = min(n_bins,max(3,n_bins//3))
for i_bin in f_array.binner().range_used():
sel = f_array.binner().selection(i_bin)
f = map_coeffs.select(sel)
f_array_f = map_coeffs_to_fp(f)
rms_f = f_array_f.data().norm()
rms_f_list.append(rms_f)
d = dsd.select(sel)
if d.size() < 0:
d_avg = flex.mean(d)
sthol2 = 0.25/d_avg**2
sthol2_list.append(sthol2)
if i_bin-1 > n_bins_use and ((not resolution) or (d_avg >= resolution)):
n_bins_use = i_bin - 1
elif i_bin > 1:
sthol2_list.append(sthol2_list[-1])
else:
sthol2_list.append(0)
return group_args(
rms_f_list = rms_f_list,
sthol2_list = sthol2_list,
n_bins_use = n_bins_use)
  def _update_kw_with_map_info(self, local_kw, previous_kw = None,
      text = 'overall', have_previous_scaled_data = None,
      map_id_scaled_list = None):
    '''Rewrite the map-id entries of local_kw for the next sharpening pass.

    When have_previous_scaled_data is set, the maps produced by the
    previous pass (previous_kw['map_id_scaled_list']) become the input
    maps of this pass.  Output map ids are either taken from
    map_id_scaled_list or generated by appending "_<text>" to each
    input id.  Returns the updated local_kw.
    '''
    if have_previous_scaled_data:
      # Expect map_id_1 to be in previous_kw['map_id_to_be_scaled_list']...
      if previous_kw.get('map_id_1') and previous_kw.get('map_id_2'):
        # Map the previous half-map ids onto their scaled counterparts.
        local_kw['map_id_1'] = previous_kw['map_id_scaled_list'][
          previous_kw['map_id_to_be_scaled_list'].index(previous_kw['map_id_1'])]
        print("Map 1 to use in determining scaling: '%s' " %(
          local_kw['map_id_1']), file = self.log)
        local_kw['map_id_2'] = previous_kw['map_id_scaled_list'][
          previous_kw['map_id_to_be_scaled_list'].index(previous_kw['map_id_2'])]
        print("Map 2 to use in determining scaling: '%s' " %(
          local_kw['map_id_2']), file = self.log)
      local_kw['map_id_to_be_scaled_list'] = previous_kw['map_id_scaled_list']
      # New main map
      local_kw['map_id'] = previous_kw['map_id_scaled_list'][
        previous_kw['map_id_to_be_scaled_list'].index(previous_kw['map_id'])]
      print("New main map to use in determining scaling: '%s' " %(
        local_kw['map_id']),file = self.log)
    if map_id_scaled_list:
      local_kw['map_id_scaled_list'] = map_id_scaled_list
    else:
      # Generate output ids by suffixing each input id with this pass name.
      local_kw['map_id_scaled_list'] = []
      for id in local_kw['map_id_to_be_scaled_list']:
        local_kw['map_id_scaled_list'].append(
          '%s_%s' %(id, text))
    print("Starting maps will come from: %s " %(
      str(local_kw['map_id_to_be_scaled_list'])),file = self.log)
    print("Sharpened maps will be in: %s " %(
      local_kw['map_id_scaled_list']), file = self.log)
    if local_kw.get('model_id') and self.get_model_by_id(local_kw['model_id']):
      cc = self.map_model_cc(map_id=local_kw['map_id'])
      print ("Current map-model CC for '%s': %.3f " %(local_kw['map_id'],cc),
        file = self.log)
    return local_kw
def find_k_sol_b_sol(self,
model = None,
d_min = None,
model_map_id = None,
comparison_map_id = None,
n_bins = 5):
''' Routine to guess k_sol and b_sol by low-resolution Fc calculation'''
if model_map_id is None:
model_map_id = 'map_from_model'
if comparison_map_id is None:
comparison_map_id = 'map_manager'
kb_list= [ [0,0],
[0.1,20], [0.1,50],
[0.2,20], [0.2,50],
[0.3,20], [0.3,50],
[0.15,20], [0.15,50],
[0.15,30], [0.15,40],
[0.15,10], [0.15,60],
[0.15,0], [0.15,5],
]
from cctbx.development.create_models_or_maps import generate_model, \
generate_map_coefficients
target_map_coeffs = self.get_map_manager_by_id(
comparison_map_id).map_as_fourier_coefficients( d_min = d_min)
(d_max,d_min)=target_map_coeffs.d_max_min(
d_max_is_highest_defined_if_infinite=True)
target_map_coeffs.setup_binner(n_bins = n_bins,
d_max=d_max,
d_min=d_min)
best_kb = None
best_cc = None
for k_sol,b_sol in kb_list:
map_coeffs = generate_map_coefficients(model = model,
d_min = d_min,
k_sol = k_sol,
b_sol = b_sol,
scattering_table = self.scattering_table(),
f_obs_array = target_map_coeffs,
log = null_out())
sum_cc = flex.double()
for i_bin in target_map_coeffs.binner().range_used():
sel = target_map_coeffs.binner().selection(i_bin)
cc1 = map_coeffs.select(sel).map_correlation(
target_map_coeffs.select(sel))
cc1 = (0 if cc1 is None else cc1)
sum_cc.append(cc1)
cc = sum_cc.min_max_mean().mean
if best_cc is None or cc > best_cc:
best_cc = cc
best_kb = [k_sol,b_sol]
return group_args(
k_sol = best_kb[0],
b_sol = best_kb[1],
cc = best_cc)
  def tls_from_map(self,
      map_id_1 = None,
      map_id_2 = None,
      map_id = None,
      model_id = None,
      mask_id = None,
      tls_by_chain = True,
      apply_tls_to_model = True,
      iterations = 1,
      skip_waters = True,
      skip_hetero = True,
      coordinate_shift_to_apply_before_tlso = None,
      core_box_size_ratio = None,
      box_cushion_ratio = None,
      exclude_points_outside_density = True,
      minimum_boxes_inside_density = True,
      d_min = None,
      **kw):
    '''Estimate TLS parameters from local anisotropy of a map.

    Requires either two half-maps (map_id_1/map_id_2) or a map and a
    model (map_id/model_id).  Returns a box_info group_args whose
    tlso_list holds the TLS objects (one per chain if tls_by_chain).
    Optionally applies the TLS-equivalent aniso U values to the model.
    '''
    if iterations:
      from libtbx import adopt_init_args
      kw_obj = group_args()
      adopt_init_args(kw_obj, locals())
      all_kw = kw_obj() # save calling parameters in kw as dict
      del all_kw['adopt_init_args'] # REQUIRED
      del all_kw['kw_obj'] # REQUIRED
      all_kw.update(kw)
      del all_kw['kw']
      all_kw['iterations'] = None
      all_kw_use = deepcopy(all_kw)
      # NOTE(review): each extra iteration's return value is discarded;
      # only the final pass (below) produces the returned box_info --
      # confirm this is the intended behaviour.
      print("\nRunning total of %s iterations of TLS from map " %(iterations),
        file = self.log)
      for iter in range(iterations-1):
        print("\nRunning iteration %s of %s of TLS from map" %(
          iter+1,iterations), file = self.log)
        result = self.tls_from_map(**all_kw_use)
      print("\nDone running extra iterations of TLS from map ",file = self.log)
    if model_id is None:
      model_id = 'model'
    # Save all keywords we want to pass on in kw
    kw['map_id_1'] = map_id_1
    kw['map_id_2'] = map_id_2
    kw['map_id'] = map_id
    kw['model_id'] = model_id
    kw['exclude_points_outside_density'] = exclude_points_outside_density
    kw['d_min'] = d_min
    # Set up list of maps to be scaled and kw
    kw = self.set_map_id_lists(kw)
    # Set keywords for tls_from_map
    kw['local_sharpen'] = True
    kw['anisotropic_sharpen'] = True
    kw['get_scale_as_aniso_u'] = True
    kw['get_tls_from_u'] = True
    kw['get_tls_info_only'] = True
    kw['replace_aniso_with_tls_equiv'] = False
    kw['overall_sharpen_before_and_after_local'] = False
    kw['coordinate_shift_to_apply_before_tlso'] =\
       coordinate_shift_to_apply_before_tlso
    print("\nRunning tls_from_map...\n",
      file = self.log)
    # Choose the sharpening method that will produce the TLS info.
    if kw.get('map_id_1') and kw.get('map_id_2'):
      print("\nTLS will be determined by comparison of %s and %s " %(
        kw['map_id_1'],kw['map_id_2']), file = self.log)
      method = self.half_map_sharpen
      del kw['model_id']
      del kw['map_id']
    elif kw.get('map_id') and kw.get('model_id'):
      print("\nTLS will be determined by comparison of %s and %s " %(
        kw['map_id'],kw['model_id']), file = self.log)
      method = self.model_sharpen
      del kw['map_id_1']
      del kw['map_id_2']
    else:
      raise Sorry("Need two half-maps or map and model for get_tls_from_map")
    # Run by chain if requested
    if tls_by_chain:
      print("TLS will be determined for each chain", file = self.log)
      box_info = self._split_up_map_and_model(
        model_id = model_id,
        selection_method = 'by_chain',
        skip_waters = skip_waters,
        skip_hetero = skip_hetero,
        mask_all_maps_around_edges = False,)
      tlso_list = []
      for mmm in box_info.mmm_list:
        # working shift_cart is shift from original to working xyz
        # box shift_cart is original to box
        # to get coords in working frame, take box xyz and subtract box shift
        # then add working shift
        coordinate_shift = tuple(
          [working_shift - box_shift for working_shift,box_shift in zip(
            self.map_manager().shift_cart(), mmm.map_manager().shift_cart())]
          )
        kw['coordinate_shift_to_apply_before_tlso'] = coordinate_shift
        # Recurse on the boxed map/model for a single TLS group.
        box_info = mmm.tls_from_map(
          core_box_size_ratio = core_box_size_ratio,
          box_cushion_ratio = box_cushion_ratio,
          tls_by_chain = False,
          apply_tls_to_model = False,
          iterations = None,
          **kw)
        for tlso in box_info.tlso_list:
          tlso_list.append(tlso)
      box_info.tlso_list = tlso_list
    else:
      print("TLS will be determined for entire model as one group",
        file = self.log)
      if core_box_size_ratio and (not kw.get('core_box_size')):
        kw['core_box_size'] = core_box_size_ratio * self.resolution() # in A,
      if box_cushion_ratio and (not kw.get('box_cushion')):
        kw['box_cushion'] = box_cushion_ratio * self.resolution() # in A
      tls_info = method(**kw)
      # run overall sharpening
      tlso_list = [tls_info.tlso]
      mmm_list = [self]
      box_info = group_args(
        selection_list = None,
        selection_as_text_list = None,
        tlso_list = tlso_list,
        mmm_list = [self])
    if apply_tls_to_model and model_id and \
        self.get_model_by_id(model_id = model_id):
      # set the values in the model using
      if not box_info.selection_list:
        box_info.selection_list = [
          self.get_model_by_id(model_id = model_id).selection('all')]
        box_info.selection_as_text_list = ['all']
      self.merge_split_maps_and_models(
        model_id = model_id,
        box_info = box_info,
        replace_coordinates = False,
        replace_u_aniso = True)
    return box_info
def _sharpen_overall_local_overall(self, kw, method):
assert kw.get('map_id_to_be_scaled_list') is None or (
kw['map_id'] in kw['map_id_to_be_scaled_list']) # map_id_to_be_scaled not ok
# Set up list of maps to be scaled
kw['sharpen_all_maps'] = True # REQUIRED
kw = self.set_map_id_lists(kw) # MUST COME AFTER sharpen_all_maps
kw['overall_sharpen_before_and_after_local'] = False
# run sharpening without local sharpening first
# Then set maps to scale as the scaled maps from this run
final_map_id_scaled_list = deepcopy(kw['map_id_scaled_list'])
print("\nRunning overall sharpening, local , then overall...\n",
file = self.log)
if kw.get('map_id_1') and kw.get('map_id_2'):
print("\nSharpening will be determined by comparison of %s and %s " %(
kw['map_id_1'],kw['map_id_2']), file = self.log)
print("Starting maps will come from: %s " %(
str(kw['map_id_to_be_scaled_list'])),file = self.log)
print("Final sharpened maps will be in: %s " %(final_map_id_scaled_list),
file = self.log)
# Run overall sharpening
local_kw = deepcopy(kw) # save a copy
local_kw['local_sharpen'] = False
local_kw['overall_sharpen_before_and_after_local'] = False
local_kw_dc = deepcopy(local_kw)
local_kw = deepcopy(local_kw_dc)
print ("\n",79*"=","\nRunning overall sharpening now\n",79*"=","\n",
file = self.log)
local_kw = self._update_kw_with_map_info(local_kw, previous_kw = kw,
text = 'overall')
method( **local_kw) # run overall sharpening
# Now our new maps to be sharpened are in local_kw['map_id_scaled_list']
print ("\nDone with overall sharpening\n", file = self.log)
# Local sharpening
print ("\n",79*"=","\nRunning local sharpening\n",79*"=","\n",
file = self.log)
kw = self._update_kw_with_map_info(kw, previous_kw = local_kw,
text = 'local', have_previous_scaled_data = True)
method( **kw) # Run local sharpening
print ("\n",79*"=","\nRunning final overall sharpening\n",79*"=","\n",
file = self.log)
local_kw = deepcopy(local_kw_dc)
local_kw = self._update_kw_with_map_info(local_kw, previous_kw = kw,
text = 'final_overall', have_previous_scaled_data = True,
map_id_scaled_list = final_map_id_scaled_list)
method( **local_kw) # Run overall sharpening
print("Scaled maps are '%s' "%(
str(local_kw['map_id_scaled_list'])), file = self.log)
scaled_map_id = local_kw['map_id_scaled_list'][
local_kw['map_id_to_be_scaled_list'].index(local_kw['map_id'])]
print("\nFinal sharpened map is in '%s' in '%s' " %(
scaled_map_id, self.name), file = self.log)
if local_kw.get('model_id') and self.get_model_by_id(local_kw['model_id']):
cc = self.map_model_cc(model_id = local_kw['model_id'],
map_id = scaled_map_id)
print ("Current | |
# merge.py - directory-level update/merge handling for Mercurial
#
# Copyright 2006, 2007 <NAME> <<EMAIL>>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import nullid, nullrev, hex, bin
from i18n import _
from mercurial import obsolete
import error, util, filemerge, copies, subrepo, worker, dicthelpers
import errno, os, shutil
class mergestate(object):
    '''track 3-way merge state of individual files

    State is persisted in the repository file "merge/state": the first
    line is the hex of the local node, each following line is a
    NUL-separated record for one file.  Per-file state codes: 'u' is
    unresolved, 'r' is resolved.
    '''
    def __init__(self, repo):
        self._repo = repo
        self._dirty = False
        self._read()
    def reset(self, node=None):
        """Discard all merge state (and saved file copies) on disk."""
        self._state = {}
        if node:
            self._local = node
        # ignore_errors=True: the merge directory may not exist.
        shutil.rmtree(self._repo.join("merge"), True)
        self._dirty = False
    def _read(self):
        """Load merge state from "merge/state"; absent file means empty."""
        self._state = {}
        try:
            f = self._repo.opener("merge/state")
            for i, l in enumerate(f):
                if i == 0:
                    # First line: local node (hex, newline-terminated).
                    self._local = bin(l[:-1])
                else:
                    # Remaining lines: NUL-separated per-file records.
                    bits = l[:-1].split("\0")
                    self._state[bits[0]] = bits[1:]
            f.close()
        except IOError, err:
            # A missing state file is normal (no merge in progress).
            if err.errno != errno.ENOENT:
                raise
        self._dirty = False
    def commit(self):
        """Write merge state back to disk if it has changed."""
        if self._dirty:
            f = self._repo.opener("merge/state", "w")
            f.write(hex(self._local) + "\n")
            for d, v in self._state.iteritems():
                f.write("\0".join([d] + v) + "\n")
            f.close()
            self._dirty = False
    def add(self, fcl, fco, fca, fd):
        """Record file fd as unresolved and save a copy of the local file.

        fcl/fco/fca are the local, other and ancestor file contexts.
        """
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.opener.write("merge/" + hash, fcl.data())
        self._state[fd] = ['u', hash, fcl.path(), fca.path(),
                           hex(fca.filenode()), fco.path(), fcl.flags()]
        self._dirty = True
    def __contains__(self, dfile):
        return dfile in self._state
    def __getitem__(self, dfile):
        # Return the state code ('u' or 'r') for dfile.
        return self._state[dfile][0]
    def __iter__(self):
        # Yield tracked files in sorted order.
        l = self._state.keys()
        l.sort()
        for f in l:
            yield f
    def files(self):
        return self._state.keys()
    def mark(self, dfile, state):
        """Set the state code for dfile and flag the state as dirty."""
        self._state[dfile][0] = state
        self._dirty = True
    def resolve(self, dfile, wctx, octx):
        """Attempt to resolve dfile; return the filemerge result code."""
        if self[dfile] == 'r':
            return 0
        state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        # "premerge" x flags
        flo = fco.flags()
        fla = fca.flags()
        if 'x' in flags + flo + fla and 'l' not in flags + flo + fla:
            if fca.node() == nullid:
                self._repo.ui.warn(_('warning: cannot merge flags for %s\n') %
                                   afile)
            elif flags == fla:
                flags = flo
        # restore local
        f = self._repo.opener("merge/" + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        f.close()
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
        if r is None:
            # no real conflict
            del self._state[dfile]
        elif not r:
            self.mark(dfile, 'r')
        return r
def _checkunknownfile(repo, wctx, mctx, f):
return (not repo.dirstate._ignore(f)
and os.path.isfile(repo.wjoin(f))
and repo.wopener.audit.check(f)
and repo.dirstate.normalize(f) not in repo.dirstate
and mctx[f].cmp(wctx[f]))
def _checkunknown(repo, wctx, mctx):
"check for collisions between unknown files and files in mctx"
error = False
for f in mctx:
if f not in wctx and _checkunknownfile(repo, wctx, mctx, f):
error = True
wctx._repo.ui.warn(_("%s: untracked file differs\n") % f)
if error:
raise util.Abort(_("untracked files in working directory differ "
"from files in requested revision"))
def _forgetremoved(wctx, mctx, branchmerge):
"""
Forget removed files
If we're jumping between revisions (as opposed to merging), and if
neither the working directory nor the target rev has the file,
then we need to remove it from the dirstate, to prevent the
dirstate from listing the file when it is no longer in the
manifest.
If we're merging, and the other revision has removed a file
that is not present in the working directory, we need to mark it
as removed.
"""
actions = []
state = branchmerge and 'r' or 'f'
for f in wctx.deleted():
if f not in mctx:
actions.append((f, state, None, "forget deleted"))
if not branchmerge:
for f in wctx.removed():
if f not in mctx:
actions.append((f, "f", None, "forget removed"))
return actions
def _checkcollision(repo, wmf, actions, prompts):
    """Abort if the merged manifest would contain a case-folding collision.

    Applies the pending actions (and prompt outcomes) to a provisional
    copy of the working manifest, then checks that no two resulting
    paths normalise to the same case-folded name.
    """
    # build provisional merged manifest up
    pmmf = set(wmf)
    # One small handler per action code; each mutates pmmf in place.
    def addop(f, args):
        pmmf.add(f)
    def removeop(f, args):
        pmmf.discard(f)
    def nop(f, args):
        pass
    def renameop(f, args):
        # args: (source or None, destination, flags)
        f2, fd, flags = args
        if f:
            pmmf.discard(f)
        pmmf.add(fd)
    def mergeop(f, args):
        # args: (other file, destination, move?)
        f2, fd, move = args
        if move:
            pmmf.discard(f)
        pmmf.add(fd)
    opmap = {
        "a": addop,
        "d": renameop,
        "dr": nop,
        "e": nop,
        "f": addop, # untracked file should be kept in working directory
        "g": addop,
        "m": mergeop,
        "r": removeop,
        "rd": nop,
    }
    for f, m, args, msg in actions:
        op = opmap.get(m)
        assert op, m
        op(f, args)
    # Prompt outcomes ("cd" changed/deleted, "dc" deleted/changed) both
    # keep the file in the provisional manifest.
    opmap = {
        "cd": addop,
        "dc": addop,
    }
    for f, m in prompts:
        op = opmap.get(m)
        assert op, m
        op(f, None)
    # check case-folding collision in provisional merged manifest
    foldmap = {}
    for f in sorted(pmmf):
        fold = util.normcase(f)
        if fold in foldmap:
            raise util.Abort(_("case-folding collision between %s and %s")
                             % (f, foldmap[fold]))
        foldmap[fold] = f
def manifestmerge(repo, wctx, p2, pa, branchmerge, force, partial,
acceptremote=False):
"""
Merge p1 and p2 with ancestor pa and generate merge action list
branchmerge and force are as passed in to update
partial = function to filter file lists
acceptremote = accept the incoming changes without prompting
"""
overwrite = force and not branchmerge
actions, copy, movewithdir = [], {}, {}
followcopies = False
if overwrite:
pa = wctx
elif pa == p2: # backwards
pa = wctx.p1()
elif not branchmerge and not wctx.dirty(missing=True):
pass
elif pa and repo.ui.configbool("merge", "followcopies", True):
followcopies = True
# manifests fetched in order are going to be faster, so prime the caches
[x.manifest() for x in
sorted(wctx.parents() + [p2, pa], key=lambda x: x.rev())]
if followcopies:
ret = copies.mergecopies(repo, wctx, p2, pa)
copy, movewithdir, diverge, renamedelete = ret
for of, fl in diverge.iteritems():
actions.append((of, "dr", (fl,), "divergent renames"))
for of, fl in renamedelete.iteritems():
actions.append((of, "rd", (fl,), "rename and delete"))
repo.ui.note(_("resolving manifests\n"))
repo.ui.debug(" branchmerge: %s, force: %s, partial: %s\n"
% (bool(branchmerge), bool(force), bool(partial)))
repo.ui.debug(" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
copied = set(copy.values())
copied.update(movewithdir.values())
if '.hgsubstate' in m1:
# check whether sub state is modified
for s in sorted(wctx.substate):
if wctx.sub(s).dirty():
m1['.hgsubstate'] += "+"
break
aborts, prompts = [], []
# Compare manifests
fdiff = dicthelpers.diff(m1, m2)
flagsdiff = m1.flagsdiff(m2)
diff12 = dicthelpers.join(fdiff, flagsdiff)
for f, (n12, fl12) in diff12.iteritems():
if n12:
n1, n2 = n12
else: # file contents didn't change, but flags did
n1 = n2 = m1.get(f, None)
if n1 is None:
# Since n1 == n2, the file isn't present in m2 either. This
# means that the file was removed or deleted locally and
# removed remotely, but that residual entries remain in flags.
# This can happen in manifests generated by workingctx.
continue
if fl12:
fl1, fl2 = fl12
else: # flags didn't change, file contents did
fl1 = fl2 = m1.flags(f)
if partial and not partial(f):
continue
if n1 and n2:
fla = ma.flags(f)
nol = 'l' not in fl1 + fl2 + fla
a = ma.get(f, nullid)
if n2 == a and fl2 == fla:
pass # remote unchanged - keep local
elif n1 == a and fl1 == fla: # local unchanged - use remote
if n1 == n2: # optimization: keep local content
actions.append((f, "e", (fl2,), "update permissions"))
else:
actions.append((f, "g", (fl2,), "remote is newer"))
elif nol and n2 == a: # remote only changed 'x'
actions.append((f, "e", (fl2,), "update permissions"))
elif nol and n1 == a: # local only changed 'x'
actions.append((f, "g", (fl1,), "remote is newer"))
else: # both changed something
actions.append((f, "m", (f, f, False), "versions differ"))
elif f in copied: # files we'll deal with on m2 side
pass
elif n1 and f in movewithdir: # directory rename
f2 = movewithdir[f]
actions.append((f, "d", (None, f2, fl1),
"remote renamed directory to " + f2))
elif n1 and f in copy:
f2 = copy[f]
actions.append((f, "m", (f2, f, False),
"local copied/moved to " + f2))
elif n1 and f in ma: # clean, a different, no remote
if n1 != ma[f]:
prompts.append((f, "cd")) # prompt changed/deleted
elif n1[20:] == "a": # added, no remote
actions.append((f, "f", None, "remote deleted"))
else:
actions.append((f, "r", None, "other deleted"))
elif n2 and f in movewithdir:
f2 = movewithdir[f]
actions.append((None, "d", (f, f2, fl2),
"local renamed directory to " + f2))
elif n2 and f in copy:
f2 = copy[f]
if f2 in m2:
actions.append((f2, "m", (f, f, False),
"remote copied to " + f))
else:
actions.append((f2, "m", (f, f, True),
"remote moved to " + f))
elif n2 and | |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Module for reading 3D dicom data"""
from loguru import logger
from typing import Union
# import numpy as np
# import h5py
from pathlib import Path
import os.path
import sys
from .image import DataPlus, transform_orientation, transform_orientation_voxelsize
import pydicom as dicom
dicom.config.debug(False)
# NOTE(mareklovci - 2018_05_13): Absolute imports are prefered in Python, so eg. "from io3d import tgz" should be used.
# https://www.python.org/dev/peps/pep-0008/#imports
from . import dcmreaddata as dcmr
from . import tgz
from . import misc
from . import dcmtools
from . import network
from . import datasets
# Decorator used for labeling old or unsuitable functions as 'deprecated'
from io3d.deprecation import deprecated
# def is_documented_by(original):
# def wrapper(target):
# target.__doc__ = original.__doc__
# return target
# return wrapper
#
# @is_documented_by
def read(
    datapath,
    qt_app=None,
    dataplus_format=True,
    gui=False,
    start=0,
    stop=None,
    step=1,
    convert_to_gray=True,
    series_number=None,
    use_economic_dtype=True,
    dicom_expected=None,
    orientation_axcodes="original",
    **kwargs
):
    """Read 3D data and its metadata from a file, directory or URL.

    Thin convenience wrapper around :meth:`DataReader.Get3DData`.

    If ``qt_app`` is None (default) all dialogs for series selection run in
    the terminal; pass a ``QtGui.QApplication()`` to use Qt dialogs instead.

    :param datapath: directory with input data; if a URL is given, the file
        is downloaded into ``~/data/downloads/``
    :param qt_app: dialog destination (None -> terminal, QApplication -> Qt)
    :param dataplus_format: if True, metadata and data are returned in one
        structure
    :param gui: True if a ``QtGui.QApplication()`` should be created instead
        of using the terminal
    :param int start: used by DicomReader; where 3D data reading starts
    :param int stop: used by DicomReader; where 3D data reading stops
    :param int step: used by DicomReader; step for 3D data reading
    :param bool convert_to_gray: if True, RGB is converted to gray
    :param int series_number: used by DicomReader, essential in metadata
    :param use_economic_dtype: if True, casts the 3D array to a less
        space-consuming dtype
    :param dicom_expected: set True if the data is known to be dicom; set
        False to suppress dicom warnings
    :param orientation_axcodes: target orientation axcodes, e.g. 'SPL'
        (inferior to Superior, anterior to Posterior, right to Left);
        'original' keeps the orientation produced by the reader
    :return: tuple (data3d, metadata) or a ``DataPlus`` structure
    """
    options = dict(
        datapath=datapath,
        qt_app=qt_app,
        dataplus_format=dataplus_format,
        gui=gui,
        start=start,
        stop=stop,
        step=step,
        convert_to_gray=convert_to_gray,
        series_number=series_number,
        use_economic_dtype=use_economic_dtype,
        dicom_expected=dicom_expected,
        orientation_axcodes=orientation_axcodes,
    )
    # Duplicate keys between the named parameters and **kwargs raise the same
    # TypeError the original explicit call produced.
    return DataReader().Get3DData(**options, **kwargs)
# NOTE(mareklovci): The same code was used in two functions, so according to DRY principle I cleaned it up.
def _metadata(image, datapath):
"""Function which returns metadata dict.
:param image: image to get spacing from
:param datapath: path to data
:return: {'series_number': '', 'datadir': '', 'voxelsize_mm': ''}
"""
metadata = {"series_number": 0, "datadir": datapath}
spacing = image.GetSpacing()
metadata["voxelsize_mm"] = [spacing[2], spacing[0], spacing[1]]
return metadata
class DataReader:
    def __init__(self):
        """Create a reader; attributes are filled in by :meth:`Get3DData`."""
        # Callable used to fetch overlay data; set to
        # dcmr.DicomReader.get_overlay by __ReadFromDirectory after a DICOM
        # series read, otherwise stays None.
        self.overlay_fcn = None
# noinspection PyAttributeOutsideInit,PyUnboundLocalVariable,PyPep8Naming
    def Get3DData(
        self,
        datapath: Union[str, Path],
        qt_app=None,
        dataplus_format=True,
        gui=False,
        start=0,
        stop=None,
        step=1,
        convert_to_gray=True,
        series_number=None,
        use_economic_dtype=True,
        dicom_expected=None,
        orientation_axcodes="original",
        **kwargs
    ):
        """Return 3D data and its metadata.

        If ``qt_app`` is None (default) all dialogs for series selection are
        performed in the terminal; if it is set to ``QtGui.QApplication()``
        dialogs are shown in Qt.

        :param datapath: directory with input data; if a URL is given, the
            file is downloaded into ``~/data/downloads/``
        :param qt_app: dialog destination (None -> terminal, QApplication -> Qt)
        :param dataplus_format: new data format; metadata and data are
            returned in one structure
        :param gui: True if ``QtGui.QApplication()`` should be used instead of
            the terminal
        :param int start: used by DicomReader, defines where 3D data reading
            should start
        :param int stop: used by DicomReader, defines where 3D data reading
            should stop
        :param int step: used by DicomReader, defines the step for 3D data
            reading
        :param bool convert_to_gray: if True, RGB is converted to gray
        :param int series_number: used by DicomReader, essential in metadata
        :param use_economic_dtype: if True, casts the 3D data array to a less
            space-consuming dtype
        :param dicom_expected: set True if it is known that the data is in
            dicom format; set False to suppress dicom warnings
        :param orientation_axcodes: target orientation axcodes, e.g. 'LPS'
            (right to Left, anterior to Posterior, inferior to Superior);
            'original' keeps the orientation produced by the reader
        :return: ``DataPlus`` dict if ``dataplus_format`` else tuple
            ``(data3d, metadata)``
        """
        # Remote data: download first, then read the local copy.
        if network.is_url(datapath):
            datapath = datasets.fetch_file(str(datapath))
        self.orig_datapath = datapath
        datapath = os.path.expanduser(datapath)
        if series_number is not None and type(series_number) != int:
            series_number = int(series_number)
        if not os.path.exists(datapath):
            # NOTE(review): returns None here instead of raising — callers
            # must handle a missing-path None result.
            logger.error("Path '" + datapath + "' does not exist")
            return
        if qt_app is None and gui is True:
            from PyQt5.QtWidgets import QApplication
            qt_app = QApplication(sys.argv)
        if type(datapath) is not str:
            datapath = str(datapath)
        datapath = os.path.normpath(datapath)
        # Stash the read parameters so the private readers can pick them up.
        self.start = start
        self.stop = stop
        self.step = step
        self.convert_to_gray = convert_to_gray
        self.series_number = series_number
        self.kwargs = kwargs
        self.qt_app = qt_app
        self.gui = gui
        if os.path.isfile(datapath):
            logger.debug("file read recognized")
            data3d, metadata = self.__ReadFromFile(datapath)
        elif os.path.exists(datapath):
            logger.debug("directory read recognized")
            data3d, metadata = self.__ReadFromDirectory(
                datapath=datapath, dicom_expected=dicom_expected
            )
            # datapath, start, stop, step, gui=gui, **kwargs)
        else:
            logger.error("Data path {} not found".format(datapath))
        # Optional reorientation of the volume and voxel sizes into the
        # requested axcodes; 'original' keeps whatever the reader produced.
        if orientation_axcodes:
            if orientation_axcodes == "original":
                logger.warning(
                    "orientation_axcodes default value will be changed in the furture to 'LPS'"
                )
            else:
                data3d = transform_orientation(
                    data3d, metadata["orientation_axcodes"], orientation_axcodes
                )
                metadata["voxelsize_mm"] = transform_orientation_voxelsize(
                    metadata["voxelsize_mm"],
                    metadata["orientation_axcodes"],
                    orientation_axcodes,
                )
                metadata["orientation_axcodes"] = orientation_axcodes
        if convert_to_gray:
            if len(data3d.shape) > 3:
                # TODO: implement better rgb2gray
                data3d = data3d[:, :, :, 0]
        if use_economic_dtype:
            data3d = self.__use_economic_dtype(data3d)
        if dataplus_format:
            logger.debug("dataplus format")
            # metadata = {'voxelsize_mm': [1, 1, 1]}
            datap = metadata
            datap["data3d"] = data3d
            logger.debug("datap keys () : " + str(datap.keys()))
            return DataPlus(datap)
        else:
            return data3d, metadata
# noinspection PyPep8Naming
    def __ReadFromDirectory(self, datapath, dicom_expected=None):
        """Read 3D data from a directory (DICOM series or image sequence).

        Uses the read parameters stashed on ``self`` by :meth:`Get3DData`.

        :param datapath: path to the directory with the data
        :param dicom_expected: set False to skip the DICOM-directory check
        :return: tuple (data3d, metadata)
        """
        start = self.start
        stop = self.stop
        step = self.step
        kwargs = self.kwargs
        gui = self.gui
        if (dicom_expected is not False) and (
            dcmr.is_dicom_dir(datapath)
        ):  # reading dicom
            logger.debug("Dir - DICOM")
            logger.debug("dicom_expected " + str(dicom_expected))
            reader = dcmr.DicomReader(
                datapath, series_number=self.series_number, gui=gui, **kwargs
            )  # qt_app=None, gui=True)
            data3d = reader.get_3Ddata(start, stop, step)
            metadata = reader.get_metaData()
            metadata["series_number"] = reader.series_number
            metadata["datadir"] = datapath
            # metadata["orientation_axcodes"] # inserted in dicomreader
            self.overlay_fcn = reader.get_overlay
        else:  # reading image sequence
            logger.debug("Dir - Image sequence")
            logger.debug("Getting list of readable files...")
            flist = []
            try:
                import SimpleITK
            except ImportError as e:
                logger.error("Unable to import SimpleITK. On Windows try version 1.0.1")
                raise e
            # Probe every file with SimpleITK and keep only the readable ones;
            # unreadable files are logged and skipped.
            for f in os.listdir(datapath):
                try:
                    SimpleITK.ReadImage(os.path.join(datapath, f))
                except Exception as e:
                    logger.warning("Cant load file: " + str(f))
                    logger.warning(e)
                    continue
                flist.append(os.path.join(datapath, f))
            # Lexicographic sort defines the slice order of the volume.
            flist.sort()
            logger.debug("Reading image data...")
            image = SimpleITK.ReadImage(flist)
            logger.debug("Getting numpy array from image data...")
            data3d = SimpleITK.GetArrayFromImage(image)
            metadata = _metadata(image, datapath)
            metadata["orientation_axcodes"] = "SPL"
        return data3d, metadata
    @staticmethod
    def __use_economic_dtype(data3d):
        """Cast a 3D data array to a less space-consuming dtype.

        Thin wrapper around :func:`misc.use_economic_dtype`.

        :param data3d: array of 3D data to reformat
        :return: reformated array
        """
        return misc.use_economic_dtype(data3d)
# noinspection PyPep8Naming
    def __ReadFromFile(self, datapath):
        """Read a single file and return its 3D data and metadata.

        Dispatches on the file extension. Supported formats: pklz, pkl,
        hdf5/h5, .nii.gz, idx, dcm/DCM/dicom, bz2; anything else is handed to
        SimpleITK as a "raw file".

        :param datapath: path to file to read
        :return: tuple (data3d, metadata)
        """
        def _create_meta(_datapath):
            """Just simply returns some dict. This functions exists in order to keep DRY"""
            meta = {"series_number": 0, "datadir": _datapath}
            return meta
        path, ext = os.path.splitext(datapath)
        # Drop the leading dot of the extension.
        ext = ext[1:]
        if ext in ("pklz", "pkl"):
            logger.debug("pklz format detected")
            from . import misc
            data = misc.obj_from_file(datapath, filetype="pkl")
            data3d = data.pop("data3d")
            # metadata must have series_number
            metadata = _create_meta(datapath)
            metadata.update(data)
        elif ext in ("hdf5", "h5"):
            from . import hdf5_io
            datap = hdf5_io.load_dict_from_hdf5(datapath)
            # datap = self.read_hdf5(datapath)
            data3d = datap.pop("data3d")
            # back compatibility
            if "metadata" in datap.keys():
                datap = datap["metadata"]
            # metadata must have series_number
            metadata = _create_meta(datapath)
            metadata.update(datap)
        elif str(datapath).lower().endswith(".nii.gz"):  # or ext == 'nii':
            from . import nifti_io
            data3d, metadata = nifti_io.read_nifti(datapath)
        elif ext in ["idx"]:
            from . import idxformat
            idxreader = idxformat.IDXReader()
            data3d, metadata = idxreader.read(datapath)
        elif ext in ["dcm", "DCM", "dicom"]:
            data3d, metadata = self._read_with_sitk(datapath)
            metadata = self._fix_sitk_bug(datapath, metadata)
        elif ext in ["bz2"]:
            # Unpack the archive and read the extracted directory instead.
            new_datapath = tgz.untar(datapath)
            data3d, metadata = self.__ReadFromDirectory(new_datapath)
        else:
            logger.debug('file format "' + str(ext) + '"')
            # reading raw file
            data3d, metadata = self._read_with_sitk(datapath)
        # Default orientation when the reader did not provide one.
        if "orientation_axcodes" not in metadata.keys():
            metadata["orientation_axcodes"] = "SPL"
        return data3d, metadata
@staticmethod
def _read_with_sitk(datapath):
"""Reads file using SimpleITK. Returns array of pixels (image located in datapath) and its | |
in range(3, 13):
for m in range(4, 13):
if m == l + 1 and l == k + 1 and k == j + 1 and j == i + 1:
STRAIGHT_SHCHD.append({S[i], H[j], C[k], H[l], D[m]})
STRAIGHT_SHCHD.append({S[9], H[10], C[11], H[12], D[0]})
# Straight lookup tables: each STRAIGHT_<suit pattern> table lists every
# 5-card straight whose cards carry the given suits in rank order, plus the
# wrap-around straight built from ranks 9, 10, 11, 12 and 0.
#
# BUG/PERF: the original built each table with five nested loops over all
# rank combinations (13^5 iterations per table) filtered for consecutive
# ranks.  The filter pins j = i + 1, k = i + 2, l = i + 3, m = i + 4, so a
# single loop over the 9 valid starting ranks produces the identical tables
# (same contents, same order) in 9 iterations instead of ~371 000.
def _suited_straights(c0, c1, c2, c3, c4):
    """Return the straights {c0[i], c1[i+1], ..., c4[i+4]} for i in 0..8,
    followed by the wrap-around straight {c0[9], c1[10], c2[11], c3[12], c4[0]}."""
    straights = [
        {c0[i], c1[i + 1], c2[i + 2], c3[i + 3], c4[i + 4]}
        for i in range(9)
    ]
    straights.append({c0[9], c1[10], c2[11], c3[12], c4[0]})
    return straights

STRAIGHT_SHCDS = _suited_straights(S, H, C, D, S)
STRAIGHT_SHCDC = _suited_straights(S, H, C, D, C)
STRAIGHT_SHCDH = _suited_straights(S, H, C, D, H)
STRAIGHT_SHCDD = _suited_straights(S, H, C, D, D)
STRAIGHT_SHHSS = _suited_straights(S, H, H, S, S)
STRAIGHT_SHHSC = _suited_straights(S, H, H, S, C)
STRAIGHT_SHHSH = _suited_straights(S, H, H, S, H)
STRAIGHT_SHHSD = _suited_straights(S, H, H, S, D)
STRAIGHT_SHHCS = _suited_straights(S, H, H, C, S)
STRAIGHT_SHHCC = _suited_straights(S, H, H, C, C)
STRAIGHT_SHHCH = _suited_straights(S, H, H, C, H)
STRAIGHT_SHHCD = _suited_straights(S, H, H, C, D)
STRAIGHT_SHHHS = _suited_straights(S, H, H, H, S)
STRAIGHT_SHHHC = _suited_straights(S, H, H, H, C)
STRAIGHT_SHHHH = _suited_straights(S, H, H, H, H)
STRAIGHT_SHHHD = _suited_straights(S, H, H, H, D)
STRAIGHT_SHHDS = _suited_straights(S, H, H, D, S)
STRAIGHT_SHHDC = _suited_straights(S, H, H, D, C)
STRAIGHT_SHHDH = _suited_straights(S, H, H, D, H)
STRAIGHT_SHHDD = _suited_straights(S, H, H, D, D)
STRAIGHT_SHDSS = _suited_straights(S, H, D, S, S)
STRAIGHT_SHDSC = []
for i in range(13):
for j in range(1, 13):
for k in range(2, 13):
for l in range(3, 13):
for | |
* yscale) - y_min
steep = False
if abs(xl-xr) < abs(yl-yr):
xl,yl = yl,xl
xr,yr = yr,xr
steep = True
if xr < xl:
xr,xl = xl,xr
yr,yl = yl,yr
# Start at left endpoint
x = xl
y = yl
# Delta X and Y
dx = xr - xl
dy = yr - yl
# Test for vertical line
if dx == 0:
if dy == 0:
# Single plot point
if steep:
self._putpixel(y, x, extents)
else:
self._putpixel(x, y, extents)
return
if yr < yl:
yr,yl = yl,yr
for y in range(yl, yr+1):
# Single vertical line
if steep:
self._putpixel(y, x, extents)
else:
self._putpixel(x, y, extents)
return
# Error terms
derror = abs(dy/dx)
err = 0
while x <= xr:
if steep:
self._putpixel(y, x, extents)
else:
self._putpixel(x, y, extents)
err += derror
if err > 0.5:
y += (1 if yr > yl else -1)
err -= 1
x += 1
if color is not None:
self.pop_line_color()
def push_line_color(self, color: str | Style) -> None:
""" Push a new line color / style to the style stack. """
self.style_stack.append(self.style)
if isinstance(color, Style):
self.style = color
else:
self.style = Style(color = color)
def pop_line_color(self) -> None:
""" Pop the current line color / style from the style stack. """
if len(self.style_stack) > 0:
self.style = self.style_stack.pop()
def push_block_chars(self) -> None:
""" Configures the canvas for block character rendering """
#new_style = Style.combine([self.style, Style(italic=True]))
#self.push_line_color(new_style)
self.block_chars += 1
def pop_block_chars(self) -> None:
""" Pops the block character mode from the stack """
#self.pop_line_color()
self.block_chars -= 1
    def render_canvas(self) -> None:
        """ Draws the data to the bare canvas.

        Base implementation is a no-op; subclasses override this to
        rasterize their data.
        """
        pass
    def add_x_label(self, label: str | Text) -> None:
        """ Add an X label to the graph (replaces any previous X label). """
        self.x_label = label
    def add_y_label(self, label: str | Text) -> None:
        """ Add a Y label to the graph (replaces any previous Y label). """
        self.y_label = label
    def add_title(self, title: str | Text) -> None:
        """ Add a title to the graph (replaces any previous title). """
        self.title = title
    def add_x_axis(self, axis: PlotAxis, style: StyleType | None = None) -> None:
        """ Add an X axis to the graph, optionally with a dedicated style. """
        self.x_axis = axis
        self.x_axis_style = style
    def add_y_axis(self, axis: PlotAxis, style: StyleType | None = None) -> None:
        """ Add a Y axis to the graph, optionally with a dedicated style. """
        self.y_axis = axis
        self.y_axis_style = style
    def add_annotation(self, x: float, y: float, text: Text) -> None:
        """ Add a Text annotation anchored at plot coordinate (x, y). """
        self.annotations.append(PlotAnnotation(x, y, text))
    def render_annotations(self, y_axis_width: int, text_lines: list[str]) -> None:
        """ Renders text annotations into the list of strings rendered from the canvas.

        Each annotation's plot coordinate is mapped to a character cell
        (canvas pixels are 2x4 per cell), then the annotation text is spliced
        into that line, skipping over ``[...]`` color-markup runs so the
        visible column count stays correct.

        NOTE: the parameter annotation was ``list(str)`` (a call, not a
        type) — corrected to ``list[str]``.
        """
        if len(self.annotations) == 0:
            return
        # Account for frame and padding
        xscale = (self.plot_width-y_axis_width) / (self.x_max - self.x_min)
        yscale = self.plot_height / (self.y_max - self.y_min)
        x_min = int(self.x_min * xscale)
        x_max = int(self.x_max * xscale)
        y_min = int(self.y_min * yscale)
        y_max = int(self.y_max * yscale)
        # Loop for all annotations
        for a in self.annotations:
            # Canvas pixel -> character cell: 2 pixels per column, 4 per row.
            x = int(a.x * xscale / 2)
            y = len(text_lines) - int((a.y * yscale - y_min) / 4) - 1
            restore_color = ''
            an_text = str(a.text)
            # Find the 'x' location within the 'y' string
            if y>= 0 and y < len(text_lines) and x >= x_min and x < x_max:
                text_len = 0
                s = text_lines[y]
                in_attr = False
                # Walk the line counting only visible characters; remember the
                # last [color] attribute seen so it can be restored afterwards.
                for idx in range(len(s)):
                    if s[idx] == '[':
                        in_attr = True
                        restore_color = ''
                    elif in_attr:
                        if s[idx] == ']':
                            in_attr = False
                        else:
                            restore_color += s[idx]
                    else:
                        # Test if we found the insertion point
                        if text_len == x:
                            break
                        text_len += 1
                # Strip out len(a.text) characters after this, taking into account
                # that the string might contain [color] modifiers
                rem = len(a.text)
                loc = idx
                in_attr = False
                end_text = s[idx:]
                repl = ''
                try:
                    while rem > 0:
                        if s[loc] == '[':
                            in_attr = True
                        elif s[loc] == ']':
                            in_attr = False
                        elif not in_attr:
                            repl += s[loc]
                            rem -= 1
                        loc += 1
                    post = s[loc:]
                except IndexError:
                    # Ran off the end of the line: truncate the annotation.
                    post = '\n'
                    an_text = an_text[:-rem-3]
                # Insert the text at 'idx' within the string
                if isinstance(a.text, Text):
                    text_lines[y] = s[:idx] + f"[not bold {a.text.style.color.name}]" + an_text + f"[{restore_color}]" + post
                else:
                    text_lines[y] = s[:idx] + f"[not bold white]" + an_text + post
    def _putpixel(self, x: int, y: int, ext: PlotExtents) -> None:
        """ Private method to set/clear a single pixel in the canvas.

        ``x``/``y`` are canvas-pixel coordinates (``y`` already shifted by
        ``-ymin`` by the callers, hence the lower bound of 0 below); the
        palette grids are at character-cell resolution (2x4 pixels per cell).
        A concealed style clears the pixel instead of setting it.
        """
        # Character-cell coordinates for the palette grids.
        px = int(x/2)
        py = int(y/4)
        # ext.ymin-ext.ymin is 0: callers pass y pre-shifted by -ymin, while x
        # is compared against the unshifted extents.
        if x >= ext.xmin and x < ext.xmax and y >= (ext.ymin-ext.ymin) and y < (ext.ymax-ext.ymin):
            if self.style is not None and self.style.conceal:
                self.canvas[y][x] = 0
            else:
                self.canvas[y][x] = 1
            if self.block_chars > 0:
                # Block mode also updates the half-height block palette;
                # italic is used as the block-character marker.
                self.palette[py][px] = self.style+Style(italic=True) or self.palette[py][px]
                self.block_palette[int(y/2)][px] = self.style+Style(italic=True) or self.block_palette[int(y/2)][px]
            else:
                self.palette[py][px] = self.style or self.palette[py][px]
def _draw_circle_dots(self, xc: int, yc: int, x: int, y: int, ext: PlotExtents, filled: bool = False) -> None:
""" Private routine used by the draw_circle method to draw portions of the circle """
if filled:
x1 = xc-x
x2 = xc+x
if x1 > x2:
x1,x2 = x2, x1
for i in range(x1, x2+1):
self._putpixel(i, yc+y, ext)
self._putpixel(i, yc-y, ext)
x1 = xc-y
x2 = xc+y
if x1 > x2:
x1,x2 = x2, x1
for i in range(x1, x2+1):
self._putpixel(i, yc+x, ext)
self._putpixel(i, yc-x, ext)
else:
self._putpixel(xc+x, yc+y, ext)
self._putpixel(xc-x, yc+y, ext)
self._putpixel(xc+x, yc-y, ext)
self._putpixel(xc-x, yc-y, ext)
self._putpixel(xc+y, yc+x, ext)
self._putpixel(xc-y, yc+x, ext)
self._putpixel(xc+y, yc-x, ext)
self._putpixel(xc-y, yc-x, ext)
    def draw_circle(self,
                    x: float,
                    y: float,
                    radius: int,
                    filled: bool = False,
                    color: str | Style | None = None
                    ) -> None:
        """ Renders a circle to the canvas using the current style.

        Uses the midpoint (Bresenham) circle algorithm; *x* and *y* are plot
        coordinates, *radius* is in canvas pixels. When *color* is given it
        is pushed for the duration of the call.
        """
        if color is not None:
            self.push_line_color(color)
        # Account for frame and padding
        xscale = self.plot_width / (self.x_max - self.x_min)
        yscale = self.plot_height / (self.y_max - self.y_min)
        x_min = int(self.x_min * xscale)
        x_max = int(self.x_max * xscale)
        y_min = int(self.y_min * yscale)
        y_max = int(self.y_max * yscale)
        extents = PlotExtents(x_min, x_max, y_min, y_max)
        # Center in canvas pixels; y shifted so 0 is the bottom of the plot.
        xc = int(x * xscale)
        yc = int(y * yscale) - y_min
        # Midpoint circle: x/y are reused as the octant step offsets.
        x = 0
        y = radius
        d = 3 - 2 * radius
        self._draw_circle_dots(xc, yc, x, y, extents, filled)
        while y >= x:
            x += 1
            if d > 0:
                y -= 1
                d += 4 * (x - y) + 10
            else:
                d += 4 * x + 6
            self._draw_circle_dots(xc, yc, x, y, extents, filled)
        if color is not None:
            self.pop_line_color()
def draw_rect(self,
x1: float,
y1: float,
w: float,
h: float,
filled: bool = False,
color: str | Style | None = None
) -> None:
""" Renders a rectangle to the canvas using the current style """
if color is not None:
self.push_line_color(color)
# For filled rectangle, we draw multiple horizontal lines
if filled:
# Account for frame and padding
xscale = self.plot_width / (self.x_max - self.x_min)
yscale = self.plot_height / (self.y_max - self.y_min)
x_min = int(self.x_min * xscale)
x_max = int(self.x_max * xscale)
y_min = int(self.y_min * yscale)
y_max = int(self.y_max * yscale)
extents = PlotExtents(x_min, x_max, y_min, y_max)
x1 = int(x1 * xscale)
x2 = x1 + int(w * xscale)
y1 = int(y1 * yscale) - y_min
y2 = y1 + int(h * yscale)
if x1 > x2:
x1,x2 = x2,x1
ydelta = 1
if y1 > y2:
ydelta = -1
for x in range(x1, x2+1):
for y in range(y1, y2+ydelta, ydelta):
self._putpixel(x, y, extents)
else:
# We just need to draw the 4 outline lines
self.draw_line(x1, y1, x1+w, y1)
self.draw_line(x1, y1+h, x1+w, y1+h)
self.draw_line(x1, y1, x1, y1+h)
self.draw_line(x2, y1, x1+w, y1+h)
if color is not None:
self.pop_line_color()
def draw_pbm(self, x: float, y: float, filename: str) -> None:
""" Render an PBM image from filename to x,y """
if not have_pil:
self.draw_circle(x+20, y+20, 3)
return
img = Image.open(filename)
px = img.load()
# Account for frame and padding
xscale | |
# Source repository: coderepocenter/AxisUtilities
from __future__ import annotations
from typing import Iterable, Callable
import numpy as np
import dask.array as da
from numba import prange
from scipy.sparse import csr_matrix
from axisutilities import Axis
class AxisRemapper:
"""
`AxisRemapper` facilitates conversion between two one-dimensional axis. Originally the idea started for performing
various conversion between time axis. For example, let's say you have a hourly data and you want to average it to
daily data. Or you have a daily data and you want to average it to weekly, monthly, or yearly data. Or may be you
want to calculate daily minimum and maximum from an hourly data. However, since the same concept could be applied
    to any one-dimensional axis, the usage was generalized and the name was changed to `AxisRemapper`.
`AxisRemapper` caches bulk of the computations. Hence, once you create an object of the `AxisRemapper` you could
reuse it; hence, avoid re-doing certain computations, as long as the source/origin axis and the destination axis
remain the same.
`AxisRemapper` applies the calculation on multi-dimensional data as well. By default, it assumes that the axis is
    the first dimension. If it is not the case, you could define the axis along which the conversion needs to happen.
Currently it supports calculating `average`, `minimum`, `maximum`, or any user defined function (any Python
Callable object).
Examples:
* Creating an `AxisRemapper` and calculating average:
>>> from axisutilities import AxisRemapper
>>> from axisutilities import DailyTimeAxisBuilder
>>> from axisutilities import WeeklyTimeAxisBuilder
>>> from datetime import date
>>> daily_axis = DailyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=14
... ).build()
>>> weekly_axis = WeeklyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=2
... ).build()
Now we are ready to create an `AxisRemapper` object:
>>> ac = AxisRemapper(from_axis=daily_axis, to_axis=weekly_axis)
Let's create some random data:
>>> # Creating some random data
... import numpy as np
>>> daily_data = np.random.random((14,1))
Now to convert from daily axis to weekly axis all we need to do is:
>>> weekly_avg = ac.average(daily_data)
>>> weekly_avg
array([[0.71498815],
[0.60443017]])
Let's create another random data; but this time make it multi-dimensional. Note that the first dimension
is the source axis.
>>> # creating a multidimensional data
... daily_data = np.random.random((14, 3, 4, 5))
Now we could convert this new data using the same `AxisRemapper` object that we created. No need to create
a new one. We could reuse it as long as the source and destination axis have not changed.
>>> weekly_avg = ac.average(daily_data)
>>> weekly_avg.shape
(2, 3, 4, 5)
Lets create another multi-dimensional data where the first dimension is not the source axis:
>>> # creating a multi-dimensional data with the axis being the last dimension
... daily_data = np.random.random((3, 4, 5, 14))
You could still use the same `AxisRemapper`; All you need to do is to tell what dimension is the source axis:
>>> weekly_avg = ac.average(daily_data,dimension=3)
>>> weekly_avg.shape
(3, 4, 5, 2)
Similarly you could also calculate the weekly min and max:
>>> # Calculating min and max:
        ... weekly_min = ac.min(daily_data)
>>> weekly_min
array([[0.19497718],
[0.014242 ]])
        >>> weekly_max = ac.max(daily_data)
>>> weekly_max
array([[0.99156943],
[0.64039361]])
* Applying a user-defined function:
>>> from axisutilities import AxisRemapper
>>> from axisutilities import DailyTimeAxisBuilder
>>> from axisutilities import WeeklyTimeAxisBuilder
>>> from datetime import date
>>> import numpy as np
>>>
>>> daily_axis = DailyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=14
... ).build()
>>>
>>> weekly_axis = WeeklyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=2
... ).build()
>>>
>>> ac = AxisRemapper(from_axis=daily_axis, to_axis=weekly_axis)
>>>
>>> def user_defined_function(data):
... return np.nansum(data, axis=0) * 42
...
>>> daily_data = np.random.random((3, 4, 5, 14))
>>>
>>> weekly_user_defined = ac.apply_function(daily_data, user_defined_function, dimension=3)
* Creating Axis-Converter covering different periods: Although from- and to-axis could have different
granularity, eg. one could be daily, another weekly; however, they both must cover the same period in total.
For example, they both must start at January 1st, and end on May 6th. If you want to turn this check off,
pass an extra arguments, called `assure_no_bound_mismatch` and set it to false.
>>> from_axis = DailyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=14
... ).build()
>>> to_axis = WeeklyTimeAxisBuilder(
... start_date=date(2019, 1, 1),
... n_interval=3
... ).build()
>>> # This will generate exception and it would fail:
... # tc = AxisRemapper(from_axis=from_axis, to_axis=to_axis)
... # instead use the following:
... tc = AxisRemapper(
... from_axis=from_axis,
... to_axis=to_axis,
... assure_no_bound_mismatch=False
... )
"""
@staticmethod
def _assure_no_bound_missmatch(fromAxis: Axis, toAxis: Axis) -> bool:
return (fromAxis.lower_bound[0, 0] == toAxis.lower_bound[0, 0]) and \
(fromAxis.upper_bound[0, -1] == toAxis.upper_bound[0, -1])
    def __init__(self, **kwargs) -> None:
        """Construct a remapper between a source and a destination axis.

        :keyword from_axis: source ``Axis`` (required).
        :keyword to_axis: destination ``Axis`` (required).
        :keyword assure_no_bound_mismatch: when truthy (default) raise if the
            two axes do not cover exactly the same overall period.
        :raises TypeError: if from_axis/to_axis are not ``Axis`` instances.
        :raises ValueError: if the axes are missing or their bounds mismatch.
        """
        if ("from_axis" in kwargs) and ("to_axis" in kwargs):
            from_ta = kwargs["from_axis"]
            to_ta = kwargs["to_axis"]
            if not (isinstance(from_ta, Axis) and isinstance(to_ta, Axis)):
                raise TypeError("provided from/to_axis must be of type TimeAxis.")
            self._m = to_ta.nelem
            self._n = from_ta.nelem
            # Sparse (to_nelem x from_nelem) overlap-weight matrix; this is
            # the expensive computation the object caches for reuse.
            self._weight_matrix = self._get_coverage_csr_matrix(from_ta, to_ta)
            self._from_ta = from_ta
            self._to_ta = to_ta
        else:
            raise ValueError("Not enough information is provided to construct the TimeAxisRemapper.")
        if bool(kwargs.get("assure_no_bound_mismatch", True)) and \
                (not AxisRemapper._assure_no_bound_missmatch(self._from_ta, self._to_ta)):
            raise ValueError("from- and to-axis cover a different period. Although from- and to-axis could have "
                             "different granularity, eg. one could be daily, another weekly; however, they both must "
                             "cover the same period in total. For example, they both must start at January 1st, and end"
                             " on May 6th. If you want to turn this check off, pass an extra arguments, called "
                             "`assure_no_bound_mismatch` and set it to false")
    @property
    def from_nelem(self):
        """Number of elements on the source axis."""
        return self._n
    @from_nelem.setter
    def from_nelem(self, v):
        # Deliberate no-op: the value is derived from the source axis, so
        # assignment is silently ignored (read-only property).
        pass
    @property
    def to_nelem(self):
        """Number of elements on the destination axis."""
        return self._m
    @to_nelem.setter
    def to_nelem(self, v):
        # Deliberate no-op: the value is derived from the destination axis,
        # so assignment is silently ignored (read-only property).
        pass
    @property
    def weights(self) -> csr_matrix:
        """Copy of the cached sparse overlap-weight matrix."""
        # A copy is returned so callers cannot mutate the cached matrix.
        return self._weight_matrix.copy()
    @weights.setter
    def weights(self, v):
        # Deliberate no-op: the matrix is computed in __init__ and cached, so
        # assignment is silently ignored (read-only property).
        pass
    @property
    def from_axis(self):
        """The source ``Axis`` this remapper converts from."""
        return self._from_ta
    @from_axis.setter
    def from_axis(self, v):
        # Deliberate no-op: the axis is fixed at construction time, so
        # assignment is silently ignored (read-only property).
        pass
    @property
    def to_axis(self):
        """The destination ``Axis`` this remapper converts to."""
        return self._to_ta
    @to_axis.setter
    def to_axis(self, v):
        # Deliberate no-op: the axis is fixed at construction time, so
        # assignment is silently ignored (read-only property).
        pass
@staticmethod
def _prep_input_data(in_data: Iterable, time_dimension, n) -> (np.ndarray, tuple):
if not isinstance(in_data, Iterable):
raise TypeError("input data should be an Iterable that can be casted to numpy.ndarray.")
in_data_copy = in_data
if not isinstance(in_data_copy, np.ndarray):
in_data_copy = np.asarray(in_data_copy, dtype="float64")
if in_data_copy.ndim == 1:
in_data_copy = in_data_copy.reshape((-1, 1))
if in_data_copy.shape[time_dimension] != n:
raise ValueError("The time dimension does not matches to that of the provided time converter.")
if time_dimension != 0:
in_data_copy = np.moveaxis(in_data_copy, time_dimension, 0)
trailing_shape = in_data_copy.shape[1:]
in_data_copy = in_data_copy.reshape((n, -1))
return in_data_copy, trailing_shape
@staticmethod
def _prep_output_data( out_data: np.ndarray, time_dimension, trailing_shape: tuple):
return np.moveaxis(out_data.reshape((out_data.shape[0], *trailing_shape)), 0, time_dimension)
def average(self, from_data: Iterable, dimension=0):
if isinstance(from_data, Iterable):
return self._average(from_data, self._weight_matrix, dimension)
elif isinstance(from_data, da.Array):
shape = from_data.shape
chunksize = from_data.chunksize
if shape[dimension] != chunksize[dimension]:
new_chunksize = list(chunksize)
new_chunksize[dimension] = shape[dimension]
from_data = from_data.rechunk(tuple(new_chunksize))
return from_data.map_blocks(self._average, weights=self._weight_matrix, dimension=dimension, dtype=from_data.dtype)
else:
raise NotImplementedError()
    @staticmethod
    def _average(from_data: Iterable, weights: csr_matrix, dimension=0) -> np.ndarray:
        """NaN-aware weighted average along the time axis via one sparse matmul.

        Missing (NaN) values are zeroed and excluded from the effective weight
        sum, so each output bin averages only its valid inputs.
        """
        # Flatten trailing dims so a single sparse mat-mul performs the remap.
        from_data_copy, trailing_shape = AxisRemapper._prep_input_data(from_data, dimension, weights.shape[1])
        nan_mask = np.isnan(from_data_copy)
        non_nan_mask = np.ones(from_data_copy.shape, dtype=np.int8)
        non_nan_mask[nan_mask] = 0
        # NOTE(review): _prep_input_data may return a view of the caller's
        # ndarray, so this zeroing can mutate caller data in place — confirm.
        from_data_copy[nan_mask] = 0
        # NOTE(review): a target bin with no valid inputs yields 1/0 -> inf
        # here and inf*0 -> nan below — presumably the intended "missing"
        # marker, but confirm.
        inverse_sum_effective_weights = np.reciprocal(weights * non_nan_mask)
        output = AxisRemapper._prep_output_data(
            np.multiply(weights * from_data_copy, inverse_sum_effective_weights),
            dimension,
            trailing_shape
        )
        return output
def apply_function(self, from_data: Iterable, func2apply: Callable, dimension=0):
if isinstance(from_data, Iterable):
return self._apply_function(from_data, func2apply, self.to_nelem, self._weight_matrix, dimension)
elif isinstance(from_data, da.Array):
shape = from_data.shape
chunksize = from_data.chunksize
if shape[dimension] != chunksize[dimension]:
new_chunksize = list(chunksize)
new_chunksize[dimension] = shape[dimension]
from_data = from_data.rechunk(tuple(new_chunksize))
return from_data.map_blocks(self._apply_function, func2apply=func2apply, to_nelem=self.to_nelem, weights=self._weight_matrix, dimension=dimension, dtype=from_data.dtype)
else:
raise NotImplementedError()
@staticmethod
def _apply_function(data: Iterable, func2apply: Callable, to_nelem: int, weights: csr_matrix, dimension=0):
"""
Applies a user-defined/provided function for the conversion.
:param data: The data on the source-axis that needs to be converted to the destination axis using the
user-provided function.
:param func2apply: The user provided function. This function should assume that it will receives a `m` by `s`
matrix and it should return `1` by `s` output data. It should also handle the `NaN` or
missing values properly.
:param dimension: The dimension where the source axis is. By default, it is assumed that the first dimension
is the source axis.
:return: a data with the same number of dimension of the input, where each element is the result of the user
defined function. All the dimensions are the same as the input data | |
waveguide: The base waveguide material and size in the form of :code:`Box`.
wavelength: Wavelength for the mode solver.
num_modes: Number of modes that should be solved.
wg_height: The waveguide height.
sub_eps: The substrate epsilon (defaults to air)
sub_height: The height of the substrate (or the min height of the waveguide built on top of it)
coupling_gap: The coupling gap specified means we get a pair of base blocks
separated by :code:`coupling_gap`.
block: Perturbing block.
sep: Separation of the block from the base waveguide layer.
vertical: Whether the perturbing block moves vertically, or laterally otherwise.
rib_y: Rib section
Returns:
The resulting :code:`ModeLibrary` with the modified :code:`eps` property.
"""
solver = ModeSolver(size, spacing, wavelength).block_design(
waveguide, wg_height, sub_eps, sub_height, coupling_gap,
block, sep, vertical, rib_y)
return cls(solver, num_modes)
def _check_num_modes(self, mode_idx: int):
if mode_idx > self.m - 1:
raise ValueError(f"Out of range of number of guided mode solutions {self.m}.")
return mode_idx
    def h(self, mode_idx: int = 0, tm_2d: bool = True) -> np.ndarray:
        """Magnetic field :math:`\\mathbf{H}` for the mode of specified index
        Args:
            mode_idx: The mode index :math:`m \\leq M`
            tm_2d: If the mode is using a 1d distribution, this specifies if the mode is TM (otherwise TE)
        Returns:
            :math:`\\mathbf{H}_m`, an :code:`ndarray` of the form :code:`(3, X, Y)` for mode :math:`m \\leq M`
        """
        mode = self.modes[self._check_num_modes(mode_idx)]
        if self.ndim == 1:
            if tm_2d:
                # TM 1d case: presumably the solved scalar is the middle (y)
                # H component; the other two are zero-padded — confirm.
                mode = np.hstack((self.o, mode, self.o))
            else:
                # TE 1d case: remaining H components reconstructed from the
                # scalar mode via beta scaling and a backward finite
                # difference (np.roll by +1) — staggered-grid detail, confirm.
                mode = np.hstack((1j * self.betas[mode_idx] * mode, self.o,
                                  -(mode - np.roll(mode, 1, axis=0)) / self.solver.cells[0])) / (
                                         1j * self.solver.k0)
        return self.solver.reshape(mode)
    def e(self, mode_idx: int = 0, tm_2d: bool = True) -> np.ndarray:
        """Electric field :math:`\\mathbf{E}` for the mode of specified index
        Args:
            mode_idx: The mode index :math:`m \\leq M`
            tm_2d: If the mode is using a 1d distribution, this specifies if the mode is TM (otherwise TE)
        Returns:
            :math:`\\mathbf{E}_m`, an :code:`ndarray` of shape :code:`(3, X, Y, Z)` for mode :math:`m \\leq M`
        """
        self._check_num_modes(mode_idx)
        if self.ndim == 2:
            # 2d modes: derive E from H through the solver's curl relation.
            return self.solver.h2e(self.h(mode_idx), self.betas[mode_idx])
        else:
            mode = self.modes[mode_idx]
            if tm_2d:
                # NOTE(review): forward difference (roll by -1) here vs the
                # backward difference used in h() — presumably deliberate for
                # a staggered (Yee) grid; confirm.
                mode = np.hstack((1j * self.betas[mode_idx] * mode, self.o,
                                  -(np.roll(mode, -1, axis=0) - mode) / self.solver.cells[0])) / (
                                         1j * self.solver.k0 * self.solver.eps_t.flatten())
            else:
                mode = np.hstack((self.o, mode, self.o))
            return self.solver.reshape(mode)
def sz(self, mode_idx: int = 0) -> np.ndarray:
"""Poynting vector :math:`\\mathbf{S}_z` for the mode of specified index
Args:
mode_idx: The mode index :math:`m \\leq M`
Returns:
:math:`\\mathbf{S}_{m, z}`, the z-component of Poynting vector (correspoding to power),
of shape :code:`(X, Y)`
"""
self._check_num_modes(mode_idx)
return poynting_fn(2)(self.e(mode_idx), self.h(mode_idx)).squeeze()
def beta(self, mode_idx: int = 0) -> float:
"""Fundamental mode propagation constant :math:`\\beta` for mode indexed by :code:`mode_idx`.
Args:
mode_idx: The mode index :math:`m \\leq M`
Returns:
:math:`\\beta_m` for mode :math:`m \\leq M`
"""
return self.betas[self._check_num_modes(mode_idx)]
def n(self, mode_idx: int = 0):
"""Effective index :math:`n` for mode indexed by :code:`mode_idx`.
Returns:
The effective index :math:`n`
"""
return self.betas[self._check_num_modes(mode_idx)] / self.solver.k0
    @property
    def ns(self):
        """The refractive index for all modes corresponding to :code:`betas`.
        Returns:
            :math:`\\mathbf{n}`, an :code:`ndarray` for the refractive index of shape :code:`(M,)`
        """
        # Effective indices n_m = beta_m / k0 for every solved mode at once.
        return self.betas / self.solver.k0
    @property
    def dbeta(self):
        # Propagation-constant splitting between the two lowest-order modes.
        return self.beta(0) - self.beta(1)
    @property
    def dn(self):
        # Effective-index splitting (dbeta / k0) between the two lowest modes.
        return (self.beta(0) - self.beta(1)) / self.solver.k0
def te_ratio(self, mode_idx: int = 0):
if self.ndim != 2:
raise AttributeError("ndim must be 2, otherwise te_ratio is 1 or 0.")
te_ratios = []
habs = np.abs(self.h(mode_idx).squeeze())
norms = np.asarray((np.linalg.norm(habs[0].flatten()), np.linalg.norm(habs[1].flatten())))
te_ratios.append(norms[0] ** 2 / np.sum(norms ** 2))
return np.asarray(te_ratios)
    def plot_power(self, ax, idx: int = 0, title: str = "Power", include_n: bool = True,
                   title_size: float = 16, label_size=16):
        """Plot sz overlaid on the material
        Args:
            ax: Matplotlib axis handle.
            idx: Mode index to plot.
            title: Title of the plot/subplot.
            include_n: Include the refractive index in the title.
            title_size: Fontsize of the title.
            label_size: Fontsize of the label.
        """
        if idx > self.m - 1:
            raise ValueError("Out of range of number of solutions")
        if include_n:
            ax.set_title(rf'{title}, $n_{idx + 1} = {self.n(idx):.4f}$', fontsize=title_size)
        else:
            ax.set_title(rf'{title}', fontsize=title_size)
        if self.ndim == 2:
            plot_power_2d(ax, np.abs(self.sz(idx).real), self.eps, spacing=self.solver.spacing[0])
            ax.text(x=0.9, y=0.9, s=rf'$s_z$', color='white', transform=ax.transAxes, fontsize=label_size)
            # Annotate the dominant polarization and its energy fraction.
            # NOTE(review): argmax==0 means te_ratio dominates yet the label is
            # "TM" — confirm which H component te_ratio actually measures.
            ratio = np.max((self.te_ratio(idx), 1 - self.te_ratio(idx)))
            polarization = "TE" if np.argmax((self.te_ratio(idx), 1 - self.te_ratio(idx))) > 0 else "TM"
            ax.text(x=0.05, y=0.9, s=rf'{polarization}[{ratio:.2f}]', color='white', transform=ax.transAxes)
        else:
            plot_field_1d(ax, np.abs(self.sz(idx).real), rf'Power',
                          self.eps, spacing=self.solver.spacing[0])
    def _get_field_component(self, idx: int = 0, axis: Union[int, str] = 1, use_h: bool = True):
        """Return ``(component, latex_label)`` for one axis of the mode's E or H field.

        ``axis`` may be given as 0/1/2 or 'x'/'y'/'z'.
        """
        field = self.h(mode_idx=idx) if use_h else self.e(mode_idx=idx)
        # NOTE(review): self.h/self.e already validate idx via _check_num_modes,
        # so this check can never fire after the call above — confirm before removing.
        if idx > self.m - 1:
            raise ValueError(f"Out of range of number of solutions {self.m}")
        if not (axis in (0, 1, 2, 'x', 'y', 'z')):
            raise ValueError(f"Axis expected to be (0, 1, 2) or ('x', 'y', 'z') but got {axis}.")
        # Normalize to both a letter (for the label) and an int (for indexing).
        a = ['x', 'y', 'z'][axis] if isinstance(axis, int) else axis
        axis = {'x': 0, 'y': 1, 'z': 2}[axis] if isinstance(axis, str) else axis
        return field[axis].squeeze(), rf'$h_{a}$' if use_h else rf'$e_{a}$'
def plot_field(self, ax, idx: int = 0, axis: Union[int, str] = 1, use_h: bool = True, title: str = "Field",
include_n: bool = True, title_size: float = 16, label_size=16):
"""Plot field overlaid on the material.
Args:
ax: Matplotlib axis handle.
idx: Mode index to plot.
axis: Field axis to plot.
use_h: Plot magnetic field :math:`\\mathbf{H}`.
title: Title of the plot/subplot.
include_n: Include the refractive index in the title.
title_size: Fontsize of the title.
label_size: Fontsize of the label.
"""
if include_n:
ax.set_title(rf'{title}, $n_{idx + 1} = {self.n(idx):.4f}$', fontsize=title_size)
else:
ax.set_title(rf'{title}', fontsize=title_size)
field, label = self._get_field_component(idx, axis, use_h)
if self.ndim == 2:
plot_field_2d(ax, field.real.squeeze(), self.eps, spacing=self.solver.spacing[0])
ax.text(x=0.9, y=0.9, s=label, color='black', transform=ax.transAxes,
fontsize=label_size)
ratio = np.max((self.te_ratio(idx), 1 - self.te_ratio(idx)))
polarization = "TE" if np.argmax((self.te_ratio(idx), 1 - self.te_ratio(idx))) > 0 else "TM"
ax.text(x=0.05, y=0.9, s=rf'{polarization}[{ratio:.2f}]', color='black', transform=ax.transAxes)
else:
plot_field_1d(ax, field[axis].real.squeeze(), rf'Field({label})', self.eps, spacing=self.solver.spacing[0])
def phase(self, length: float = 1, mode_idx: int = 0):
"""Measure the phase delay propagated over a length
Args:
length: The length over which to propagate the mode
mode_idx: The mode idx to propagate
Returns:
The aggregate phase delay over a length.
"""
return self.solver.k0 * length * self.n(mode_idx)
    def place(self, mode_idx: int, grid: YeeGrid, center: Size, size: Size) -> np.ndarray:
        """Place at mode_idx in device with :math:`shape` and :math:`region`.
        Args:
            mode_idx: Mode index to place.
            grid: Finite-difference grid to place the mode.
            center: Specified center for placement.
            size: Specified size for placement.
        Returns:
            Places the mode into the provided grid at the requested center and size, with orientation of the mode
            automatically determined from the center and size provided.
        """
        region = grid.slice(center, size)
        if self.ndim == 2:
            # Find the place axis (the poynting direction, where the size should be 0)
            place_axis = np.where(np.array(size) == 0)[0][0]
            # Find the reorientation of field axes based on place_axis
            # 0: (0, 1, 2) -> (2, 0, 1)
            # 1: (0, 1, 2) -> (0, 2, 1)
            # 2: (0, 1, 2) -> (0, 1, 2)
            axes = [
                np.asarray((2, 0, 1), dtype=int),
                np.asarray((0, 2, 1), dtype=int),
                np.asarray((0, 1, 2), dtype=int)
            ][place_axis]
            x = np.zeros((3, *grid.shape), dtype=np.complex128)
            # NOTE(review): the (None,) index prepends an axis so the (3, ...)
            # field lines up with the sliced region — confirm intended
            # alignment with grid.slice's output.
            x[(None,) + region] = self.h(mode_idx).transpose((0, *tuple(1 + axes)))
        else:
            x = np.zeros(grid.shape, dtype=np.complex128)
            # 1d mode occupies a 2d slice of the grid.
            x[region[:2]] = self.modes[mode_idx]
        return x
    def measure_fn(self, mode_idx: int = 0, use_jax: bool = False, tm_2d: bool = True):
        """Measure flux provided a mode indexed at :code:`mode_index`.
        Args:
            mode_idx: Mode index for the measurement.
            use_jax: Use jax.
            tm_2d: Use TM polarization (only relevant in the case of 2D simulations (i.e., 1D modes)).
        Returns:
            A function that takes e, h fields and outputs the a and b terms
        """
        poynting = poynting_fn(use_jax=use_jax)
        em, hm = self.e(mode_idx, tm_2d=tm_2d), self.h(mode_idx, tm_2d=tm_2d)
        # Total modal power; normalizes the overlap integrals below.
        sm = np.sum(poynting(em, hm))
        xp = jnp if use_jax else np
        def _measure(e, h):
            # Overlap of the measured fields with the stored mode profile;
            # a +/- b separate the forward and backward travelling amplitudes.
            a, b = xp.sum(poynting(e, hm)) / sm / 2, (xp.sum(poynting(em, h)) / sm).conj() / 2
            return xp.array([a + b, a - b])
        return _measure
def evolve(self, length: Union[float, np.ndarray], mode_weights: Tuple[float, ...] = (1,), use_h: bool = True):
"""Evolve a mode in time according to :code:`mode_weights`.
Args:
length: The length (or time) over which the mode is evolving. If a 1d array is provided,
output | |
import datetime
from urllib.parse import urlparse
import logging
import threading
import ujson
import grpc
from grpc._cython import cygrpc
from ..grpc_gen import milvus_pb2_grpc
from ..grpc_gen import milvus_pb2 as grpc_types
from .abstract import ConnectIntf, CollectionSchema, IndexParam, PartitionParam, TopKQueryResult, HEntitySet
from .prepare import Prepare
from .types import MetricType, Status
from .check import (
int_or_str,
is_legal_host,
is_legal_port,
)
from .asynch import SearchFuture, InsertFuture, CreateIndexFuture, CompactFuture, FlushFuture
from .hooks import BaseSearchHook
from .client_hooks import SearchHook, HybridSearchHook
from .exceptions import ParamError, NotConnectError
from ..settings import DefaultConfig as config
from . import __version__
LOGGER = logging.getLogger(__name__)
def error_handler(*rargs):
    """Decorator factory for RPC methods of the gRPC handler.

    Wraps a method so that any gRPC timeout, RPC error, or unexpected
    exception is logged (with per-phase timestamps) and converted into a
    Status return instead of propagating. ``rargs`` supplies the default
    payload values appended after the error Status for methods that return
    tuples (e.g. ``@error_handler(None)`` -> ``(status, None)``).

    Improvement: ``functools.wraps`` now preserves the wrapped method's
    ``__name__``/``__doc__`` — the handler itself logs ``func.__name__``, and
    introspection/debugging previously saw every RPC method as "handler".
    """
    from functools import wraps

    def wrapper(func):
        @wraps(func)
        def handler(self, *args, **kwargs):
            record_dict = {}
            try:
                record_dict["API start"] = str(datetime.datetime.now())
                if self._pre_ping:
                    self.ping()
                record_dict["RPC start"] = str(datetime.datetime.now())
                return func(self, *args, **kwargs)
            except grpc.FutureTimeoutError as e:
                record_dict["RPC timeout"] = str(datetime.datetime.now())
                LOGGER.error("\nAddr [{}] {}\nRequest timeout: {}\n\t{}".format(self.server_address, func.__name__, e, record_dict))
                status = Status(Status.UNEXPECTED_ERROR, message='Request timeout')
                return status if not rargs else tuple([status]) + rargs
            except grpc.RpcError as e:
                record_dict["RPC error"] = str(datetime.datetime.now())
                LOGGER.error("\nAddr [{}] {}\nRpc error: {}\n\t{}".format(self.server_address, func.__name__, e, record_dict))
                status = Status(e.code(), message='Error occurred. {}'.format(e.details()))
                return status if not rargs else tuple([status]) + rargs
            except Exception as e:
                record_dict["Exception"] = str(datetime.datetime.now())
                LOGGER.error("\nAddr [{}] {}\nExcepted error: {}\n\t{}".format(self.server_address, func.__name__, e, record_dict))
                status = Status(Status.UNEXPECTED_ERROR, message=str(e))
                return status if not rargs else tuple([status]) + rargs
        return handler
    return wrapper
def set_uri(host, port, uri):
    """Resolve (host, port, uri) into a single "host:port" gRPC address.

    Resolution rules:
      * host given            -> use it, with ``port`` or the configured default port;
      * host and port omitted -> parse ``uri`` (or the configured default URI);
      * port given alone      -> ambiguous, rejected.

    :raises ParamError: for ambiguous parameters, an unparseable uri, or an
        illegal host/port value.
    """
    if host is not None:
        _port = port if port is not None else config.GRPC_PORT
        _host = host
    elif port is None:
        try:
            # Fall back to the configured default URI when none is supplied.
            _uri = urlparse(uri) if uri else urlparse(config.GRPC_URI)
            _host = _uri.hostname
            _port = _uri.port
        except (AttributeError, ValueError, TypeError) as e:
            raise ParamError("uri is illegal: {}".format(e))
    else:
        raise ParamError("Param is not complete. Please invoke as follow:\n"
                         "\t(host = ${HOST}, port = ${PORT})\n"
                         "\t(uri = ${URI})\n")
    if not is_legal_host(_host) or not is_legal_port(_port):
        # Fixed typo in the error message ("illeagl" -> "illegal").
        raise ParamError("host or port is illegal")
    return "{}:{}".format(str(_host), str(_port))
# def connect(addr, timeout):
# channel = grpc.insecure_channel(
# addr,
# options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
# (cygrpc.ChannelArgKey.max_receive_message_length, -1)]
# )
# try:
# ft = grpc.channel_ready_future(channel)
# ft.result(timeout=timeout)
# return True
# except grpc.FutureTimeoutError:
# raise NotConnectError('Fail connecting to server on {}. Timeout'.format(addr))
# except grpc.RpcError as e:
# raise NotConnectError("Connect error: <{}>".format(e))
# # Unexpected error
# except Exception as e:
# raise NotConnectError("Error occurred when trying to connect server:\n"
# "\t<{}>".format(str(e)))
# finally:
# ft.cancel()
# ft.__del__()
# channel.__del__()
class GrpcHandler(ConnectIntf):
    def __init__(self, host=None, port=None, pre_ping=True, **kwargs):
        """Create a handler and immediately open a channel to the server.

        :param host: server host (optional if ``uri`` kwarg is given)
        :param port: server port (optional; defaults from config)
        :param pre_ping: ping the server before each RPC when True
        Accepted kwargs: ``max_retry`` (default 3), ``conn_id``, ``uri``.
        """
        # gRPC plumbing, populated by _setup below.
        self._channel = None
        self._stub = None
        self._uri = None
        self.status = None
        self._connected = False
        self._pre_ping = pre_ping
        # if self._pre_ping:
        self._max_retry = kwargs.get("max_retry", 3)
        # record
        self._id = kwargs.get("conn_id", 0)
        # condition guarding the monotonically increasing request id
        self._condition = threading.Condition()
        self._request_id = 0
        # client hooks (replaceable via set_hook)
        self._search_hook = SearchHook()
        self._hybrid_search_hook = HybridSearchHook()
        self._search_file_hook = SearchHook()
        # set server uri if object is initialized with parameter
        _uri = kwargs.get("uri", None)
        self._setup(host, port, _uri, pre_ping)
def __str__(self):
attr_list = ['%s=%r' % (key, value)
for key, value in self.__dict__.items() if not key.startswith('_')]
return '<Milvus: {}>'.format(', '.join(attr_list))
    def __enter__(self):
        # Context-manager support: yields the handler itself.
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # No teardown: the channel is left open and exceptions propagate.
        pass
    def _setup(self, host, port, uri, pre_ping=False):
        """
        Create a grpc channel and a stub
        :raises: NotConnectError
        """
        # Resolve the final "host:port" target from whichever of host/port/uri
        # was supplied (raises ParamError on bad combinations).
        # NOTE(review): the pre_ping parameter is not used in this method —
        # confirm whether an eager connectivity check was intended here.
        self._uri = set_uri(host, port, uri)
        self._channel = grpc.insecure_channel(
            self._uri,
            # Unlimited message sizes plus retry/keepalive channel options.
            options=[(cygrpc.ChannelArgKey.max_send_message_length, -1),
                     (cygrpc.ChannelArgKey.max_receive_message_length, -1),
                     ('grpc.enable_retries', 1),
                     ('grpc.keepalive_time_ms', 55000)]
            # (b'grpc.enable_http_proxy', 0)]
        )
        self._stub = milvus_pb2_grpc.MilvusServiceStub(self._channel)
        self.status = Status()
def _pre_request(self):
if self._pre_ping:
self.ping()
def _get_request_id(self):
with self._condition:
_id = self._request_id
self._request_id += 1
return _id
def set_hook(self, **kwargs):
"""
specify client hooks.
The client hooks are used in methods which interact with server.
Use key-value method to set hooks. Supported hook setting currently is as follow.
search hook,
search-in-file hook
"""
# config search hook
_search_hook = kwargs.get('search', None)
if _search_hook:
if not isinstance(_search_hook, BaseSearchHook):
raise ParamError("search hook must be a subclass of `BaseSearchHook`")
self._search_hook = _search_hook
_search_file_hook = kwargs.get('search_in_file', None)
if _search_file_hook:
if not isinstance(_search_file_hook, BaseSearchHook):
raise ParamError("search hook must be a subclass of `BaseSearchHook`")
self._search_file_hook = _search_file_hook
def ping(self, timeout=30):
ft = grpc.channel_ready_future(self._channel)
retry = self._max_retry
try:
while retry > 0:
try:
ft.result(timeout=timeout)
return True
except:
retry -= 1
LOGGER.debug("Retry connect addr <{}> {} times".format(self._uri, self._max_retry - retry))
if retry > 0:
continue
else:
LOGGER.error("Retry to connect server {} failed.".format(self._uri))
raise
except grpc.FutureTimeoutError:
raise NotConnectError('Fail connecting to server on {}. Timeout'.format(self._uri))
except grpc.RpcError as e:
raise NotConnectError("Connect error: <{}>".format(e))
# Unexpected error
except Exception as e:
raise NotConnectError("Error occurred when trying to connect server:\n"
"\t<{}>".format(str(e)))
    @property
    def server_address(self):
        """
        Server network address as a "host:port" string.
        """
        return self._uri
def server_version(self, timeout=30):
"""
Provide server version
:return:
Status: indicate if operation is successful
str : Server version
:rtype: (Status, str)
"""
return self._cmd(cmd='version', timeout=timeout)
def server_status(self, timeout=30):
"""
Provide server status
:return:
Status: indicate if operation is successful
str : Server version
:rtype: (Status, str)
"""
return self._cmd(cmd='status', timeout=timeout)
    @error_handler(None)
    def _cmd(self, cmd, timeout=30):
        # Generic command RPC: returns (Status, string_reply) on success,
        # (error Status, None) otherwise. The decorator converts raised
        # errors into (Status, None) as well.
        cmd = Prepare.cmd(cmd)
        rf = self._stub.Cmd.future(cmd, wait_for_ready=True, timeout=timeout)
        response = rf.result()
        if response.status.error_code == 0:
            return Status(message='Success!'), response.string_reply
        return Status(code=response.status.error_code, message=response.status.reason), None
    @error_handler()
    def create_collection(self, collection_name, dimension, index_file_size, metric_type, param, timeout=30):
        """
        Create collection
        :type  param: dict or TableSchema
        :param param: Provide collection information to be created
               `example param={'collection_name': 'name',
                                'dimension': 16,
                                'index_file_size': 1024 (default),
                                'metric_type': Metric_type.L2 (default)
                                }`
               `OR using Prepare.collection_schema to create param`
        :param timeout: timeout, The unit is seconds
        :type  timeout: double
        :return: Status, indicate if operation is successful
        :rtype: Status
        """
        collection_schema = Prepare.collection_schema(collection_name, dimension, index_file_size, metric_type, param)
        rf = self._stub.CreateCollection.future(collection_schema, wait_for_ready=True, timeout=timeout)
        status = rf.result()
        if status.error_code == 0:
            return Status(message='Create collection successfully!')
        # Server rejected the schema: surface the server-side reason.
        LOGGER.error(status)
        return Status(code=status.error_code, message=status.reason)
@error_handler()
def create_hybrid_collection(self, collection_name, fields, timeout=30):
collection_schema = Prepare.collection_hybrid_schema(collection_name, fields)
response = self._stub.CreateHybridCollection(collection_schema)
if response.error_code == 0:
return Status(message='Create collection successfully!')
return Status(code=response.error_code, message=response.reason)
    @error_handler(False)
    def has_collection(self, collection_name, timeout=30, **kwargs):
        """
        This method is used to test collection existence.
        :param collection_name: collection name is going to be tested.
        :type  collection_name: str
        :param timeout: time waiting for server response
        :type  timeout: int
        :return:
            Status: indicate if the check succeeded
            bool: whether the given collection_name exists
        """
        collection_name = Prepare.collection_name(collection_name)
        rf = self._stub.HasCollection.future(collection_name, wait_for_ready=True, timeout=timeout)
        reply = rf.result()
        if reply.status.error_code == 0:
            return Status(), reply.bool_reply
        # RPC-level failure: the decorator's default (False) mirrors this shape.
        return Status(code=reply.status.error_code, message=reply.status.reason), False
    @error_handler(None)
    def describe_collection(self, collection_name, timeout=30, **kwargs):
        """
        Show collection information
        :type  collection_name: str
        :param collection_name: which collection to be shown
        :returns: (Status, collection_schema)
            Status: indicate if query is successful
            collection_schema: return when operation is successful
        :rtype: (Status, TableSchema)
        """
        collection_name = Prepare.collection_name(collection_name)
        rf = self._stub.DescribeCollection.future(collection_name, wait_for_ready=True, timeout=timeout)
        response = rf.result()
        if response.status.error_code == 0:
            # Repackage the protobuf reply into the client-side schema object.
            collection = CollectionSchema(
                collection_name=response.collection_name,
                dimension=response.dimension,
                index_file_size=response.index_file_size,
                metric_type=MetricType(response.metric_type)
            )
            return Status(message='Describe collection successfully!'), collection
        LOGGER.error(response.status)
        return Status(code=response.status.error_code, message=response.status.reason), None
    @error_handler(None)
    def count_collection(self, collection_name, timeout=30, **kwargs):
        """
        obtain vector number in collection
        :type  collection_name: str
        :param collection_name: target collection name.
        :returns:
            Status: indicate if operation is successful
            res: int, collection row count
        """
        collection_name = Prepare.collection_name(collection_name)
        rf = self._stub.CountCollection.future(collection_name, wait_for_ready=True, timeout=timeout)
        response = rf.result()
        if response.status.error_code == 0:
            return Status(message='Success!'), response.collection_row_count
        return Status(code=response.status.error_code, message=response.status.reason), None
    @error_handler([])
    def show_collections(self, timeout=30):
        """
        Show all collections information in database
        :return:
            Status: indicate if this operation is successful
            collections: list of collection names, return when operation
                    is successful
        :rtype:
            (Status, list[str])
        """
        cmd = Prepare.cmd('show_collections')
        rf = self._stub.ShowCollections.future(cmd, wait_for_ready=True, timeout=timeout)
        response = rf.result()
        if response.status.error_code == 0:
            # Drop empty name entries the server may include in the reply.
            return Status(message='Show collections successfully!'), \
                   [name for name in response.collection_names if len(name) > 0]
        return Status(response.status.error_code, message=response.status.reason), []
    @error_handler(None)
    def show_collection_info(self, collection_name, timeout=30):
        """Return (Status, dict) with server-side info for a collection.

        The server replies with a JSON string; an empty reply maps to {}.
        """
        request = grpc_types.CollectionName(collection_name=collection_name)
        rf = self._stub.ShowCollectionInfo.future(request, wait_for_ready=True, timeout=timeout)
        response = rf.result()
        rpc_status = response.status
        if rpc_status.error_code == 0:
            json_info = response.json_info
            return Status(), {} if not json_info else ujson.loads(json_info)
        return Status(rpc_status.error_code, rpc_status.reason), None
    @error_handler()
    def preload_collection(self, collection_name, timeout=None):
        """
        Load collection to cache in advance
        :type  collection_name: str
        :param collection_name: collection to preload
        :returns:
            Status: indicate if invoke is successful
        """
        collection_name = Prepare.collection_name(collection_name)
        # Default timeout is None: preloading large collections can be slow.
        status = self._stub.PreloadCollection.future(collection_name, wait_for_ready=True, timeout=timeout).result()
        return Status(code=status.error_code, message=status.reason)
    @error_handler()
    def reload_segments(self, collection_name, segment_ids, timeout=30):
        """
        Reload the given segments of a collection.
        :param collection_name: target collection name
        :param segment_ids: iterable of segment ids
        :returns: Status, indicate if operation is successful
        """
        # Coerce each id to int where possible before building the request.
        file_ids = list(map(int_or_str, segment_ids))
        request = Prepare.reload_param(collection_name, file_ids)
        status = self._stub.ReloadSegments.future(request, wait_for_ready=True, timeout=timeout).result()
        return Status(code=status.error_code, message=status.reason)
    @error_handler()
    def drop_collection(self, collection_name, timeout=20):
        """
        Delete collection with collection_name
        :type  collection_name: str
        :param collection_name: Name of the collection being deleted
        :return: Status, indicate if operation is successful
        :rtype: Status
        """
        collection_name = Prepare.collection_name(collection_name)
        rf = self._stub.DropCollection.future(collection_name, wait_for_ready=True, timeout=timeout)
        status = rf.result()
        if status.error_code == 0:
            return Status(message='Delete collection successfully!')
        return Status(code=status.error_code, message=status.reason)
@error_handler([])
def insert(self, collection_name, | |
for self.all_letters
        def find_negative_letters(num_string):
            # Letters of self.all_letters absent from num_string
            # (closure over the enclosing method's self).
            return ''.join([i for i in self.all_letters if i not in num_string])
#Returns True iff string1 contains all letters in string2
def contains(num_string1,num_string2):
for i in num_string2:
if i not in num_string1:
return False
return True
        #Removes a sorted string from a set
        def discard(this_set,item):
            # Canonicalize to the sorted form used as the set's key first.
            item = "".join(sorted(item))
            return this_set.discard(item)  # set.discard returns None
#Program Logic:
#So we can classify 1,4,7,8 Given these, what else can we classify?
# 3 is the only len 5 that uses all the segments of 1
# 9 is the only len 6 that uses all segments of 4
# 2 is the only len 5 remaining that uses the only letter missing from 9
# 5 is the other len 5 and is also missing this same letter missing from 9
# 0 contains both letters missing from 5
# 6 contains one letter missing from 5
segments,output = segment_tuple
#combine both lists
segments.extend(output)
#Sort in alphabetical order and create a set of length 10
remaining_segments = set(["".join(sorted(i)) for i in segments])
#Each of these has a uniqeu length
one = list(filter(lambda x : len(x) == 2, remaining_segments))[0]
four = list(filter(lambda x : len(x) == 4, remaining_segments))[0]
seven = list(filter(lambda x : len(x) == 3, remaining_segments))[0]
eight = list(filter(lambda x : len(x) == 7, remaining_segments))[0]
#Discard from remaining_segments
for num in [one,four,seven,eight]:
discard(remaining_segments,num)
#Find 3 - the only len 5 that uses all the segments of 1
for num in remaining_segments:
if len(num) == 5:
if contains(num,one):
three = num
discard(remaining_segments,three)
#Find 9 - the only len 6 that uses all segments of 4
for num in remaining_segments:
if len(num) == 6:
if contains(num,four):
nine = num
discard(remaining_segments,nine)
#Find 2 - the only len 5 remaining that uses the only letter missing from 9
for num in remaining_segments:
if len(num) == 5:
nine_negative = find_negative_letters(nine)
if contains(num,nine_negative):
two = num
discard(remaining_segments,two)
#Find 5 - the other len 5
five = list(filter(lambda x : len(x) == 5, remaining_segments))[0]
discard(remaining_segments,five)
#Find 0 - contains both letters missing from 5
zero = list(filter(lambda x : contains(x,find_negative_letters(five)), remaining_segments))[0]
discard(remaining_segments,zero)
#Six is last one remaining
six = remaining_segments[0]
discard(remaining_segments,six)
if len(remaining_segments) != 0:
print(f"Error: remaining_segments has {len(remaining_segments)} items remaining: {remaining_segments}")
import sys
sys.exit(1)
#Construct local key to convert string to digits {'abcde':0,'ef':1,...}
key = dict()
count = 0
for i in [zero,one,two,three,four,five,six,seven,eight,nine]:
key[i] = count
count += 1
#Extract all digits of output list, sorted in alphabetical order and convert to int via key
d1,d2,d3,d4 = [ key["".join(sorted(i))] for i in output]
#manually construct total
final_number = 1000*d1 + 100*d2 + 10*d3 + d4
#Send total to count_unique_output to satisfy Part 1
self.count_unique_output(final_number)
#Save all totals to the totals list for access later
self.totals.append(final_number)
    def count_unique_output(self,total):
        """Accumulate into self.uniq_count how many of 1, 4, 7, 8 appear in `total`.

        NOTE(review): the membership test counts each digit at most once per
        total regardless of repeats (e.g. "1711" adds 2, for '1' and '7') —
        confirm that repeated digits should not be counted multiple times.
        """
        count = 0
        uniq_len = ["1","4","7","8"]
        #Count all unique length digits in the total.
        for l in uniq_len:
            if l in str(total):
                count += 1
        self.uniq_count += count
class OctopusNavigator:
    """Simulates a grid of octopus energy levels (AoC 2021 day 11 style).

    Each step every octopus gains one energy; any octopus above 9 "flashes",
    boosting its eight neighbours (which may cascade), and every octopus that
    flashed resets to 0 at the end of the step.
    """
    def __init__(self):
        #Counts the number of flashes total
        self.flash_count = 0
        #Counts the number of steps taken
        self.steps = 0
        #Records the first time all octopi flash at the same time.
        self.first_simultaenous_flash_step = 0
    def initialize_octopi(self, seed_strings):
        """Seed self.grid from an iterable of digit strings, one row each."""
        all_rows = list()
        for line in seed_strings:
            row = [o for o in line.strip()]
            all_rows.append(row)
        #Initialize seed strings into numpy array
        self.grid = numpy.array(all_rows, numpy.int32)
        #Keep track of x_max and y_max for looping.
        # NOTE(review): elsewhere the grid is indexed grid[x, y] with
        # x < x_max (a column count) — this only lines up because the grid
        # is square; confirm before feeding non-square input.
        self.x_max = len(self.grid[0])
        self.y_max = len(self.grid)
    def move_one_step(self):
        """Advance the simulation one step: energise, cascade flashes, reset."""
        print(f"Beginning Step {self.steps + 1}")
        #First, add 1 to each energy level (all values)
        self.grid += 1
        #Track octopi that have already flashed this round.
        flashed_octopi = set()
        #Core loop: while any octopus is > 9 and hasn't flashed, keep cascading.
        while self.has_greater_than_nine(flashed_octopi):
            flashed_octopi = self.do_flashes(flashed_octopi)
        #Reset every octopus that flashed this step back to 0.
        for x, y in flashed_octopi:
            self.grid[x, y] = 0
        #Increment step counter.
        self.steps += 1
        print(f"Step {self.steps} complete.")
        #Record only the FIRST step on which every octopus flashed together.
        #(Bug fix: previously this was overwritten on every simultaneous
        # flash, so it ended up holding the LAST such step, contradicting
        # the attribute's stated purpose.)
        zeros = numpy.array([[[0] * self.x_max] * self.y_max], numpy.int32)
        if self.first_simultaenous_flash_step == 0 and numpy.array_equiv(zeros, self.grid):
            self.first_simultaenous_flash_step = self.steps
            print(f"All octopi flashed at step {self.steps}!")
    def do_flashes(self, flashed_octopi):
        """One sweep: flash every >9 octopus not yet flashed; boost neighbours."""
        for x in range(0, self.x_max):
            for y in range(0, self.y_max):
                coord = (x, y)
                #Flash only if this octopus hasn't flashed this step already.
                if self.grid[x, y] > 9 and coord not in flashed_octopi:
                    neighbors = self.find_adjacent_coords(x, y)
                    for i, j in neighbors:
                        self.grid[i, j] += 1
                    self.flash_count += 1
                    flashed_octopi.add(coord)
        return flashed_octopi
    def has_greater_than_nine(self, flashed_octopi):
        """True if some octopus exceeds 9 energy and has not flashed this step."""
        for x in range(0, self.x_max):
            for y in range(0, self.y_max):
                if self.grid[x, y] > 9 and (x, y) not in flashed_octopi:
                    return True
        return False
    def find_adjacent_coords(self, x, y):
        """Set of in-bounds coordinates among the 8 neighbours of (x, y).

        Offsets that would leave the grid are clamped back onto the cell
        itself; the cell's own coordinate is discarded afterwards, so edge
        cells simply get fewer neighbours.
        """
        neighbors = set()
        for i, j in [(0, 1), (0, -1), (1, 0), (-1, 0), (1, 1), (-1, -1), (1, -1), (-1, 1)]:
            if x + i == self.x_max or x + i < 0:
                i = 0
            if y + j == self.y_max or y + j < 0:
                j = 0
            neighbors.add((x + i, y + j))
        neighbors.discard((x, y))
        return neighbors
class CaveGraph:
    """Undirected cave graph for counting start->end paths (AoC 2021 day 12).

    Small caves (lowercase names) may be visited at most once per path —
    or one of them twice when the relaxed rule is enabled; big caves
    (uppercase names) may be revisited freely.
    """
    def __init__(self):
        # Adjacency map: node name -> sorted list of neighbor names.
        self.graph = dict()
        # Node-name caches split by case; big_caves is unused today but kept
        # in case later logic needs it.
        self.small_caves = list()
        self.big_caves = list()
        # Number of distinct valid start->end paths found by walk_next_path().
        self.path_count = 0

    def load_graph_data(self, lines):
        """Populate the adjacency map from newline-separated 'a-b' edges."""
        for line in lines:
            start, end = line.strip().split('-')
            # Unseen nodes get an empty neighbor set first.
            if start not in self.graph:
                self.graph[start] = set()
            if end not in self.graph:
                self.graph[end] = set()
            # Edges are bi-directional.
            self.graph[start].add(end)
            self.graph[end].add(start)
        for node in self.graph:
            # Sort neighbors so traversal order is deterministic when debugging.
            self.graph[node] = sorted(self.graph[node])
        # Cache node names by case: uppercase = big cave, lowercase = small.
        self.big_caves.extend(n for n in self.graph if n.isupper())
        self.small_caves.extend(n for n in self.graph if n.islower())

    def walk_next_path(self, current_path=None, can_visit_a_small_cave_twice=False):
        """Recursively extend current_path, counting every legal path to 'end'.

        current_path: the path walked so far; defaults to ["start"].
            (The original used a mutable default argument ["start"]; a None
            default avoids sharing one list across top-level calls.)
        can_visit_a_small_cave_twice: if True, a single small cave may appear
            twice on a path; all other small caves at most once.
        Increments self.path_count for each complete path found.
        """
        if current_path is None:
            current_path = ["start"]
        last_node = current_path[-1]
        for next_node in self.graph[last_node]:
            # Tentatively extend the path; it is popped again after each branch.
            current_path.append(next_node)
            if next_node == "end":
                # Reached the exit: count one valid path.
                self.path_count += 1
            elif next_node == "start":
                # Returning to the start is never legal; discard this branch.
                pass
            elif next_node in self.small_caves and next_node in current_path[:-1]:
                # Revisiting a small cave: legal only when the "twice" rule is
                # on and no small cave has been doubled yet.
                if can_visit_a_small_cave_twice:
                    # caves_already_visited_twice lists each repeated small
                    # cave once per occurrence, so a single doubled cave
                    # yields length 2; anything longer breaks the rules.
                    if len(self.caves_already_visited_twice(current_path)) <= 2:
                        self.walk_next_path(
                            current_path,
                            can_visit_a_small_cave_twice=can_visit_a_small_cave_twice)
            else:
                # Big cave, or a small cave not yet visited: keep walking.
                self.walk_next_path(
                    current_path,
                    can_visit_a_small_cave_twice=can_visit_a_small_cave_twice)
            current_path.pop()

    def caves_already_visited_twice(self, current_path):
        """Return repeated small caves, one entry per occurrence in the path."""
        return [cave for cave in current_path
                if cave in self.small_caves and current_path.count(cave) > 1]
class ThermalCamera:
def __init__(self):
    """Create a camera with empty dot and fold-instruction lists."""
    self.dots = []
    self.folds = []
def activate(self, lines):
    """Parse the manual and build the initial dot grid.

    lines: iterable of raw input lines (dot coordinates and fold
    instructions). Sets self.x_max/self.y_max from the largest fold on each
    axis (the first fold is at the sheet midpoint, so the full extent is
    twice that coordinate plus one) and marks every dot with a 1 in
    self.grid.
    """
    self.parse_manual(lines)
    # Largest fold coordinate per axis determines the sheet size.
    self.x_max = max(val for axis, val in self.folds if axis == 'x') * 2 + 1
    self.y_max = max(val for axis, val in self.folds if axis == 'y') * 2 + 1
    # numpy.zeros replaces the original nested-Python-list construction:
    # same shape and dtype, without building a throwaway list of lists.
    self.grid = numpy.zeros((self.x_max, self.y_max), numpy.int32)
    # For each dot, set the (x, y) cell to 1.
    for dot in self.dots:
        self.grid[dot] = 1
def parse_manual(self, lines):
    """Read dot coordinates ("x,y") and fold instructions ("fold along a=n").

    Dots are appended to self.dots as (int, int) tuples; folds to
    self.folds as (axis, int) tuples. Blank lines are ignored.
    """
    for raw in lines:
        text = raw.strip()
        if "fold along" in text:
            # e.g. "fold along y=7" -> ("y", 7)
            axis, _, value = text.split(" ")[2].partition('=')
            self.folds.append((axis, int(value)))
        elif ',' in text:
            # e.g. "6,10" -> (6, 10)
            x_str, y_str = text.split(',')
            self.dots.append((int(x_str), int(y_str)))
        # anything else (newline / EOF) carries no data
def do_folds(self, fold_count=None):
#Helper function to determine which fold.
for axis,fold_val in self.folds:
# print(f"Folding across the {axis}={fold_val} axis")
# print(f"Currently the grid is {self.grid.shape} with x_max of {self.x_max} and y_max of {self.y_max}")
#Fold over x=4 means vertical fold (fold left)
if axis == "x":
temp_grid = self.fold_left(fold_val)
#Fold over y=4 means horiz fold (fold up)
elif axis == "y":
temp_grid = self.fold_up(fold_val)
#Case not reached during testing
else:
print(f"Error: unexpected value for {axis} in folds: {folds}")
sys.exit(1)
#Assign temp_grid to self.grid and calculate x_max and y_max
self.grid = temp_grid
self.x_max,self.y_max = self.grid.shape
#logic to stop | |
= self.zenith_cut.get_bounds()[1]
if not self.quiet:
if exp_radius == 180:
print 'Constructing all-sky livetime cube'
else:
print('Constructing livetime cube about RA,Dec = ({0:0.3f},{1:0.3f}) with a radius of {2:0.3f} deg.'.format(roi_dir.ra(),roi_dir.dec(),exp_radius))
for i in xrange(1+self.use_weighted_livetime):
#print('on iteration {0}'.format(i))
sys.stdout.flush()
lt = skymaps.LivetimeCube(
cone_angle =exp_radius,
dir =roi_dir,
zcut =np.cos(np.radians(zenithcut)),
pixelsize =self.livetime_pixelsize,
quiet =self.quiet,
weighted =i)
for hf in self.ft2files:
if not self.quiet: print('checking FT2 file {0}...'.format(hf)),
lt_gti = skymaps.Gti(hf,'SC_DATA')
if not ((lt_gti.maxValue() < self.gti.minValue()) or
(lt_gti.minValue() > self.gti.maxValue())):
lt.load(hf,self.gti)
if not self.quiet: print 'loaded'
else:
if not self.quiet: print 'not in Gti range'
# write out ltcube
extension = 'WEIGHTED_EXPOSURE' if i else 'EXPOSURE'
lt.write(self.ltcube,extension,not bool(i))
if self.dss is not None:
self.dss.write(self.ltcube,header_key=0)
# write some info to livetime file
f = pyfits.open(self.ltcube)
f[0]._header['RADIUS'] = exp_radius
f[0]._header['PIXSIZE'] = self.livetime_pixelsize
f.writeto(self.ltcube,overwrite=True)
f.close()
def _get_GTI(self):
    """Apply GTI cuts and return the merged good-time-interval set."""
    # Union the GTIs from every FT1 file in the spec.
    merged = skymaps.Gti(self.ft1files[0])
    for fname in self.ft1files[1:]:
        merged.combine(skymaps.Gti(fname))
    # Intersect with the user-supplied mask, if one was configured.
    if self.gti_mask is not None:
        ontime_before = merged.computeOntime()
        merged.intersection(self.gti_mask)
        if not self.quiet:
            print('applied gti mask, before, after times: {0:.1f}, {1:.1f}'
                .format(ontime_before, merged.computeOntime()))
    return merged
def _Data_setup(self):
    """ Set static variables in Data, GTI, etc. """
    # Upper bound of each cut range; lower bound for the event class.
    zenithcut = self.zenith_cut.get_bounds()[1]
    thetacut = self.theta_cut.get_bounds()[1]
    event_class = self.event_class_cut.get_bounds()[0]
    # Push the cuts into pointlike's Data configuration; these are
    # class-level setters, so they affect all subsequent Data construction.
    pointlike.Data.set_class_level(event_class)
    pointlike.Data.set_zenith_angle_cut(zenithcut)
    pointlike.Data.set_theta_cut(thetacut)
    pointlike.Data.set_use_mc_energy(self.mc_energy)
    pointlike.Data.set_Gti_mask(self.gti)
    # NOTE: Python 2 print statement -- this module predates Python 3.
    print 'using Gti for creating binned photon file', self.gti
def check_consistency(self, other):
    """Check compatibility to combine with another DataSpec
    In order to be considered compatible for combination the two DataSpec
    instances must have:
        consistent DSS entries
        consistent binning (bins/decade, and MC specs)
        consistent livetime specifications (pixelsize, radius, weighting)
    If incompatible, return an instance of DataError which can then be
    raised if desired, else return None.
    """
    # (attribute to compare, human-readable label for the error message)
    checks = (
        ('dss', 'DSS keywords'),
        ('binsperdec', 'binning'),
        ('livetime_pixelsize', 'livetime pixelsize'),
        ('livetime_buffer', 'livetime buffer'),
        ('use_weighted_livetime', 'use_weighted_livetime'),
        ('mc_energy', 'mc_energy'),
        ('mc_src_id', 'mc_src_id'),
    )
    for attr, label in checks:
        if getattr(self, attr) != getattr(other, attr):
            return DataError('DataSpec instances have inconsistent ' + label)
    return
def add(self, others, output, binfile, ltcube):
    """Combine this DataSpec instance with others and return the result.

    others: a single DataSpec or an iterable of them
    output: name for the combined DataSpec
    binfile: destination for the combined binned-photon file
    ltcube: destination for the combined livetime cube

    The instances must have consistent definitions (DSS, binning, and
    livetime; see check_consistency), and must have non-overlapping GTIs.
    The binfiles and ltcubes are combined and written to the provided
    destinations.
    """
    if not hasattr(others, '__iter__'):
        others = [others]
    # Refuse to combine incompatible specs up front.
    for other in others:
        exc = self.check_consistency(other)
        if exc is not None:
            raise exc
    binfile = os.path.expandvars(binfile)
    ltcube = os.path.expandvars(ltcube)
    gti = skymaps.Gti(self.gti)
    # Copy the file lists: the original aliased self.ft1files/self.ft2files
    # and then used "+=", which silently mutated this instance's attributes.
    ft1 = list(self.ft1files)
    ft2 = list(self.ft2files)
    bpd = skymaps.BinnedPhotonData(self.binfile)
    for other in others:
        # NOTE(review): sequential intersection detects overlap between the
        # accumulated GTI and each successive spec; overlaps purely between
        # two "others" may go undetected -- confirm intended semantics.
        gti.intersection(other.gti)
        ft1 += other.ft1files
        ft2 += other.ft2files
        bpd.add(skymaps.BinnedPhotonData(other.binfile))
    if gti.computeOntime() > 0:
        raise DataError("DataSpec instances have overlapping GTIs")
    # Deduplicate FT2 files (specs may share spacecraft files).
    ft2 = sorted(set(ft2))
    bpd.write(binfile)
    fitstools.merge_lt([self.ltcube] + [other.ltcube for other in others],
        outfile=ltcube, weighted=self.use_weighted_livetime)
    # Propagate DSS entries to the merged products.
    dssman.DSSEntries(self.binfile, header_key=0).write(binfile, header_key=0)
    dssman.DSSEntries(self.ltcube, header_key=0).write(ltcube, header_key=0)
    #TODO: move the copying of DSS entries into the merge_bpd and merge_lt functions
    # Union of all GTI masks becomes the mask of the combined spec.
    gti_mask = skymaps.Gti(self.gti_mask)
    for other in others:
        gti_mask.combine(other.gti_mask)
    kwargs = dict(ft1=ft1,
                  ft2=ft2,
                  binfile=binfile,
                  ltcube=ltcube,
                  gti_mask=gti_mask,
                  binsperdec=self.binsperdec,
                  mc_src_id=self.mc_src_id,
                  mc_energy=self.mc_energy,
                  use_weighted_livetime=self.use_weighted_livetime,
                  livetime_buffer=self.livetime_buffer,
                  livetime_pixelsize=self.livetime_pixelsize,
                  clobber=False)
    return DataSpec(output, **kwargs)
#TODO -- replace/modify ExposureManager
def __call__(self):
    """Return a DataManager created from this DataSpec"""
    # DataManager loads the binned photon data and livetime cube(s)
    # referenced by this spec.
    return DataManager(self)
class DataManager(object):
    """Holds live references to the data products named by a DataSpec.

    Loads the BinnedPhotonData (member ``bpd``), the livetime cube
    (member ``lt``), and, when weighted livetime is enabled, a second
    weighted cube (member ``weighted_lt``).
    """
    def __init__(self, ds):
        """ds: a DataSpec instance naming the files to load."""
        self.dataspec = ds
        self.bpd = skymaps.BinnedPhotonData(ds.binfile)
        self.lt = skymaps.LivetimeCube(ds.ltcube, weighted=False)
        # Only load the weighted cube when the spec asks for it.
        self.weighted_lt = (skymaps.LivetimeCube(ds.ltcube, weighted=True)
                            if ds.use_weighted_livetime else None)
        # Convenience reference to the cube's good-time intervals.
        self.gti = self.lt.gti()

    @property
    def dmap(self):
        """Deprecated alias for the BinnedPhotonData; prefer .bpd."""
        warnings.warn(DeprecationWarning('DataManager.bpd is the preferred name for the BinnedPhotonData object'))
        return self.bpd
class DataSet(object):
    """A helper class to manage DataSpecs
    The basic function of this class is to retrieve pickled DataSpecs from a
    standard directory structure. If a dataset contains multiple DataSpecs
    (e.g. for individual months), this class will store and provide access to
    the list. It can also be used to manage one or more pre-loaded DataSpecs.
    """
    defaults = (('pickle',None,
                 '''A filename, list of filenames, or wildcard expression
                 indicating a set of pickled DataSpecs''')
        ,('dataspec',None,'One or more DataSpec instances')
        ,('data_dir',os.path.join('$FERMI','data'),
          'Path to main data directory')
        ,('clobber',False,'If True, overwrite existing DataSet pickle')
        )

    @keyword_options.decorate(defaults)
    def __init__(self, name=None, **kwargs):
        """Instantiate a DataSet
        If name is not None, it will be converted to a filename as
        os.path.expandvars(os.path.join('$FERMI','data',name.replace(' ','_')+'.pickle')).
        If this file exists, it will be taken to be a pickled DataSpec or
        list of DataSpec pickles. If it does not exist, it will be created with
        the appropriate format for reuse with this class. If the $FERMI
        environment variable is not set, files will be looked up and saved in
        the current working directory.
        Passing a value for name is the preferred use. If name is None, the
        user must provide a value for either pickle or dataspec but NOT both.
        In this case, the instance will manage the DataSpecs specified by
        whichever of those kwargs is used, but will not be saved for reuse.
        A single file passed via the pickle kwarg should point to either a
        pickled DataSpec, or a pickled list of DataSpec pickle files. A list of
        filenames, or a wildcard that matches a list, should each point to
        a pickled DataSpec.
        """
        keyword_options.process(self, kwargs)
        self.name = name
        self.filename = self._parse_name(name)
        # With no saved DataSet on disk, the caller must supply the specs.
        if name is None or not os.path.exists(self.filename):
            if self.pickle is None and self.dataspec is None:
                raise DataError("Must specify either pickle files or DataSpecs")
        if os.path.exists(self.filename or '') and not self.clobber:
            # Reuse the previously saved DataSet.
            self.dataspec = self._load_files(self.filename)
        else:
            if self.pickle is not None:
                if self.dataspec is not None:
                    raise DataError("Must specify dataspec OR pickle, not both.")
                self.dataspec = self._load_files(self.pickle)
            if self.filename is not None:
                # Save for reuse. The original leaked the file handle
                # (dump(..., open(...))); a with-block closes it reliably.
                with open(self.filename, 'w') as pfile:
                    dump(self.dataspec, pfile)

    def _parse_name(self, name):
        """Return the filename corresponding to a DataSet name"""
        if name is None: return name
        data_dir = os.path.expandvars(self.data_dir)
        if data_dir == "$FERMI/data":
            # $FERMI undefined: fall back to the current directory.
            print("$FERMI environment variable not set - using current directory")
            data_dir = os.getcwd()
        if not os.path.exists(data_dir):
            os.mkdir(data_dir)
        basename = name.replace(' ', '_')
        # in case we're parsing the name of a pickle file
        if not basename.endswith('.pickle'):
            basename = '.'.join([basename, 'pickle'])
        return os.path.join(data_dir, basename)

    def _load_files(self, pfile):
        """Load a pickle file and return the resulting DataSpec(s)"""
        if hasattr(pfile, '__iter__'):
            return [self._load_files(pf) for pf in pfile]
        pfile = os.path.expandvars(pfile)
        if not os.path.isabs(pfile):
            pfile = self._parse_name(pfile)
        gfiles = sorted(glob.glob(pfile))
        if len(gfiles) == 0:
            raise DataError("No matches found for wildcard {0}".format(pfile))
        elif len(gfiles) > 1:
            # A wildcard matched several files: load each one.
            return [self._load_files(gf) for gf in gfiles]
        else:
            pfile = gfiles[0]
        # Close the handle deterministically (the original leaked it).
        with open(pfile) as fh:
            pdat = load(fh)
        if isinstance(pdat, DataSpec):
            return pdat
        elif hasattr(pdat, '__iter__'):
            if isinstance(pdat[0], DataSpec):
                return pdat
            # A pickled list of pickle filenames: recurse into them.
            return self._load_files(pdat)
        else:
            # temporary kludge for my local version
            try:
                from users.kerrm import tools
                if isinstance(pdat, tools.dataman.DataSpec):
                    return pdat
            except ImportError:
                pass
            raise DataError("Invalid DataSet pickle: {0}".format(pfile))

    def __getitem__(self, i):
        # NOTE(review): __getattr__ below will recurse if self.dataspec is
        # missing entirely -- presumed never to happen after __init__.
        if hasattr(self.dataspec, '__iter__'):
            return self.dataspec[i]
        else:
            raise DataError('DataSet only contains one DataSpec')

    def __getattr__(self, x):
        # Delegate unknown attributes to the managed DataSpec(s).
        return getattr(self.dataspec, x)

    def __call__(self, month=0):
        """Instantiate the managed DataSpec (or the given month's spec)."""
        # The original ended with an unreachable "raise TypeError" after
        # both branches return; it has been removed.
        if not hasattr(self.dataspec, '__iter__'):
            return self.dataspec()
        else:
            return self.dataspec[month]()
class DataError(Exception):
    """Raised when data-management operations fail or specs are inconsistent."""
def combine_ltcubes(ff, outfile=None):
""" Create a combined light cube file
ff : list of filenames
outfile : optional name write to
"""
class Expose(object):
def __init__(self, hdus):
self.hdus=hdus
# convert the data to a 2-d array for fast sum
self.edata = np.vstack([np.array(x) for x in hdus['EXPOSURE'].data])
self.gti=hdus['GTI']
self.gti_data = hdus['GTI'].data
self.gti_cols = hdus['GTI'].columns
self.ontime = self.gti.header['ONTIME']
self.telapse = self.gti.header['TELAPSE']
def add(self, other):
self.edata += other.edata
self.gti_data = np.hstack([self.gti_data, other.gti_data])
self.ontime += other.ontime
self.telapse+= other.telapse
def make_hdus(self):
# generate total exposure hdu
exposure_hdu =fits.BinTableHDU.from_columns(
[fits.Column(name='COSBINS', format='40E', unit='s', array=self.edata)],
name='EXPOSURE')
# total GTI hdu
d =[fits.Column(name=name, format='D',unit='s',
array=self.gti_data[name]) for name in ['START', 'STOP']]
gti_hdu = fits.BinTableHDU.from_columns(d, name='GTI')
a,b = [self.gti_data[name] for name in ('START','STOP')]
gti_hdu.header['ONTIME'] = sum(b-a)
gti_hdu.header['TELAPSE'] = b[-1]-a[0]
return [self.hdus['PRIMARY'], exposure_hdu, gti_hdu ]
def writeto(self, filename, overwrite=True):
fits.HDUList(self.make_hdus()).writeto(filename, overwrite=overwrite)
expsum | |
from archipelago import archipelago
from archipelago.setup import main_setup, parl_init_TWFY, parl_init_GOV
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import unittest
import sqlite3
import json
from lxml import etree
import os
# ----------------------------- ARCHIPELAGO TESTS -----------------------------
#
# The tests of archipelago are divided into four sections:
# - Fetching data (in whatever form for whatever source)
# - Building data (restructuring data from an external source into a form used
# by archipelago)
# - Loading data (into the archipelago database)
# - Accessing data (from the archipelago database)
# These are all tested in separate classes and may be moved to separate files
# if they swell beyond a manageable size
#
# ==============================================================================
class TestFetchDataMethods(unittest.TestCase):
    # FETCH tests call the live TWFY and GOV web APIs, so they need network
    # access and will break if the upstream data changes (MP counts, names).
    def test_constituencies_TWFYjson_api(self):
        '''FETCH:: Test the TWFY API functions in returning constituencies'''
        request_data = parl_init_TWFY.fetch_data_online('getConstituencies', output='json')
        test_reference = [
            {"name" : "Aberavon"},
            {"name" : "Aberconwy"},
            {"name" : "Aberdeen North"},
            {"name" : "Aberdeen South"}
        ]
        # test initial records
        self.assertEqual( request_data[0:4], test_reference )
        # test number of responses
        self.assertEqual( len(request_data), 650)
        # test encoding of accents in unicode
        self.assertEqual( request_data[-3]["name"], u'Ynys M\xf4n') #Ynys Mon w circumflex
    def test_mp_and_office_TWFYjson_api(self):
        '''FETCH:: Test the TWFY API returns the correct number of MPs and a full example'''
        request_data = parl_init_TWFY.fetch_data_online('getMPs', '&party=Liberal')
        test_reference = [
            {
                "name": "<NAME>",
                "office":
                [
                    {
                        "dept": "Welsh Affairs Committee",
                        "from_date": "2015-07-13",
                        "to_date": "9999-12-31",
                        "position": "Member"
                    }
                ],
                "member_id": "40728",
                "person_id": "11489",
                "party": "Liberal Democrat",
                "constituency": "Ceredigion"
            }
        ]
        # test first record
        self.assertEqual( request_data[0:1], test_reference )
        # test number of responses
        self.assertEqual( len(request_data), 8)
    def test_fetch_addresses_GOVxml_api(self):
        '''FETCH:: Test the GOV api returns addresses in the correct format in XML'''
        test_constituency = "Ceredigion"
        xml_results = parl_init_GOV.fetch_xml_online(
            request='constituency='+test_constituency+'/',
            output='Addresses/'
        )
        # Expected serialization of the GOV members API response for one MP.
        test_reference = '''
<Members>
<Member Member_Id="1498" Dods_Id="31723" Pims_Id="4845">
<DisplayAs>Mr <NAME></DisplayAs>
<ListAs>Williams, Mr Mark</ListAs>
<FullTitle>Mr <NAME> MP</FullTitle>
<LayingMinisterName/>
<DateOfBirth>1966-03-24T00:00:00</DateOfBirth>
<DateOfDeath xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<Gender>M</Gender>
<Party Id="17">Liberal Democrat</Party>
<House>Commons</House>
<MemberFrom>Ceredigion</MemberFrom>
<HouseStartDate>2005-05-05T00:00:00</HouseStartDate>
<HouseEndDate xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<CurrentStatus Id="0" IsActive="True">
<Name>Current Member</Name>
<Reason/>
<StartDate>2015-05-07T00:00:00</StartDate>
</CurrentStatus>
<Addresses>
<Address Type_Id="6">
<Type>Website</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>False</IsPhysical>
<Note/>
<Address1>http://www.markwilliams.org.uk/</Address1>
</Address>
<Address Type_Id="4">
<Type>Constituency</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>True</IsPhysical>
<Note/>
<Address1>32 North Parade</Address1>
<Address2>Aberystwyth</Address2>
<Address3/>
<Address4/>
<Address5>Ceredigion</Address5>
<Postcode>SY23 2NF</Postcode>
<Phone>01970 627721</Phone>
<Fax/>
<Email/>
<OtherAddress/>
</Address>
<Address Type_Id="1">
<Type>Parliamentary</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>True</IsPhysical>
<Note/>
<Address1>House of Commons</Address1>
<Address2/>
<Address3/>
<Address4/>
<Address5>London</Address5>
<Postcode>SW1A 0AA</Postcode>
<Phone>020 7219 8469</Phone>
<Fax/>
<Email><EMAIL></Email>
<OtherAddress/>
</Address>
<Address Type_Id="7">
<Type>Twitter</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>False</IsPhysical>
<Note/>
<Address1>https://twitter.com/mark4ceredigion</Address1>
</Address>
</Addresses>
</Member>
</Members>
'''
        returned_string = etree.tostring(xml_results, pretty_print=True)
        # Re-indent the serialized XML to match the reference layout.
        returned_string = "\n "+returned_string.replace(">\n", ">\n ")
        # print test_reference, '----', returned_string
        self.assertEqual(test_reference, returned_string)
class TestBuildDataMethods(unittest.TestCase):
    # BUILD tests are pure data-restructuring checks: no network or database.
    def test_build_mp_and_office_list(self):
        '''BUILD:: Test the MPandOffice tuple list is built correctly,
        starting with TWFY API output'''
        # due to shifting to sqlalchemy, this function is almost redundant
        # as no longer use tuples. have rewritten to be dicts.
        test_data = [
            {
                "name": "<NAME>",
                "office":
                [
                    {
                        "dept": "Welsh Affairs Committee",
                        "from_date": "2015-07-13",
                        "to_date": "9999-12-31",
                        "position": "Member"
                    },
                    {
                        "dept": "Foreign Office",
                        "from_date": "2015-07-13",
                        "to_date": "9999-12-31",
                        "position": "Foreign Secretary"
                    }
                ],
                "member_id": "40728",
                "person_id": "11489",
                "party": "Liberal Democrat",
                "constituency": "Ceredigion"
            },
            {
                "name": "<NAME>",
                "member_id": "40730",
                "person_id": "11491",
                "party": "Labour",
                "constituency": "York Outer"
            }
        ]
        processed_data = parl_init_TWFY.build_mp_and_office_lists(test_data)
        # Expected result: a (mp_list, office_list) pair of dict lists;
        # note the id fields are converted from strings to ints.
        test_reference = (
            [
                {
                    'name':"<NAME>",
                    'party':"Liberal Democrat",
                    'member_id':40728,
                    'person_id':11489,
                    'constituency':"Ceredigion"
                },{
                    'name':"<NAME>",
                    'party':"Labour",
                    'member_id':40730,
                    'person_id':11491,
                    'constituency':"York Outer"
                }
            ],
            [
                {
                    'person_id':11489,
                    'department':"Welsh Affairs Committee",
                    'start_date':"2015-07-13",
                    'end_date':"9999-12-31",
                    'name':"<NAME>",
                    'title':"Member"
                },{
                    'person_id':11489,
                    'department':"Foreign Office",
                    'start_date':"2015-07-13",
                    'end_date':"9999-12-31",
                    'name':"<NAME>",
                    'title':"Foreign Secretary"
                }
            ]
        )
        self.assertEqual(processed_data, test_reference)
    def test_build_mp_addresses_from_consituency(self):
        """BUILD:: Test the address list is built properly from address xml"""
        # Raw GOV members API XML for a single MP (same fixture as the FETCH test).
        test_data = '''
<Members>
<Member Member_Id="1498" Dods_Id="31723" Pims_Id="4845">
<DisplayAs>Mr <NAME></DisplayAs>
<ListAs>Williams, Mr Mark</ListAs>
<FullTitle>Mr <NAME> MP</FullTitle>
<LayingMinisterName/>
<DateOfBirth>1966-03-24T00:00:00</DateOfBirth>
<DateOfDeath xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<Gender>M</Gender>
<Party Id="17">Liberal Democrat</Party>
<House>Commons</House>
<MemberFrom>Ceredigion</MemberFrom>
<HouseStartDate>2005-05-05T00:00:00</HouseStartDate>
<HouseEndDate xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:nil="true"/>
<CurrentStatus Id="0" IsActive="True">
<Name>Current Member</Name>
<Reason/>
<StartDate>2015-05-07T00:00:00</StartDate>
</CurrentStatus>
<Addresses>
<Address Type_Id="6">
<Type>Website</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>False</IsPhysical>
<Note/>
<Address1>http://www.markwilliams.org.uk/</Address1>
</Address>
<Address Type_Id="4">
<Type>Constituency</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>True</IsPhysical>
<Note/>
<Address1>32 North Parade</Address1>
<Address2>Aberystwyth</Address2>
<Address3/>
<Address4/>
<Address5>Ceredigion</Address5>
<Postcode>SY23 2NF</Postcode>
<Phone>01970 627721</Phone>
<Fax/>
<Email/>
<OtherAddress/>
</Address>
<Address Type_Id="1">
<Type>Parliamentary</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>True</IsPhysical>
<Note/>
<Address1>House of Commons</Address1>
<Address2/>
<Address3/>
<Address4/>
<Address5>London</Address5>
<Postcode>SW1A 0AA</Postcode>
<Phone>020 7219 8469</Phone>
<Fax/>
<Email><EMAIL></Email>
<OtherAddress/>
</Address>
<Address Type_Id="7">
<Type>Twitter</Type>
<IsPreferred>False</IsPreferred>
<IsPhysical>False</IsPhysical>
<Note/>
<Address1>https://twitter.com/mark4ceredigion</Address1>
</Address>
</Addresses>
</Member>
</Members>
'''
        reference_xml = etree.fromstring(test_data)
        processed_data = parl_init_GOV.build_mp_addresses_from_constituency(reference_xml)
        # Only the online addresses (twitter/website) are extracted.
        test_reference = {
            "official_ID":"1498",
            "name":"Mr <NAME>",
            "constituency":"Ceredigion",
            "addresses": {
                "twitter":"https://twitter.com/mark4ceredigion",
                "website":"http://www.markwilliams.org.uk/"
            }
        }
        self.assertEqual(processed_data, test_reference)
class TestLoadDataMethods(unittest.TestCase):
    # LOAD tests populate a scratch SQLite database from the live APIs,
    # then verify the stored rows with raw SQL. Network access is required.
    def setUp(self):
        # Fresh on-disk SQLite database and session factory for each test.
        self.test_db = "test.db"
        self.engine = main_setup.create_database('sqlite:///'+self.test_db)
        self.session_factory = sessionmaker(bind=self.engine)
    def tearDown(self):
        # Delete the scratch database so tests stay independent.
        os.remove(self.test_db)
    def test_load_constituencies(self):
        '''LOAD:: Test the load_constituencies method correctly loads into a test database'''
        session = self.session_factory()
        parl_init_TWFY.load_constituencies(session)
        session.commit()
        with sqlite3.connect(self.test_db) as connection:
            cur = connection.cursor()
            cur.execute("SELECT * FROM MPCommons")
            loaded_constituencies = cur.fetchall()
        # Only constituency names are filled at this stage; other columns
        # stay NULL (and the MP flag defaults to 0).
        test_reference = [
            (None, u'Worsley and Eccles South', 0, None, None, None, None, None),
            (None, u'Worthing West', 0, None, None, None, None, None),
            (None, u'Wrexham', 0, None, None, None, None, None),
            (None, u'Wycombe', 0, None, None, None, None, None),
            (None, u'Wyre and Preston North', 0, None, None, None, None, None),
            (None, u'Wyre Forest', 0, None, None, None, None, None),
            (None, u'Wythenshawe and Sale East', 0, None, None, None, None, None),
            (None, u'Yeovil', 0, None, None, None, None, None),
            (None, u'Ynys M\xf4n', 0, None, None, None, None, None),
            (None, u'York Central', 0, None, None, None, None, None),
            (None, u'York Outer', 0, None, None, None, None, None)
        ]
        self.assertEqual( loaded_constituencies[-11:], test_reference)
    def test_load_mp_details(self):
        '''LOAD:: Load_constituencies as setUp, and test mp details (general and committees)
        have loaded correctly into test db '''
        session = self.session_factory()
        # SetUp: Load constituencies. Note: method had been tested separately
        parl_init_TWFY.load_constituencies(session)
        # End of SetUp
        parl_init_TWFY.load_mp_details(session)
        session.commit()
        with sqlite3.connect(self.test_db) as connection:
            cur = connection.cursor()
            cur.execute("SELECT * FROM MPCommons")
            loaded_mps = cur.fetchall()
            mp_test_reference = [
                (u'<NAME>', u'Wythenshawe and Sale East', 1, u'Labour', None, 40912, 25220, None),
                (u'<NAME>', u'Yeovil', 1, u'Conservative', None, 41102, 25384, None),
                (u'<NAME>', u'Ynys M\xf4n', 1, u'Labour', None, 40873, 11148, None),
                (u'<NAME>', u'York Central', 1, u'Labour/Co-operative', None, 41325, 25433, None),
                (u'<NAME>', u'York Outer', 1, u'Conservative', None, 41326, 24853, None)
            ]
            # Test MPs general data has loaded
            self.assertEqual( loaded_mps[-5:], mp_test_reference )
            cur.execute("SELECT * FROM Offices WHERE Name='<NAME>'")
            loaded_offices = cur.fetchall()
            offices_test_reference = [
                (10040, u"Speaker's Committee for the Independent Parliamentary Standards Authority", u'2015-05-18', u'9999-12-31', u'<NAME>', u'Chair'),
                (10040, u"Speaker's Committee on the Electoral Commission", u'2015-03-30', u'9999-12-31', u'<NAME>', u'Member'),
                (10040, u'', u'2009-06-22', u'9999-12-31', u'<NAME>', u'Speaker of the House of Commons'),
                (10040, u'House of Commons Commission', u'2009-06-22', u'9999-12-31', u'<NAME>', u'Member')
            ]
            self.maxDiff = None
            # Test MPs committees data has loaded (order-independent via sets).
            self.assertEqual( set(loaded_offices[-4:]), set(offices_test_reference) )
    def test_load_addresses_from_constituency(self):
        '''LOAD:: Test the addresses are loaded for a given constituency'''
        session = self.session_factory()
        parl_init_GOV.load_addresses_from_constituency(u"Ceredigion", session)
        parl_init_GOV.load_addresses_from_constituency(u"York Central", session)
        parl_init_GOV.load_addresses_from_constituency(u"Ynys M\xf4n", session)
        # parl_init_GOV.load_addresses_from_constituency(u"Sheffield, Brightside and Hillsborough", self.test_db)
        session.commit()
        with sqlite3.connect(self.test_db) as connection:
            cur = connection.cursor()
            cur.execute("SELECT * FROM Addresses ORDER BY OfficialID, AddressType ASC")
            loaded_addresses = cur.fetchall()
            addresses_test_reference = [
                (1474, u'twitter', u'https://twitter.com/AlbertOwenMP'),
                (1474, u'website', u'http://albertowenmp.org/'),
                (1498, u"twitter", u"https://twitter.com/mark4ceredigion"),
                (1498, u"website", u"http://www.markwilliams.org.uk/" ),
                (4471, u'twitter', u'https://twitter.com/rachaelmaskell'),
                (4471, u'website', u'http://www.rachaelmaskell.com/')
            ]
            self.assertEqual( loaded_addresses, addresses_test_reference )
        # NOTE(review): redundant trailing statement kept for byte-fidelity.
        pass
class TestDatabaseAccessorMethods(unittest.TestCase):
def setUp(self):
    """Build a scratch SQLite database pre-populated with known MP rows."""
    self.test_db = "test.db"
    self.engine = main_setup.create_database('sqlite:///'+self.test_db)
    self.session_factory = sessionmaker(bind=self.engine)
    session = self.session_factory()
    # Build test database with reference data to test accessors
    main_setup.create_database('sqlite:///'+self.test_db)
    parl_init_TWFY.load_constituencies(session)
    session.commit()
    # Fixture rows: (MPCommons updates, Offices inserts, Addresses inserts).
    test_reference = (
        [
            (
                "<NAME>",
                "Liberal Democrat",
                40728,
                11489,
                123456789,
                "Ceredigion",
            ),(
                "<NAME>",
                "Labour",
                40730,
                11491,
                987654321,
                "York Outer"
            ),(
                "<NAME>",
                "Labour",
                40732,
                11493,
                11223344,
                "Belfast West"
            )
        ],
        [
            (
                11489,
                "Welsh Affairs Committee",
                "2015-07-13",
                "9999-12-31",
                "<NAME>",
                "Member"
            ),(
                11489,
                "Foreign Office",
                "2015-07-13",
                "9999-12-31",
                "<NAME>",
                "Foreign Secretary"
            )
        ],
        [
            (
                11223344,
                'twitter',
                'whatahandle'
            )
        ]
    )
    # Inject the fixtures directly with raw SQL, bypassing the loaders.
    with sqlite3.connect(self.test_db) as connection:
        cur = connection.cursor()
        cur.executemany('UPDATE MPCommons SET Name=?,Party=?,MP=1,MemberId=?,PersonId=?, OfficialId=?\
            WHERE Constituency=?', test_reference[0])
        cur.executemany('INSERT INTO Offices VALUES(?,?,?,?,?,?)', test_reference[1])
        cur.executemany('INSERT INTO Addresses VALUES(?,?,?)', test_reference[2])
def tearDown(self):
    # Remove the scratch SQLite file created by setUp after each test.
    os.remove(self.test_db)
def test_return_constituency_list(self):
'''ACCESS:: Test all constituencies returned in a list'''
arch = archipelago.Archipelago("sqlite:///test.db")
constituency_list = arch.get_constituencies()
start_constituencies = [u'Aberavon', u'Aberconwy', u'Aberdeen North']
end_constituencies = | |
# coding: utf-8
"""
tweak-api
Tweak API to integrate with all the Tweak services. You can find out more about Tweak at <a href='https://www.tweak.com'>https://www.tweak.com</a>, #tweak.
OpenAPI spec version: 1.0.8-beta.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class TeamBuilderConfigProductTypeApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def team_builder_config_product_types_change_stream_get(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_change_stream_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.team_builder_config_product_types_change_stream_get_with_http_info(**kwargs)
else:
(data) = self.team_builder_config_product_types_change_stream_get_with_http_info(**kwargs)
return data
def team_builder_config_product_types_change_stream_get_with_http_info(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_change_stream_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['options']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method team_builder_config_product_types_change_stream_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/TeamBuilderConfigProductTypes/change-stream'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'options' in params:
query_params['options'] = params['options']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def team_builder_config_product_types_change_stream_post(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_change_stream_post(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.team_builder_config_product_types_change_stream_post_with_http_info(**kwargs)
else:
(data) = self.team_builder_config_product_types_change_stream_post_with_http_info(**kwargs)
return data
def team_builder_config_product_types_change_stream_post_with_http_info(self, **kwargs):
"""
Create a change stream.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_change_stream_post_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str options:
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['options']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method team_builder_config_product_types_change_stream_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/TeamBuilderConfigProductTypes/change-stream'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'options' in params:
form_params.append(('options', params['options']))
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def team_builder_config_product_types_count_get(self, **kwargs):
"""
Count instances of the model matched by where from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_count_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.team_builder_config_product_types_count_get_with_http_info(**kwargs)
else:
(data) = self.team_builder_config_product_types_count_get_with_http_info(**kwargs)
return data
def team_builder_config_product_types_count_get_with_http_info(self, **kwargs):
"""
Count instances of the model matched by where from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_count_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str where: Criteria to match model instances
:return: InlineResponse2001
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['where']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method team_builder_config_product_types_count_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/TeamBuilderConfigProductTypes/count'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'where' in params:
query_params['where'] = params['where']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/xml', 'text/xml', 'application/javascript', 'text/javascript'])
if not header_params['Accept']:
del header_params['Accept']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/x-www-form-urlencoded', 'application/xml', 'text/xml'])
# Authentication setting
auth_settings = ['access_token']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2001',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
collection_formats=collection_formats)
def team_builder_config_product_types_find_one_get(self, **kwargs):
"""
Find first instance of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_find_one_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: TeamBuilderConfigProductType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.team_builder_config_product_types_find_one_get_with_http_info(**kwargs)
else:
(data) = self.team_builder_config_product_types_find_one_get_with_http_info(**kwargs)
return data
def team_builder_config_product_types_find_one_get_with_http_info(self, **kwargs):
"""
Find first instance of the model matched by filter from the data source.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.team_builder_config_product_types_find_one_get_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str filter: Filter defining fields, where, include, order, offset, and limit - must be a JSON-encoded string ({\"something\":\"value\"})
:return: TeamBuilderConfigProductType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['filter']
all_params.append('callback')
all_params.append('_return_http_data_only')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method team_builder_config_product_types_find_one_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = | |
'AuthDBChange:AuthGroup$A group!1800': {
'app_version': u'v1a',
'auth_db_rev': 3,
'change_type': change_log.AuthDBChange.CHANGE_GROUP_DELETED,
'class_': [u'AuthDBChange', u'AuthDBGroupChange'],
'comment': u'Deleted',
'old_description': u'Another blah',
'old_owners': u'another-owners',
'target': u'AuthGroup$A group',
'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
'who': model.Identity(kind='user', name='<EMAIL>'),
},
}, changes)
  def test_ip_whitelists_diff(self):
    """Checks AuthDBChange entities emitted over an IP whitelist's lifecycle.

    Creates, modifies and deletes an IP whitelist inside AuthDB transactions;
    grab_all() collects the change log entities generated at each revision.
    """
    def create():
      make_ip_whitelist(
          name='A list',
          subnets=['127.0.0.1/32', '127.0.0.2/32'],
          description='Blah',
          comment='New list')
    changes = self.grab_all(self.auth_db_transaction(create))
    # Creation emits both an IPWL_CREATED and an IPWL_SUBNETS_ADDED change.
    self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_CREATED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'New list',
        'description': u'Blah',
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3200': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'New list',
        'subnets': [u'127.0.0.1/32', u'127.0.0.2/32'],
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    def modify():
      l = model.ip_whitelist_key('A list').get()
      l.subnets = ['127.0.0.1/32', '127.0.0.3/32']
      l.description = 'Another blah'
      l.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Changed')
      l.put()
    changes = self.grab_all(self.auth_db_transaction(modify))
    # Swapping a subnet surfaces as one SUBNETS_ADDED plus one
    # SUBNETS_REMOVED change, alongside the description change.
    self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3100': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_DESCRIPTION_CHANGED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'Changed',
        'description': u'Another blah',
        'old_description': u'Blah',
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3200': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'Changed',
        'subnets': [u'127.0.0.3/32'],
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3300': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'Changed',
        'subnets': [u'127.0.0.2/32'],
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    def delete():
      l = model.ip_whitelist_key('A list').get()
      l.record_deletion(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Deleted')
      l.key.delete()
    changes = self.grab_all(self.auth_db_transaction(delete))
    # Deletion reports removal of all remaining subnets plus IPWL_DELETED.
    self.assertEqual({
      'AuthDBChange:AuthIPWhitelist$A list!3300': {
        'app_version': u'v1a',
        'auth_db_rev': 3,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_SUBNETS_REMOVED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'Deleted',
        'subnets': [u'127.0.0.1/32', u'127.0.0.3/32'],
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelist$A list!3400': {
        'app_version': u'v1a',
        'auth_db_rev': 3,
        'change_type': change_log.AuthDBChange.CHANGE_IPWL_DELETED,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistChange'],
        'comment': u'Deleted',
        'old_description': u'Another blah',
        'target': u'AuthIPWhitelist$A list',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
  def test_ip_wl_assignments_diff(self):
    """Checks AuthDBChange entities for per-identity whitelist assignments.

    Creates two assignments, then rewrites the assignment list; verifies
    the IPWLASSIGN_SET / IPWLASSIGN_UNSET change entities emitted per
    identity at each revision.
    """
    def create():
      a = model.AuthIPWhitelistAssignments(
          key=model.ip_whitelist_assignments_key(),
          assignments=[
            model.AuthIPWhitelistAssignments.Assignment(
                identity=ident('<EMAIL>'),
                ip_whitelist='An IP whitelist'),
            model.AuthIPWhitelistAssignments.Assignment(
                identity=ident('<EMAIL>'),
                ip_whitelist='Another IP whitelist'),
          ])
      a.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='New assignment')
      a.put()
    changes = self.grab_all(self.auth_db_transaction(create))
    # One IPWLASSIGN_SET change per assigned identity.
    self.assertEqual({
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:<EMAIL>!5000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'New assignment',
        'identity': model.Identity(kind='user', name='<EMAIL>'),
        'ip_whitelist': u'An IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:<EMAIL>',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:<EMAIL>!5000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'New assignment',
        'identity': model.Identity(kind='user', name='<EMAIL>'),
        'ip_whitelist': u'Another IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:b<EMAIL>',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    def change():
      a = model.ip_whitelist_assignments_key().get()
      a.assignments=[
        model.AuthIPWhitelistAssignments.Assignment(
            identity=ident('<EMAIL>'),
            ip_whitelist='Another IP whitelist'),
        model.AuthIPWhitelistAssignments.Assignment(
            identity=ident('<EMAIL>'),
            ip_whitelist='IP whitelist'),
      ]
      a.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='change')
      a.put()
    changes = self.grab_all(self.auth_db_transaction(change))
    # Rewriting the list yields SET for changed/new identities and UNSET
    # for the identity that lost its assignment.
    self.assertEqual({
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:<EMAIL>!5000': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'change',
        'identity': model.Identity(kind='user', name='<EMAIL>'),
        'ip_whitelist': u'Another IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:<EMAIL>',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:<EMAIL>!5100': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_UNSET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'change',
        'identity': model.Identity(kind='user', name='<EMAIL>'),
        'ip_whitelist': u'Another IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:<EMAIL>',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthIPWhitelistAssignments$'
      'default$user:<EMAIL>!5000': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_IPWLASSIGN_SET,
        'class_': [u'AuthDBChange', u'AuthDBIPWhitelistAssignmentChange'],
        'comment': u'change',
        'identity': model.Identity(kind='user', name='<EMAIL>'),
        'ip_whitelist': u'IP whitelist',
        'target': u'AuthIPWhitelistAssignments$default$user:<EMAIL>',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
  def test_global_config_diff(self):
    """Checks AuthDBChange entities emitted for AuthGlobalConfig edits.

    Creates the root config, then modifies client IDs, token server URL and
    the security config; verifies the exact change entities per revision.
    """
    def create():
      c = model.AuthGlobalConfig(
          key=model.root_key(),
          oauth_client_id='client_id',
          oauth_client_secret='client_secret',
          oauth_additional_client_ids=['1', '2'])
      c.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Config change')
      c.put()
    changes = self.grab_all(self.auth_db_transaction(create))
    # Creation emits OAUTH_CLIENT_CHANGED plus CLIENT_IDS_ADDED.
    self.assertEqual({
      'AuthDBChange:AuthGlobalConfig$root!7000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_OAUTH_CLIENT_CHANGED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_client_id': u'client_id',
        'oauth_client_secret': u'client_secret',
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7100': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_additional_client_ids': [u'1', u'2'],
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    def modify():
      c = model.root_key().get()
      c.oauth_additional_client_ids = ['1', '3']
      c.token_server_url = 'https://token-server'
      c.security_config = security_config(['hi'])
      c.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Config change')
      c.put()
    changes = self.grab_all(self.auth_db_transaction(modify))
    # Each touched facet of the config gets its own change entity.
    self.assertEqual({
      'AuthDBChange:AuthGlobalConfig$root!7100': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_ADDED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_additional_client_ids': [u'3'],
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7200': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_CONF_CLIENT_IDS_REMOVED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'oauth_additional_client_ids': [u'2'],
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7300': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type':
            change_log.AuthDBChange.CHANGE_CONF_TOKEN_SERVER_URL_CHANGED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'target': u'AuthGlobalConfig$root',
        'token_server_url_new': u'https://token-server',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthGlobalConfig$root!7400': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type':
            change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
        'class_': [u'AuthDBChange', u'AuthDBConfigChange'],
        'comment': u'Config change',
        'security_config_new': security_config(['hi']),
        'target': u'AuthGlobalConfig$root',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
  def test_realms_globals_diff(self):
    """Checks the change entity emitted when the stored permission set changes.

    Creates the realms globals, then swaps one permission for another; a
    single change entity carries both the added and the removed names.
    """
    def create():
      c = model.AuthRealmsGlobals(
          key=model.realms_globals_key(),
          permissions=[
              realms_pb2.Permission(name='luci.dev.p1'),
              realms_pb2.Permission(name='luci.dev.p2'),
              realms_pb2.Permission(name='luci.dev.p3'),
          ])
      c.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='New realms config')
      c.put()
    # Initial creation is not checked here; only the follow-up diff is.
    self.auth_db_transaction(create)
    def modify():
      ent = model.realms_globals_key().get()
      ent.permissions = [
          realms_pb2.Permission(name='luci.dev.p1'),
          realms_pb2.Permission(name='luci.dev.p3'),
          realms_pb2.Permission(name='luci.dev.p4'),
      ]
      ent.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Realms config change')
      ent.put()
    changes = self.grab_all(self.auth_db_transaction(modify))
    self.assertEqual({
      'AuthDBChange:AuthRealmsGlobals$globals!9000': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type':
            change_log.AuthDBChange.CHANGE_REALMS_GLOBALS_CHANGED,
        'class_': [u'AuthDBChange', u'AuthRealmsGlobalsChange'],
        'comment': u'Realms config change',
        'permissions_added': [u'luci.dev.p4'],
        'permissions_removed': [u'luci.dev.p2'],
        'target': u'AuthRealmsGlobals$globals',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
  def test_project_realms_diff(self):
    """Checks AuthDBChange entities for AuthProjectRealms create/update/delete.

    Covers: creation, an update touching realms and both revisions, an
    update touching only realms, an update touching only revisions (which
    must emit nothing), and deletion.
    """
    # Note: in reality Realms.api_version is fixed. We change it in this test
    # since it is the simplest field to change.
    def create():
      p = model.AuthProjectRealms(
          key=model.project_realms_key('proj1'),
          realms=realms_pb2.Realms(api_version=123),
          config_rev='config_rev1',
          perms_rev='perms_rev1')
      p.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Created')
      p.put()
    changes = self.grab_all(self.auth_db_transaction(create))
    self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10000': {
        'app_version': u'v1a',
        'auth_db_rev': 1,
        'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CREATED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'Created',
        'config_rev_new': u'config_rev1',
        'perms_rev_new': u'perms_rev1',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    def update(api_version, config_rev, perms_rev):
      p = model.project_realms_key('proj1').get()
      p.realms = realms_pb2.Realms(api_version=api_version)
      p.config_rev = config_rev
      p.perms_rev = perms_rev
      p.record_revision(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Updated')
      p.put()
    # Update everything.
    changes = self.grab_all(self.auth_db_transaction(
        lambda: update(1234, 'config_rev2', 'perms_rev2')))
    self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10100': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'Updated',
        'config_rev_new': u'config_rev2',
        'config_rev_old': u'config_rev1',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
      'AuthDBChange:AuthProjectRealms$proj1!10200': {
        'app_version': u'v1a',
        'auth_db_rev': 2,
        'change_type':
            change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REEVALUATED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'Updated',
        'perms_rev_new': u'perms_rev2',
        'perms_rev_old': u'perms_rev1',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    # Update realms_pb2.Realms, but do not change revisions.
    changes = self.grab_all(self.auth_db_transaction(
        lambda: update(12345, 'config_rev2', 'perms_rev2')))
    self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10100': {
        'app_version': u'v1a',
        'auth_db_rev': 3,
        'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_CHANGED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'Updated',
        'config_rev_new': u'config_rev2',
        'config_rev_old': u'config_rev2',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
    # Update revisions, but don't actually touch realms.
    changes = self.grab_all(self.auth_db_transaction(
        lambda: update(12345, 'config_rev3', 'perms_rev3')))
    self.assertEqual({}, changes)
    def delete():
      p = model.project_realms_key('proj1').get()
      p.record_deletion(
          modified_by=ident('<EMAIL>'),
          modified_ts=utils.utcnow(),
          comment='Deleted')
      p.key.delete()
    changes = self.grab_all(self.auth_db_transaction(delete))
    self.assertEqual({
      'AuthDBChange:AuthProjectRealms$proj1!10300': {
        'app_version': u'v1a',
        'auth_db_rev': 5,
        'change_type': change_log.AuthDBChange.CHANGE_PROJECT_REALMS_REMOVED,
        'class_': [u'AuthDBChange', u'AuthProjectRealmsChange'],
        'comment': u'Deleted',
        'config_rev_old': u'config_rev3',
        'perms_rev_old': u'perms_rev3',
        'target': u'AuthProjectRealms$proj1',
        'when': datetime.datetime(2015, 1, 2, 3, 4, 5),
        'who': model.Identity(kind='user', name='<EMAIL>'),
      },
    }, changes)
class AuthDBChangeTest(test_case.TestCase):
# Test to_jsonish for AuthDBGroupChange and AuthDBIPWhitelistAssignmentChange,
# the rest are trivial.
  def test_group_change_to_jsonish(self):
    """Checks to_jsonish() serialization of AuthDBGroupChange.

    Identities/globs become 'user:...' strings, the change type becomes its
    symbolic name, and 'when' becomes microseconds since epoch.
    """
    c = change_log.AuthDBGroupChange(
        change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
        target='AuthGroup$abc',
        auth_db_rev=123,
        who=ident('<EMAIL>'),
        when=datetime.datetime(2015, 1, 2, 3, 4, 5),
        comment='A comment',
        app_version='v123',
        description='abc',
        members=[ident('<EMAIL>')],
        globs=[glob('*<EMAIL>')],
        nested=['A'],
        owners='abc',
        old_owners='def')
    self.assertEqual({
      'app_version': 'v123',
      'auth_db_rev': 123,
      'change_type': 'GROUP_MEMBERS_ADDED',
      'comment': 'A comment',
      'description': 'abc',
      'globs': ['user:*<EMAIL>'],
      'members': ['user:<EMAIL>'],
      'nested': ['A'],
      'old_description': None,
      'old_owners': 'def',
      'owners': 'abc',
      'target': 'AuthGroup$abc',
      'when': 1420167845000000,
      'who': 'user:<EMAIL>',
    }, c.to_jsonish())
  def test_wl_assignment_to_jsonish(self):
    """Checks to_jsonish() serialization of AuthDBIPWhitelistAssignmentChange.

    The assigned identity serializes to a 'user:...' string and 'when' to
    microseconds since epoch.
    """
    c = change_log.AuthDBIPWhitelistAssignmentChange(
        change_type=change_log.AuthDBChange.CHANGE_GROUP_MEMBERS_ADDED,
        target='AuthIPWhitelistAssignments$default',
        auth_db_rev=123,
        who=ident('<EMAIL>'),
        when=datetime.datetime(2015, 1, 2, 3, 4, 5),
        comment='A comment',
        app_version='v123',
        identity=ident('<EMAIL>'),
        ip_whitelist='whitelist')
    self.assertEqual({
      'app_version': 'v123',
      'auth_db_rev': 123,
      'change_type': 'GROUP_MEMBERS_ADDED',
      'comment': 'A comment',
      'identity': 'user:<EMAIL>',
      'ip_whitelist': 'whitelist',
      'target': 'AuthIPWhitelistAssignments$default',
      'when': 1420167845000000,
      'who': 'user:<EMAIL>',
    }, c.to_jsonish())
def test_security_config_change_to_jsonish(self):
c = change_log.AuthDBConfigChange(
change_type=change_log.AuthDBChange.CHANGE_CONF_SECURITY_CONFIG_CHANGED,
target='AuthGlobalConfig$default',
auth_db_rev=123,
who=ident('<EMAIL>'),
when=datetime.datetime(2015, 1, 2, 3, 4, 5),
comment='A comment',
app_version='v123',
security_config_old=None,
security_config_new=security_config(['hi']))
self.assertEqual({
'app_version': 'v123',
'auth_db_rev': 123,
'change_type': 'CONF_SECURITY_CONFIG_CHANGED',
'comment': 'A comment',
'oauth_additional_client_ids': [],
'oauth_client_id': None,
'oauth_client_secret': None,
'security_config_new': {'internal_service_regexp': [u'hi']},
'security_config_old': None,
| |
parallel_reject_flags_length = Combine(word_reject_flags_prefix +
word_reject_flags_length +
Groups.positions_in_bracket())
one_reject_flags = simple_reject_flags | parallel_reject_flags | parallel_reject_flags_slash | simple_reject_flags_length | parallel_reject_flags_length
return ZeroOrMore(one_reject_flags)
@staticmethod
def unary_cmds():
    """ Parse Commands with no parameters

    : no-op: do nothing to the input word
    l convert to lowercase
    u convert to uppercase
    c capitalize
    C lowercase the first character, and uppercase the rest
    t toggle case of all characters in the word
    r reverse: "Fred" -> "derF"
    d duplicate: "Fred" -> "FredFred"
    f reflect: "Fred" -> "FredderF"
    { rotate the word left: "jsmith" -> "smithj"
    } rotate the word right: "smithj" -> "jsmith"
    [ delete the first character
    ] delete the last character
    q Duplicate every character
    k Swaps first two characters
    K Swaps last two characters
    E Lower case the whole line, then upper case the first letter and every letter after a space
    P "crack" -> "cracked", etc. (lowercase only)
    I "crack" -> "cracking", etc. (lowercase only)
    S shift case: "Crack96" -> "cRACK(^"
    V lowercase vowels, uppercase consonants: "Crack96" -> "CRaCK96"
    M Memorize current word
    Q Reject plains where the memory saved matches current word
    p pluralize: "crack" -> "cracks", etc. JTR Only
    R shift each character right, by keyboard: "Crack96" -> "Vtsvl07" JTR Only
    L shift each character left, by keyboard: "Crack96" -> "Xeaxj85" JTR Only
    """
    # Single-character commands shared by both JtR and Hashcat.
    str_unary_cmds = ":lucCtrdf}{][qkKEPISVMQ"
    if RUNTIME_CONFIG.is_jtr():
        # JTR only pRL
        str_unary_cmds += 'pRL'
        # Some char definitions
        must_escaped_chars = "]["
        # Escape []
        # '[' and ']' clash with JtR's bracketed parallel-command syntax,
        # so remove them from the plain character set ...
        for c in must_escaped_chars:
            str_unary_cmds = str_unary_cmds.replace(c, "")
        simple_unary_cmds = Combine(Word(str_unary_cmds, exact=1))
        # ... and accept them only in backslash-escaped form.
        for c in must_escaped_chars:
            simple_unary_cmds = Literal("\\" + c) | simple_unary_cmds
        # Type 1 [cmds]
        parallel_unary_cmds_1 = Combine(
            Elements._add_brackets(simple_unary_cmds))
        # Type 2 :[cmds]
        parallel_unary_cmds_2 = Combine(
            Literal(":") + parallel_unary_cmds_1)
        # Type 3 \p[cmds] or \p1[cmds]
        parallel_unary_cmds_3 = Combine(
            Elements._create_slash_parallel_cmds(parallel_unary_cmds_1))
        # Type 4 \0-\9
        parallel_unary_cmds_4 = Combine(
            Elements._create_slash_number_cmds())
        # Order of the alternation matters: the more specific slash/colon
        # forms must be tried before the bare single-character form.
        one_unary_cmd = parallel_unary_cmds_4 | parallel_unary_cmds_3 | parallel_unary_cmds_2 | simple_unary_cmds | parallel_unary_cmds_1
    else:
        # Hashcat has no parallel syntax; a command is one plain character.
        simple_unary_cmds = Combine(Word(str_unary_cmds, exact=1))
        one_unary_cmd = simple_unary_cmds
    unary_cmds = ZeroOrMore(one_unary_cmd)
    return unary_cmds
@staticmethod
def binary_cmds():
    """ Parse Commands with 1 parameter

    $X append character X to the word
    ^X prefix the word with character X
    TN toggle case of the character in position N
    'N truncate the word at length N
    DN delete the character in position N
    pN Append duplicated word N times, HC only
    zN Duplicates first character N times
    ZN Duplicates last character N times
    LN Bitwise shift left character @ N, HC only
    RN Bitwise shift right character @ N, HC only
    +N Increment character @ N by 1 ascii value
    -N Decrement character @ N by 1 ascii value, HC only
    .N Replaces character @ N with value at @ N plus 1
    ,N Replaces character @ N with value at @ N minus 1
    yN Duplicates first N characters
    YN Duplicates last N characters
    """
    # commands that take a character as a parameter, and doesn't allow ?c
    str_char_cmds_prefix = "$^"
    # commands that take a position as a parameter
    str_position_cmds_prefix = "T'DzZ+.,yY"
    if RUNTIME_CONFIG.is_hc():
        str_position_cmds_prefix += '-pRL' # HC only pN cmd
    # simple cmds, no parallelism
    simple_char_cmds = Combine(
        Word(str_char_cmds_prefix, exact=1) + Groups.single_char())
    # simple cmds, no parallelism
    simple_position_cmds = Combine(
        Word(str_position_cmds_prefix, exact=1) + Groups.single_position())
    if RUNTIME_CONFIG.is_jtr():
        # JtR also allows the parameter in parallel (bracketed/slash) forms.
        # $[]
        parrallel_char_cmds_1 = Combine(
            Word(str_char_cmds_prefix, exact=1) + Groups.chars_in_bracket())
        # $\p[] $\r[]
        parrallel_char_cmds_2 = Combine(
            Word(str_char_cmds_prefix, exact=1) +
            Elements._create_slash_parallel_cmds(Groups.chars_in_bracket()))
        # $\0-\9
        parrallel_char_cmds_3 = Combine(
            Word(str_char_cmds_prefix, exact=1) +
            Elements._create_slash_number_cmds())
        # Alternation order is significant: slash forms before bracket form.
        char_cmds = simple_char_cmds | parrallel_char_cmds_3 | parrallel_char_cmds_2 | parrallel_char_cmds_1
        # T[]
        parrallel_position_cmds_1 = Combine(
            Word(str_position_cmds_prefix, exact=1) +
            Groups.positions_in_bracket())
        # T\p[] T\r[]
        parrallel_position_cmds_2 = Combine(
            Word(str_position_cmds_prefix, exact=1) + Elements.
            _create_slash_parallel_cmds(Groups.positions_in_bracket()))
        # T\0-\9
        parrallel_position_cmds_3 = Combine(
            Word(str_position_cmds_prefix, exact=1) +
            Elements._create_slash_number_cmds())
        position_cmds = simple_position_cmds | parrallel_position_cmds_3 | parrallel_position_cmds_2 | parrallel_position_cmds_1
    else:
        char_cmds = simple_char_cmds
        position_cmds = simple_position_cmds
    return ZeroOrMore(char_cmds | position_cmds)
@staticmethod
def ternary_cmds():
    r""" Parse Commands with 2 parameters

    AN"STR" insert string STR into the word at position N
    xNM extract substring from position N for up to M characters
    iNX insert character X in position N and shift the rest right
    oNX overstrike character in position N with character X
    ONM Deletes M characters, starting at position N
    *NM Swaps character at position N with character at position M #HC Only
    """
    # Some defs of possible positions
    all_positions = Groups.get_all_possible("simple_position")
    # Some defs of possible chars
    all_chars = Groups.get_all_possible(char_type="char")
    # prefixer
    str_prefix_nx = "oi"
    str_prefix_nm = "xO"
    if RUNTIME_CONFIG.is_hc():
        str_prefix_nm += '*'
    str_prefix_nstr = "A"
    # Building Parser
    # "NX" commands: a position followed by a character parameter.
    nx_cmds = Combine(
        Word(str_prefix_nx, exact=1) + all_positions + all_chars)
    # "NM" commands: two position parameters.
    nm_cmds = Combine(
        Word(str_prefix_nm, exact=1) + all_positions + all_positions)
    all_cmds = nm_cmds | nx_cmds
    if RUNTIME_CONFIG.is_jtr():
        # AN"STR" is JtR-only: the inserted string is double-quoted and the
        # quotes themselves are suppressed from the parse result.
        all_chars_append = Groups.get_all_possible("char_append")
        # build
        nstr_cmds = Combine(
            Word(str_prefix_nstr, exact=1) + all_positions + Suppress('"') +
            OneOrMore(all_chars_append) + Suppress('"'))
        all_cmds = all_cmds | nstr_cmds
    return ZeroOrMore(all_cmds)
@staticmethod
def memory_cmds():
    """ Memory access commands.

    M & Q are defined in simple commands
    4 Append the word saved to memory to current word. HC only
    6 Prepend the word saved to memory to current word. HC only
    XNMI Insert substring of length M starting from position N of word saved to memory at position I
    vVNM update "l" (length), then subtract M from N and assign to variable V. JtR only
    """
    # MQ46
    str_memory_cmds = ""
    if RUNTIME_CONFIG.is_hc():
        str_memory_cmds += "46"
    # NOTE(review): when not running Hashcat this constructs Word("") from
    # an empty character set -- confirm pyparsing tolerates that here.
    simple_memory_cmds = Word(str_memory_cmds, exact=1)
    # xnmi
    all_positions = Groups.get_all_possible("simple_position")
    xnmi_cmd = Combine(
        Word("X", exact=1) + all_positions + all_positions + all_positions)
    # construct all_cmds
    all_cmds = simple_memory_cmds | xnmi_cmd
    # vvnm
    if RUNTIME_CONFIG.is_jtr():
        # Variable name V is a single letter in the range a-k.
        vvnm_cmd = Combine(
            Word("v", exact=1) + Word(srange("[a-k]"), exact=1) +
            all_positions + all_positions)
        all_cmds = all_cmds | vvnm_cmd
    return ZeroOrMore(all_cmds)
@staticmethod
def mode_cmds():
    """ Extra "single crack" mode commands. JtR only

    1 first word only
    2 second word only
    + the concatenation of both (should only be used after a "1" or "2")
    """
    # These commands exist only in JtR; any other tool parses nothing here.
    if RUNTIME_CONFIG['running_style'] == RunningStyle.JTR:
        single_mode_cmd = Combine(Word("12+", exact=1))
        return ZeroOrMore(single_mode_cmd)
    return Empty()
@staticmethod
def length_rejection_cmds():
    """ Rejections that don't involve character class.

    <N reject the word unless it is less than N characters long
    >N reject the word unless it is greater than N characters long
    _N reject plains of length not equal to N
    """
    # Each command is one rejection prefix immediately followed by a length.
    valid_lengths = Groups.get_all_possible("simple_position")
    length_cmd = Combine(Word("><_", exact=1) + valid_lengths)
    return ZeroOrMore(length_cmd)
@staticmethod
def char_class_cmds():
""" Character class commands. Rejections and Transformations that involve character class.
!X reject the word if it contains character X
!?C reject the word if it contains a character in class C
/X reject the word unless it contains character X
/?C reject the word unless it contains a character in class C
=NX reject the word unless character in position N is equal to X
=N?C reject the word unless character in position N is in class C
(X reject the word unless its first character is X
(?C reject the word unless its first character is in class C
)X reject the word unless its last character is X
)?C reject the word unless its last character is in class C
%NX reject the word unless it contains at least N instances of X
%N?C reject the word unless it contains at least N characters of class C
sXY replace all characters X in the word with Y
s?CY replace all characters of class C in the word with Y
@X purge all characters X from the word
@?C purge all characters of class C from the word
eX Lower case the whole line, then upper case the first letter and every letter after a custom separator character
e?C Lower case the whole line, then upper case the first letter and every letter after class C
"""
str_something_x_cmds = "!/)(@e"
str_something_nx_cmds = "=%"
str_something_xy_cmds = "s"
# define valid chars
all_char_class_chars = Groups.get_all_possible("char_for_class")
all_single_chars = Groups.get_all_possible("char")
# define valid positions
all_positions = Groups.get_all_possible("simple_position")
something_x_cmds = Combine(
Word(str_something_x_cmds, exact=1) + all_char_class_chars)
something_nx_cmds | |
images in a TIFF file.
"""
yield self.read_image(verbose=verbose)
while not self.LastDirectory():
self.ReadDirectory()
yield self.read_image(verbose=verbose)
self.SetDirectory(0)
def __del__(self):
    # Finalizer: release the underlying libtiff handle even if the caller
    # never called close() explicitly.
    self.close()
# Thin wrappers around the identically-named libtiff C functions; the
# @debug decorator traces the call when debugging is enabled.
@debug
def FileName(self): return libtiff.TIFFFileName(self)
@debug
def CurrentRow(self): return libtiff.TIFFCurrentRow(self)
@debug
def CurrentStrip(self): return libtiff.TIFFCurrentStrip(self)
@debug
def CurrentTile(self): return libtiff.TIFFCurrentTile(self)
@debug
def CurrentDirectory(self): return libtiff.TIFFCurrentDirectory(self)
@debug
def LastDirectory(self): return libtiff.TIFFLastDirectory(self)
@debug
def ReadDirectory(self): return libtiff.TIFFReadDirectory(self)
@debug
def WriteDirectory(self):
    """Write the current directory (IFD) to the file.

    Raises AssertionError if TIFFWriteDirectory reports failure (!= 1).
    """
    r = libtiff.TIFFWriteDirectory(self)
    # repr() instead of the Python-2-only backtick syntax; same message.
    assert r == 1, repr(r)
# More thin wrappers delegating straight to libtiff.
@debug
def SetDirectory(self, dirnum): return libtiff.TIFFSetDirectory(self, dirnum)
@debug
def Fileno(self): return libtiff.TIFFFileno(self)
@debug
def GetMode(self): return libtiff.TIFFGetMode(self)
@debug
def IsTiled(self): return libtiff.TIFFIsTiled(self)
@debug
def IsByteSwapped(self): return libtiff.TIFFIsByteSwapped(self)
@debug
def IsUpSampled(self): return libtiff.TIFFIsUpSampled(self)
@debug
def IsMSB2LSB(self): return libtiff.TIFFIsMSB2LSB(self)
@debug
def NumberOfStrips(self): return libtiff.TIFFNumberOfStrips(self).value
#@debug
def ReadRawStrip(self, strip, buf, size):
    # Read up to `size` raw (still compressed) bytes of strip `strip`
    # into the caller-supplied buffer; returns the byte count read.
    return libtiff.TIFFReadRawStrip(self, strip, buf, size).value
def ReadEncodedStrip(self, strip, buf, size):
    # Read and decompress strip `strip` into `buf`; returns bytes stored.
    return libtiff.TIFFReadEncodedStrip(self, strip, buf, size).value
def StripSize(self):
    # Size in bytes of one decoded strip of the current directory.
    return libtiff.TIFFStripSize(self).value
def RawStripSize(self, strip):
    """Return the number of bytes in the raw (compressed) strip `strip`.

    Bug fix: this previously called TIFFStripSize(self, strip) --
    TIFFStripSize takes no strip argument and returns the *decoded* strip
    size; the per-strip raw size comes from TIFFRawStripSize.
    """
    return libtiff.TIFFRawStripSize(self, strip).value
@debug
def WriteRawStrip(self, strip, buf, size):
    """Append `size` pre-compressed bytes from `buf` to strip `strip`.

    Raises AssertionError if libtiff wrote fewer bytes than requested.
    """
    r = libtiff.TIFFWriteRawStrip(self, strip, buf, size)
    # repr() instead of the Python-2-only backtick syntax; same message.
    assert r.value == size, repr((r.value, size))
@debug
def WriteEncodedStrip(self, strip, buf, size):
    """Compress `size` bytes from `buf` and write them to strip `strip`.

    Raises AssertionError if libtiff consumed fewer bytes than requested.
    """
    r = libtiff.TIFFWriteEncodedStrip(self, strip, buf, size)
    # repr() instead of the Python-2-only backtick syntax; same message.
    assert r.value == size, repr((r.value, size))
closed = False  # class-level default; flipped to True after TIFFClose runs
def close(self, libtiff=libtiff):
    # `libtiff` is bound as a default argument on purpose: if close() runs
    # during interpreter shutdown (via __del__), module globals may already
    # be cleared, but the default keeps the reference alive.
    if not self.closed and self.value is not None:
        libtiff.TIFFClose(self)
        self.closed = True  # guard against double-close
    return
#def (self): return libtiff.TIFF(self)
#@debug
def GetField(self, tag, ignore_undefined_tag=True, count=None):
    """ Return TIFF field value with tag.

    tag can be numeric constant TIFFTAG_<tagname> or a
    string containing <tagname>.

    Returns None when the tag is unknown or not set in the current
    directory.
    """
    # Pseudo-tags stored as "<name> <value>" text inside ImageDescription
    # rather than as real TIFF fields.
    if tag in ['PixelSizeX', 'PixelSizeY', 'RelativeTime']:
        descr = self.GetField('ImageDescription')
        if not descr:
            return
        i = descr.find (tag)
        if i==-1:
            return
        # NOTE(review): eval() on text read from the file is unsafe for
        # untrusted TIFFs; float()/int() parsing would be safer.
        value = eval(descr[i+len (tag):].lstrip().split()[0])
        return value
    if isinstance(tag, str):
        # Map a name like 'ImageWidth' to the TIFFTAG_IMAGEWIDTH constant.
        tag = eval('TIFFTAG_' + tag.upper())
    t = tifftags.get(tag)
    if t is None:
        if not ignore_undefined_tag:
            print 'Warning: no tag %r defined' % (tag)
        return
    # (ctypes data type for the field, converter from ctypes to Python).
    data_type, convert = t
    if tag == TIFFTAG_COLORMAP:
        # ColorMap is special: TIFFGetField fills three uint16 arrays
        # (red, green, blue), each with 2**BitsPerSample entries.
        bps = self.GetField("BitsPerSample")
        if bps is None:
            print "Warning: BitsPerSample is required to get ColorMap, assuming 8 bps..."
            bps = 8
        num_cmap_elems = 1 << bps
        data_type = data_type * num_cmap_elems
        pdt = ctypes.POINTER(data_type)
        rdata = pdt()
        gdata = pdt()
        bdata = pdt()
        rdata_ptr = ctypes.byref(rdata)
        gdata_ptr = ctypes.byref(gdata)
        bdata_ptr = ctypes.byref(bdata)
        # ignore count, it's not used for colormap
        # Re-declare argtypes per call because TIFFGetField is variadic.
        libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.c_void_p]*3
        r = libtiff.TIFFGetField(self, tag, rdata_ptr, gdata_ptr, bdata_ptr)
        data = (rdata,gdata,bdata)
    else:
        if issubclass(data_type, ctypes.Array):
            pdt = ctypes.POINTER(data_type)
            data = pdt()
        else:
            data = data_type()
        # NOTE(review): leftover debug print -- emitted on every call.
        print '-------------------------', data
        if count is None:
            libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.c_void_p]
            r = libtiff.TIFFGetField(self, tag, ctypes.byref(data))
        else:
            # Some tags also return an element count before the data.
            count = ctypes.c_int(count)
            libtiff.TIFFGetField.argtypes = libtiff.TIFFGetField.argtypes[:2] + [ctypes.POINTER(ctypes.c_int), ctypes.c_void_p]
            r = libtiff.TIFFGetField(self, tag, ctypes.byref(count), ctypes.byref(data))
    if not r: # tag not defined for current directory
        if not ignore_undefined_tag:
            print 'Warning: tag %r not defined in currect directory' % (tag)
        return None
    return convert(data)
#@debug
def SetField(self, tag, value, count=None):
    """ Set TIFF field value with tag.

    tag can be numeric constant TIFFTAG_<tagname> or a
    string containing <tagname>.

    Returns the libtiff return code (or None on an unknown tag /
    malformed ColorMap value).
    """
    if isinstance(tag, str):
        # Map a name like 'ImageWidth' to the TIFFTAG_IMAGEWIDTH constant.
        tag = eval('TIFFTAG_' + tag.upper())
    t = tifftags.get(tag)
    if t is None:
        print 'Warning: no tag %r defined' % (tag)
        return
    # (ctypes data type for the field, converter -- unused when setting).
    data_type, convert = t
    #if data_type == ctypes.c_float:
    #    data_type = ctypes.c_double
    if tag == TIFFTAG_COLORMAP:
        # ColorMap passes 3 values each a c_uint16 pointer
        try:
            r_arr,g_arr,b_arr = value
        except (TypeError, ValueError):
            print "Error: TIFFTAG_COLORMAP expects 3 uint16* arrays as a list/tuple of lists"
            r_arr,g_arr,b_arr = None,None,None
        if r_arr is None:
            return
        # Each channel array must hold 2**BitsPerSample entries.
        bps = self.GetField("BitsPerSample")
        if bps is None:
            print "Warning: BitsPerSample is required to get ColorMap, assuming 8 bps..."
            bps = 8
        num_cmap_elems = 1 << bps
        data_type = data_type * num_cmap_elems
        r_ptr = data_type(*r_arr)
        g_ptr = data_type(*g_arr)
        b_ptr = data_type(*b_arr)
        # Re-declare argtypes per call because TIFFSetField is variadic.
        libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [ctypes.POINTER(data_type)]*3
        r = libtiff.TIFFSetField(self, tag, r_ptr, g_ptr, b_ptr)
    else:
        if issubclass(data_type, ctypes.Array):
            data = data_type(*value)
        elif issubclass(data_type, ctypes._Pointer): # does not include c_char_p
            # convert to the base type, ctypes will take care of actually
            # sending it by reference
            base_type = data_type._type_
            # NOTE(review): collections.Iterable is collections.abc.Iterable
            # on Python 3 -- confirm which interpreter this file targets.
            if isinstance(value, collections.Iterable):
                data = base_type(*value)
            else:
                data = base_type(value)
        else:
            data = data_type(value)
        # TODO: for most of the tags, count is len(value), so it shouldn't be needed
        if count is None:
            libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [data_type]
            r = libtiff.TIFFSetField(self, tag, data)
        else:
            libtiff.TIFFSetField.argtypes = libtiff.TIFFSetField.argtypes[:2] + [ctypes.c_uint, data_type]
            r = libtiff.TIFFSetField(self, tag, count, data)
    return r
def info(self):
    """ Return a string containing <tag name: field value> map.

    Iterates a fixed list of well-known tags, skipping those not set in
    the current directory; integer values are translated back to their
    symbolic names where a mapping exists.
    """
    l = []
    l.append ('FileName: %s' % (self.FileName()))
    for tagname in ['Artist', 'CopyRight', 'DateTime', 'DocumentName',
                    'HostComputer', 'ImageDescription', 'InkNames',
                    'Make', 'Model', 'PageName', 'Software', 'TargetPrinter',
                    'BadFaxLines', 'ConsecutiveBadFaxLines',
                    'Group3Options', 'Group4Options',
                    'ImageDepth', 'ImageWidth', 'ImageLength',
                    'RowsPerStrip', 'SubFileType',
                    'TileDepth', 'TileLength', 'TileWidth',
                    'StripByteCounts', 'StripOffSets',
                    'TileByteCounts', 'TileOffSets',
                    'BitsPerSample', 'CleanFaxData', 'Compression',
                    'DataType', 'FillOrder', 'InkSet', 'Matteing',
                    'MaxSampleValue', 'MinSampleValue', 'Orientation',
                    'PhotoMetric', 'PlanarConfig', 'Predictor',
                    'ResolutionUnit', 'SampleFormat', 'YCBCRPositioning',
                    'JPEGQuality', 'JPEGColorMode', 'JPEGTablesMode',
                    'FaxMode', 'SMaxSampleValue', 'SMinSampleValue',
                    #'Stonits',
                    'XPosition', 'YPosition', 'XResolution', 'YResolution',
                    'PrimaryChromaticities', 'ReferenceBlackWhite',
                    'WhitePoint', 'YCBCRCoefficients',
                    'PixelSizeX','PixelSizeY', 'RelativeTime',
                    'CZ_LSMInfo'
                    ]:
        v = self.GetField(tagname)
        if v:
            if isinstance (v, int):
                # Translate numeric enum values back to symbolic names.
                v = define_to_name_map.get(tagname, {}).get(v, v)
            l.append('%s: %s' % (tagname, v))
            if tagname=='CZ_LSMInfo':
                # NOTE(review): prints to stdout instead of appending to the
                # returned string.
                print CZ_LSMInfo(self)
    return '\n'.join(l)
def copy(self, filename, **kws):
    """ Copy opened TIFF file to a new file.

    Use keyword arguments to redefine tag values.

    Parameters
    ----------
    filename : str
      Specify the name of file where TIFF file is copied to.
    compression : {'none', 'lzw', 'deflate', ...}
      Specify compression scheme.
    bitspersample : {8,16,32,64,128,256}
      Specify bit size of a sample.
    sampleformat : {'uint', 'int', 'float', 'complex'}
      Specify sample format.
    """
    other = TIFF.open(filename, mode='w')
    # Map of tag define -> overriding value built from the keyword args.
    define_rewrite = {}
    for name, value in kws.items():
        define = TIFF.get_tag_define(name)
        assert define is not None
        if name=='compression':
            value = TIFF._fix_compression(value)
        if name=='sampleformat':
            value = TIFF._fix_sampleformat(value)
        define_rewrite[define] = value
    name_define_list = name_to_define_map['TiffTag'].items()
    self.SetDirectory(0)
    self.ReadDirectory()
    # Copy every directory (page) of the source file.
    while 1:
        other.SetDirectory(self.CurrentDirectory())
        bits = self.GetField('BitsPerSample')
        sample_format = self.GetField('SampleFormat')
        # NOTE(review): the assert message references `dtype` before it is
        # assigned below -- a failing assert here raises NameError instead.
        assert bits >=8, `bits, sample_format, dtype`
        itemsize = bits // 8
        dtype = self.get_numpy_type(bits, sample_format)
        # Copy (or rewrite) every tag except the strip/tile layout tags,
        # which libtiff recomputes when the data is written.
        for name, define in name_define_list:
            orig_value = self.GetField(define)
            if orig_value is None and define not in define_rewrite:
                continue
            if name.endswith('OFFSETS') or name.endswith('BYTECOUNTS'):
                continue
            if define in define_rewrite:
                value = define_rewrite[define]
            else:
                value = orig_value
            if value is None:
                continue
            other.SetField(define, value)
        new_bits = other.GetField('BitsPerSample')
        new_sample_format = other.GetField('SampleFormat')
        new_dtype = other.get_numpy_type(new_bits, new_sample_format)
        assert new_bits >=8, `new_bits, new_sample_format, new_dtype`
        new_itemsize = new_bits // 8
        strip_size = self.StripSize()
        # NOTE(review): computed from self, not other -- looks like it was
        # meant to be other.StripSize(); value is also never used below.
        new_strip_size = self.StripSize()
        buf = np.zeros(strip_size // itemsize, dtype)
        # Transcode strip by strip, converting sample dtype if it changed.
        for strip in range(self.NumberOfStrips()):
            elem = self.ReadEncodedStrip(strip, buf.ctypes.data, strip_size)
            if elem>0:
                new_buf = buf.astype(new_dtype)
                other.WriteEncodedStrip(strip, new_buf.ctypes.data, (elem * new_itemsize)//itemsize)
        self.ReadDirectory()
        if self.LastDirectory ():
            break
    other.close ()
class TIFF3D(TIFF):
""" subclass of TIFF for handling import of 3D (multi-directory) files.
like TIFF, but TIFF3D.read_image() will attempt to restore a 3D numpy array
when given a multi-image TIFF file; performing the inverse of
TIFF_instance.write(numpy.zeros((40, 200, 200)))
like so:
arr = TIFF3D_instance.read_image()
arr.shape # gives (40, 200, 200)
if you tried this with a normal TIFF instance, you would get this:
arr = TIFF_instance.read_image()
arr.shape # gives (200, 200)
and you would have to loop over each image by hand with TIFF.iter_images().
"""
@classmethod
def open(cls, filename, mode='r'):
    """ just like TIFF.open, except returns a TIFF3D instance.
    """
    # monkey-patch the restype:
    # NOTE(review): mutating the shared libtiff.TIFFOpen.restype is not
    # safe if another thread opens a TIFF concurrently.
    old_restype = libtiff.TIFFOpen.restype
    libtiff.TIFFOpen.restype = TIFF3D
    # actually call the library function:
    tiff = libtiff.TIFFOpen(filename, mode)
    # restore the old restype:
    libtiff.TIFFOpen.restype = old_restype
    if tiff.value is None:
        raise TypeError ('Failed to open file '+`filename`)
    return tiff
@debug
def read_image(self, verbose=False, as3d=True):
""" Read image from TIFF and return it as a numpy array.
If as3d is passed True (default), will attempt to read multiple
directories, and restore as slices in a 3D array. ASSUMES that all
images in the tiff file have the same width, height, bits-per-sample,
compression, and so on. If you get a segfault, this is probably the
problem.
"""
if not as3d:
return TIFF.read_image(self, verbose)
# Code is initially copy-paste from TIFF:
width = self.GetField('ImageWidth')
height = self.GetField('ImageLength')
bits = self.GetField('BitsPerSample')
sample_format = self.GetField('SampleFormat')
compression = self.GetField('Compression')
typ = self.get_numpy_type(bits, sample_format)
if typ is None:
if bits==1:
typ = np.uint8
itemsize = 1
elif bits==4:
typ = np.uint32
| |
for this instruction.
op = 0xa5
#: The JVM instruction name as appears in the specification.
name = 'if_acmpeq'
#: Alias for the `name` property.
mnemonic = name
#: List of operands this instruction takes, if any.
fmt = (('>h', 'B'),)
#: True if this instruction can be prefixed by WIDE.
can_be_wide = False
class if_acmpne(Instruction):
    """Branch if the two top-of-stack reference values are not equal."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa6
    #: The JVM instruction name as appears in the specification.
    name = 'if_acmpne'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmpeq(Instruction):
    """Branch if the two top-of-stack int values are equal."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9f
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmpeq'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmpne(Instruction):
    """Branch if the two top-of-stack int values are not equal."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa0
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmpne'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmplt(Instruction):
    """Branch if the first int pushed is less than the second."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa1
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmplt'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmpge(Instruction):
    """Branch if the first int pushed is greater than or equal to the second."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa2
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmpge'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmpgt(Instruction):
    """Branch if the first int pushed is greater than the second."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa3
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmpgt'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class if_icmple(Instruction):
    """Branch if the first int pushed is less than or equal to the second."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xa4
    #: The JVM instruction name as appears in the specification.
    name = 'if_icmple'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifeq(Instruction):
    """Branch if the top-of-stack int value is equal to zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x99
    #: The JVM instruction name as appears in the specification.
    name = 'ifeq'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifne(Instruction):
    """Branch if the top-of-stack int value is not equal to zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9a
    #: The JVM instruction name as appears in the specification.
    name = 'ifne'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class iflt(Instruction):
    """Branch if the top-of-stack int value is less than zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9b
    #: The JVM instruction name as appears in the specification.
    name = 'iflt'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifge(Instruction):
    """Branch if the top-of-stack int value is greater than or equal to zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9c
    #: The JVM instruction name as appears in the specification.
    name = 'ifge'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifgt(Instruction):
    """Branch if the top-of-stack int value is greater than zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9d
    #: The JVM instruction name as appears in the specification.
    name = 'ifgt'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifle(Instruction):
    """Branch if the top-of-stack int value is less than or equal to zero."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x9e
    #: The JVM instruction name as appears in the specification.
    name = 'ifle'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifnonnull(Instruction):
    """Branch if the top-of-stack reference is not null."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xc7
    #: The JVM instruction name as appears in the specification.
    name = 'ifnonnull'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class ifnull(Instruction):
    """Branch if the top-of-stack reference is null."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0xc6
    #: The JVM instruction name as appears in the specification.
    name = 'ifnull'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>h', 'B'),)  # signed 16-bit branch offset
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class iinc(Instruction):
    """Increment a local variable by a signed constant."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x84
    #: The JVM instruction name as appears in the specification.
    name = 'iinc'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>B', 'I'), ('>B', 'L'))  # local variable index, increment
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = True
class iload(Instruction):
    """Load an int from the indexed local variable onto the operand stack."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x15
    #: The JVM instruction name as appears in the specification.
    name = 'iload'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = (('>B', 'I'),)  # local variable index
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = True
class iload_0(Instruction):
    """Load an int from local variable 0 onto the operand stack."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x1a
    #: The JVM instruction name as appears in the specification.
    name = 'iload_0'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()  # implicit index: no operands
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class iload_1(Instruction):
    """Load an int from local variable 1 onto the operand stack."""
    __slots__ = ()
    #: Numerical opcode for this instruction.
    op = 0x1b
    #: The JVM instruction name as appears in the specification.
    name = 'iload_1'
    #: Alias for the `name` property.
    mnemonic = name
    #: List of operands this instruction takes, if any.
    fmt = ()  # implicit index: no operands
    #: True if this instruction can be prefixed by WIDE.
    can_be_wide = False
class iload_2(Instruction):
""""""
__slots__ = ()
#: Numerical opcode for this instruction.
op = 0x1c
#: The JVM instruction name as appears in the specification.
name = 'iload_2'
#: Alias for the `name` property.
mnemonic = name
#: List of operands this instruction takes, if any.
fmt = ()
| |
str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('disang_file', 'disang%02d' %int(i)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('disang_file', 'disang%02d' %int(i)))
# Create preparation files
if (stage == 'prep'):
for i in range(0, num_sim):
with open('../amber_files/mdin-prep', "rt") as fin:
with open("./mdin-%03d" %int(i), "wt") as fout:
if i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('disang_file', 'disang%03d' %int(i)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('disang_file', 'disang%03d' %int(i)))
# Create free energy files
if (stage == 'fe'):
if (comp != 'c' and comp != 'r'):
for i in range(0, num_sim+1):
with open('../amber_files/mdin-rest', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('disang_file', 'disang'))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('disang_file', 'disang'))
else:
for i in range(0, num_sim+1):
with open('../amber_files/mdin-lig', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('disang_file', 'disang'))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('disang_file', 'disang'))
# Create running scripts for local and server
if (stage == 'fe'):
if (comp != 'c' and comp != 'r'):
with open('../run_files/local-'+stage+'.bash', "rt") as fin:
with open("./run-local.bash", "wt") as fout:
for line in fin:
fout.write(line)
with open('../run_files/PBS-'+stage, "rt") as fin:
with open("./PBS-run", "wt") as fout:
for line in fin:
fout.write(line.replace('STAGE', pose).replace('POSE', '%s%02d' %(comp, int(win))))
else:
with open('../run_files/local-lig.bash', "rt") as fin:
with open("./run-local.bash", "wt") as fout:
for line in fin:
fout.write(line)
with open('../run_files/PBS-lig', "rt") as fin:
with open("./PBS-run", "wt") as fout:
for line in fin:
fout.write(line.replace('STAGE', pose).replace('POSE', '%s%02d' %(comp, int(win))))
else:
with open('../run_files/local-'+stage+'.bash', "rt") as fin:
with open("./run-local.bash", "wt") as fout:
for line in fin:
fout.write(line.replace('RANGE', str(rng)))
with open('../run_files/PBS-'+stage, "rt") as fin:
with open("./PBS-run", "wt") as fout:
for line in fin:
fout.write(line.replace('STAGE', stage).replace('POSE', pose).replace('RANGE', str(rng)))
os.chdir('../')
def dec_files(temperature, mol, num_sim, pose, comp, win, stage, steps1, steps2, weight, lambdas, dec_method, ntwx):
# Find anchors
with open('disang.rest', 'r') as f:
data = f.readline().split()
L1 = data[6].strip()
L2 = data[7].strip()
L3 = data[8].strip()
# Get number of atoms in vacuum
with open('./vac.pdb') as myfile:
data = myfile.readlines()
vac_atoms = data[-3][6:11].strip()
if (comp == 'v'):
# Create simulation files for vdw decoupling
if (dec_method == 'sdr'):
# Simulation files for simultaneous decoupling
with open('./vac.pdb') as myfile:
data = myfile.readlines()
mk2 = int(data[-3][22:26].strip())
mk1 = int(mk2 - 1)
for i in range(0, num_sim+1):
with open('../amber_files/mdin-lj', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
mdin = open("./mdin-%02d" %int(i), 'a')
mdin.write(' mbar_states = %02d\n' %len(lambdas))
mdin.write(' mbar_lambda = ')
for i in range(0, len(lambdas)):
mdin.write(' %6.5f,' %(lambdas[i]))
mdin.write('\n')
mdin.write(' infe = 1,\n')
mdin.write(' /\n')
mdin.write(' &pmd \n')
mdin.write(' output_file = \'cmass.txt\' \n')
mdin.write(' output_freq = %02d \n' % int(ntwx))
mdin.write(' cv_file = \'cv.in\' \n')
mdin.write(' /\n')
mdin.write(' &wt type = \'END\' , /\n')
mdin.write('DISANG=disang.rest\n')
mdin.write('LISTOUT=POUT\n')
with open("../amber_files/eqnpt-lj.in", "rt") as fin:
with open("./eqnpt.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
with open("../amber_files/heat-lj.in", "rt") as fin:
with open("./heat.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
# Simulation files for double decoupling
elif (dec_method == 'dd'):
with open('./vac.pdb') as myfile:
data = myfile.readlines()
mk1 = int(data[-3][22:26].strip())
for i in range(0, num_sim+1):
with open('../amber_files/mdin-lj-dd', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)))
mdin = open("./mdin-%02d" %int(i), 'a')
mdin.write(' mbar_states = %02d\n' %len(lambdas))
mdin.write(' mbar_lambda = ')
for i in range(0, len(lambdas)):
mdin.write(' %6.5f,' %(lambdas[i]))
mdin.write('\n')
mdin.write(' infe = 1,\n')
mdin.write(' /\n')
mdin.write(' &pmd \n')
mdin.write(' output_file = \'cmass.txt\' \n')
mdin.write(' output_freq = %02d \n' % int(ntwx))
mdin.write(' cv_file = \'cv.in\' \n')
mdin.write(' /\n')
mdin.write(' &wt type = \'END\' , /\n')
mdin.write('DISANG=disang.rest\n')
mdin.write('LISTOUT=POUT\n')
with open("../amber_files/eqnpt-lj-dd.in", "rt") as fin:
with open("./eqnpt.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)))
with open("../amber_files/heat-lj-dd.in", "rt") as fin:
with open("./heat.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)))
# Create running scripts for local and server
with open('../run_files/local-dd.bash', "rt") as fin:
with open("./run-local.bash", "wt") as fout:
for line in fin:
fout.write(line)
with open('../run_files/PBS-dd', "rt") as fin:
with open("./PBS-run", "wt") as fout:
for line in fin:
fout.write(line.replace('STAGE', pose).replace('POSE', '%s%02d' %(comp, int(win))))
if (comp == 'e'):
# Create simulation files for charge decoupling
if (dec_method == 'sdr'):
# Simulation files for simultaneous decoupling
with open('./vac.pdb') as myfile:
data = myfile.readlines()
mk4 = int(data[-3][22:26].strip())
mk3 = int(mk4 - 1)
mk2 = int(mk4 - 2)
mk1 = int(mk4 - 3)
for i in range(0, num_sim+1):
with open('../amber_files/mdin-ch', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)).replace('mk3',str(mk3)).replace('mk4',str(mk4)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)).replace('mk3',str(mk3)).replace('mk4',str(mk4)))
mdin = open("./mdin-%02d" %int(i), 'a')
mdin.write(' mbar_states = %02d\n' %len(lambdas))
mdin.write(' mbar_lambda = ')
for i in range(0, len(lambdas)):
mdin.write(' %6.5f,' %(lambdas[i]))
mdin.write('\n')
mdin.write(' infe = 1,\n')
mdin.write(' /\n')
mdin.write(' &pmd \n')
mdin.write(' output_file = \'cmass.txt\' \n')
mdin.write(' output_freq = %02d \n' % int(ntwx))
mdin.write(' cv_file = \'cv.in\' \n')
mdin.write(' /\n')
mdin.write(' &wt type = \'END\' , /\n')
mdin.write('DISANG=disang.rest\n')
mdin.write('LISTOUT=POUT\n')
with open("../amber_files/eqnpt-ch.in", "rt") as fin:
with open("./eqnpt.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)).replace('mk3',str(mk3)).replace('mk4',str(mk4)))
with open("../amber_files/heat-ch.in", "rt") as fin:
with open("./heat.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)).replace('mk3',str(mk3)).replace('mk4',str(mk4)))
elif (dec_method == 'dd'):
with open('./vac.pdb') as myfile:
# Simulation files for double decoupling
data = myfile.readlines()
mk2 = int(data[-3][22:26].strip())
mk1 = int(mk2 - 1)
for i in range(0, num_sim+1):
with open('../amber_files/mdin-ch-dd', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
else:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
mdin = open("./mdin-%02d" %int(i), 'a')
mdin.write(' mbar_states = %02d\n' %len(lambdas))
mdin.write(' mbar_lambda = ')
for i in range(0, len(lambdas)):
mdin.write(' %6.5f,' %(lambdas[i]))
mdin.write('\n')
mdin.write(' infe = 1,\n')
mdin.write(' /\n')
mdin.write(' &pmd \n')
mdin.write(' output_file = \'cmass.txt\' \n')
mdin.write(' output_freq = %02d \n' % int(ntwx))
mdin.write(' cv_file = \'cv.in\' \n')
mdin.write(' /\n')
mdin.write(' &wt type = \'END\' , /\n')
mdin.write('DISANG=disang.rest\n')
mdin.write('LISTOUT=POUT\n')
with open("../amber_files/eqnpt-ch-dd.in", "rt") as fin:
with open("./eqnpt.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
with open("../amber_files/heat-ch-dd.in", "rt") as fin:
with open("./heat.in", "wt") as fout:
for line in fin:
fout.write(line.replace('_temperature_', str(temperature)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
# Create running scripts for local and server
with open('../run_files/local-dd.bash', "rt") as fin:
with open("./run-local.bash", "wt") as fout:
for line in fin:
fout.write(line)
with open('../run_files/PBS-dd', "rt") as fin:
with open("./PBS-run", "wt") as fout:
for line in fin:
fout.write(line.replace('STAGE', pose).replace('POSE', '%s%02d' %(comp, int(win))))
if (comp == 'f'):
mk1 = '1'
mk2 = '2'
for i in range(0, num_sim+1):
with open('../amber_files/mdin-ch-dd', "rt") as fin:
with open("./mdin-%02d" %int(i), "wt") as fout:
if i == 1 or i == 0:
for line in fin:
if not 'restraint' in line and not 'ntr = 1' in line and not 'ntwprt' in line and not 'infe' in line:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps1)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
else:
for line in fin:
if not 'restraint' in line and not 'ntr = 1' in line and not 'ntwprt' in line and not 'infe' in line:
fout.write(line.replace('_temperature_', str(temperature)).replace('_num-atoms_', str(vac_atoms)).replace('_num-steps_', str(steps2)).replace('lbd_val', '%6.5f' %float(weight)).replace('mk1',str(mk1)).replace('mk2',str(mk2)))
mdin = open("./mdin-%02d" %int(i), 'a')
mdin.write(' mbar_states = %02d\n' %len(lambdas))
mdin.write(' mbar_lambda = ')
for i in range(0, len(lambdas)):
mdin.write(' %6.5f,' %(lambdas[i]))
mdin.write('\n')
mdin.write(' /\n')
mdin.write(' &wt type = \'END\' , /\n')
mdin.write('DISANG=disang.rest\n')
mdin.write('LISTOUT=POUT\n')
with open("../amber_files/heat-ch-lig.in", "rt") as fin:
with open("./heat.in", "wt") as fout:
for | |
event.vertices[0]
mu.setTrackForDxyDz(self.cfg_ana.muon_dxydz_track)
# Set tight id if specified
if hasattr(self.cfg_ana, "mu_tightId"):
for mu in allmuons:
mu.tightIdResult = mu.muonID(self.cfg_ana.mu_tightId)
# Compute relIso in 0.3 and 0.4 cones
for mu in allmuons:
if self.cfg_ana.mu_isoCorr=="rhoArea" :
mu.absIso03 = (mu.pfIsolationR03().sumChargedHadronPt + max( mu.pfIsolationR03().sumNeutralHadronEt + mu.pfIsolationR03().sumPhotonEt - mu.rho * mu.EffectiveArea03,0.0))
mu.absIso04 = (mu.pfIsolationR04().sumChargedHadronPt + max( mu.pfIsolationR04().sumNeutralHadronEt + mu.pfIsolationR04().sumPhotonEt - mu.rho * mu.EffectiveArea04,0.0))
elif self.cfg_ana.mu_isoCorr=="deltaBeta" :
mu.absIso03 = (mu.pfIsolationR03().sumChargedHadronPt + max( mu.pfIsolationR03().sumNeutralHadronEt + mu.pfIsolationR03().sumPhotonEt - mu.pfIsolationR03().sumPUPt/2,0.0))
mu.absIso04 = (mu.pfIsolationR04().sumChargedHadronPt + max( mu.pfIsolationR04().sumNeutralHadronEt + mu.pfIsolationR04().sumPhotonEt - mu.pfIsolationR04().sumPUPt/2,0.0))
else :
raise RuntimeError("Unsupported mu_isoCorr name '" + str(self.cfg_ana.mu_isoCorr) + "'! For now only 'rhoArea' and 'deltaBeta' are supported.")
mu.relIso03 = mu.absIso03/mu.pt()
mu.relIso04 = mu.absIso04/mu.pt()
return allmuons
    def makeAllElectrons(self, event):
        """
        make a list of all electrons, and apply basic corrections to them

        Steps, in order:
          1. wrap the PAT electrons from the 'electrons' handle;
          2. drop duplicates (fast-sim artifact: same pt/eta/phi within 1e-6
             and same charge);
          3. attach rho, conversions, beamspot and isolation effective areas
             (table chosen by ``self.eleEffectiveArea``);
          4. optionally apply electron energy-scale corrections;
          5. attach an associated vertex and the event (needed by the MVA id);
          6. compute rho-area or delta-beta corrected absolute/relative
             isolation in R=0.3 and R=0.4 cones;
          7. evaluate the configured tight id and the HLT-safe cut-based id.

        Returns the corrected list of Electron objects.
        """
        allelectrons = map( Electron, self.handles['electrons'].product() )
        ## Duplicate removal for fast sim (to be checked if still necessary in latest greatest 5.3.X releases)
        # O(n^2) pairwise comparison; electron multiplicity per event is small.
        allelenodup = []
        for e in allelectrons:
            dup = False
            for e2 in allelenodup:
                if abs(e.pt()-e2.pt()) < 1e-6 and abs(e.eta()-e2.eta()) < 1e-6 and abs(e.phi()-e2.phi()) < 1e-6 and e.charge() == e2.charge():
                    dup = True
                    break
            if not dup: allelenodup.append(e)
        allelectrons = allelenodup
        # fill EA for rho-corrected isolation
        convs, bspot = self.handles['conversions'].product(), self.handles['beamspot'].product()
        for ele in allelectrons:
            ele.rho = float(self.handles['rhoEle'].product()[0])
            ele.rhoHLT = float(self.handles['rhoEleHLT'].product()[0])
            # conversions + beamspot are needed downstream for conversion veto
            ele.conversions = convs
            ele.beamspot = bspot
            # Effective-area tables: values are era-specific constants taken
            # from the official EGamma recommendations (see per-branch links).
            if self.eleEffectiveArea == "Data2012":
                # https://twiki.cern.ch/twiki/bin/viewauth/CMS/EgammaEARhoCorrection?rev=14
                SCEta = abs(ele.superCluster().eta())
                if SCEta < 1.0  : ele.EffectiveArea03 = 0.13 # 0.130;
                elif SCEta < 1.479: ele.EffectiveArea03 = 0.14 # 0.137;
                elif SCEta < 2.0  : ele.EffectiveArea03 = 0.07 # 0.067;
                elif SCEta < 2.2  : ele.EffectiveArea03 = 0.09 # 0.089;
                elif SCEta < 2.3  : ele.EffectiveArea03 = 0.11 # 0.107;
                elif SCEta < 2.4  : ele.EffectiveArea03 = 0.11 # 0.110;
                else              : ele.EffectiveArea03 = 0.14 # 0.138;
                if SCEta < 1.0  : ele.EffectiveArea04 = 0.208;
                elif SCEta < 1.479: ele.EffectiveArea04 = 0.209;
                elif SCEta < 2.0  : ele.EffectiveArea04 = 0.115;
                elif SCEta < 2.2  : ele.EffectiveArea04 = 0.143;
                elif SCEta < 2.3  : ele.EffectiveArea04 = 0.183;
                elif SCEta < 2.4  : ele.EffectiveArea04 = 0.194;
                else              : ele.EffectiveArea04 = 0.261;
            elif self.eleEffectiveArea == "Phys14_25ns_v1":
                # NOTE: this table bins in |eta|, not supercluster |eta|.
                aeta = abs(ele.eta())
                if   aeta < 0.800: ele.EffectiveArea03 = 0.1013
                elif aeta < 1.300: ele.EffectiveArea03 = 0.0988
                elif aeta < 2.000: ele.EffectiveArea03 = 0.0572
                elif aeta < 2.200: ele.EffectiveArea03 = 0.0842
                else:              ele.EffectiveArea03 = 0.1530
                if   aeta < 0.800: ele.EffectiveArea04 = 0.1830
                elif aeta < 1.300: ele.EffectiveArea04 = 0.1734
                elif aeta < 2.000: ele.EffectiveArea04 = 0.1077
                elif aeta < 2.200: ele.EffectiveArea04 = 0.1565
                else:              ele.EffectiveArea04 = 0.2680
            elif self.eleEffectiveArea == "Spring15_50ns_v1":
                SCEta = abs(ele.superCluster().eta())
                ## ----- https://github.com/ikrav/cmssw/blob/egm_id_747_v2/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_50ns.txt
                if   SCEta < 0.800: ele.EffectiveArea03 = 0.0973
                elif SCEta < 1.300: ele.EffectiveArea03 = 0.0954
                elif SCEta < 2.000: ele.EffectiveArea03 = 0.0632
                elif SCEta < 2.200: ele.EffectiveArea03 = 0.0727
                else:              ele.EffectiveArea03 = 0.1337
                # warning: EAs not computed for cone DR=0.4 yet. Do not correct
                ele.EffectiveArea04 = 0.0
            elif self.eleEffectiveArea == "Spring15_25ns_v1":
                SCEta = abs(ele.superCluster().eta())
                ## ----- https://github.com/ikrav/cmssw/blob/egm_id_747_v2/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt
                if   SCEta < 1.000: ele.EffectiveArea03 = 0.1752
                elif SCEta < 1.479: ele.EffectiveArea03 = 0.1862
                elif SCEta < 2.000: ele.EffectiveArea03 = 0.1411
                elif SCEta < 2.200: ele.EffectiveArea03 = 0.1534
                elif SCEta < 2.300: ele.EffectiveArea03 = 0.1903
                elif SCEta < 2.400: ele.EffectiveArea03 = 0.2243
                else:              ele.EffectiveArea03 = 0.2687
                # warning: EAs not computed for cone DR=0.4 yet. Do not correct
                ele.EffectiveArea04 = 0.0
            elif self.eleEffectiveArea == "Spring16_25ns_v1":
                SCEta = abs(ele.superCluster().eta())
                ## ----- https://github.com/ikrav/cmssw/blob/egm_id_747_v2/RecoEgamma/ElectronIdentification/data/Spring15/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_25ns.txt
                if   SCEta < 1.000: ele.EffectiveArea03 = 0.1703
                elif SCEta < 1.479: ele.EffectiveArea03 = 0.1715
                elif SCEta < 2.000: ele.EffectiveArea03 = 0.1213
                elif SCEta < 2.200: ele.EffectiveArea03 = 0.1230
                elif SCEta < 2.300: ele.EffectiveArea03 = 0.1635
                elif SCEta < 2.400: ele.EffectiveArea03 = 0.1937
                else:              ele.EffectiveArea03 = 0.2393
                # warning: EAs not computed for cone DR=0.4 yet. Do not correct
                ele.EffectiveArea04 = 0.0
            elif self.eleEffectiveArea == "Fall17":
                SCEta = abs(ele.superCluster().eta())
                ## from RecoEgamma/ElectronIdentification/data/Fall17/effAreaElectrons_cone03_pfNeuHadronsAndPhotons_92X.txt
                if   SCEta < 1.000: ele.EffectiveArea03 = 0.1566
                elif SCEta < 1.479: ele.EffectiveArea03 = 0.1626
                elif SCEta < 2.000: ele.EffectiveArea03 = 0.1073
                elif SCEta < 2.200: ele.EffectiveArea03 = 0.0854
                elif SCEta < 2.300: ele.EffectiveArea03 = 0.1051
                elif SCEta < 2.400: ele.EffectiveArea03 = 0.1204
                else:              ele.EffectiveArea03 = 0.1524
                # warning: EAs not computed for cone DR=0.4, use the values for DR=0.3 scaled by 16/9 instead
                ele.EffectiveArea04 = ele.EffectiveArea03*16./9.
            else: raise RuntimeError("Unsupported value for ele_effectiveAreas: can only use Data2012 (rho: ?), Phys14_v1 and Spring15_v1 (rho: fixedGridRhoFastjetAll)")
            # Electron scale calibrations
        if self.cfg_ana.doElectronScaleCorrections:
            for ele in allelectrons:
                self.electronEnergyCalibrator.correct(ele, event.run)
        # Attach the vertex
        # Falls back to the first raw event vertex if no good vertex was found.
        goodVertices = getattr(event, self.vertexChoice)
        for ele in allelectrons:
            ele.associatedVertex = goodVertices[0] if len(goodVertices)>0 else event.vertices[0]
        # Attach the event (for MVA Id)
        for ele in allelectrons:
            ele.event = event.input.object()
        # Compute relIso with R=0.3 and R=0.4 cones
        for ele in allelectrons:
            if self.cfg_ana.ele_isoCorr=="rhoArea" :
                # rho * effective-area pileup subtraction, clamped at zero
                ele.absIso03 = (ele.chargedHadronIso(0.3) + max(ele.neutralHadronIso(0.3)+ele.photonIso(0.3)-ele.rho*ele.EffectiveArea03,0))
                ele.absIso04 = (ele.chargedHadronIso(0.4) + max(ele.neutralHadronIso(0.4)+ele.photonIso(0.4)-ele.rho*ele.EffectiveArea04,0))
            elif self.cfg_ana.ele_isoCorr=="deltaBeta" :
                # delta-beta: subtract half of the PU charged-hadron sum
                ele.absIso03 = (ele.chargedHadronIso(0.3) + max( ele.neutralHadronIso(0.3)+ele.photonIso(0.3) - ele.puChargedHadronIso(0.3)/2, 0.0))
                ele.absIso04 = (ele.chargedHadronIso(0.4) + max( ele.neutralHadronIso(0.4)+ele.photonIso(0.4) - ele.puChargedHadronIso(0.4)/2, 0.0))
            else :
                raise RuntimeError("Unsupported ele_isoCorr name '" + str(self.cfg_ana.ele_isoCorr) + "'! For now only 'rhoArea' and 'deltaBeta' are supported.")
            ele.relIso03 = ele.absIso03/ele.pt()
            ele.relIso04 = ele.absIso04/ele.pt()
        # Set tight MVA id
        # tightIdResult encodes the number of working points passed (cut-based
        # ids: -1 + one unit per WP passed; MVA ids: countWP over the WP list).
        for ele in allelectrons:
            if self.cfg_ana.ele_tightId=="Cuts_SPRING15_25ns_v1_ConvVetoDxyDz" :
                ele.tightIdResult = -1 + ele.electronID("POG_Cuts_ID_SPRING15_25ns_v1_ConvVetoDxyDz_Veto") + 1*ele.electronID("POG_Cuts_ID_SPRING15_25ns_v1_ConvVetoDxyDz_Loose") + 1*ele.electronID("POG_Cuts_ID_SPRING15_25ns_v1_ConvVetoDxyDz_Medium") + 1*ele.electronID("POG_Cuts_ID_SPRING15_25ns_v1_ConvVetoDxyDz_Tight")
            elif self.cfg_ana.ele_tightId=="Cuts_SPRING16_25ns_v1_ConvVetoDxyDz" :
                ele.tightIdResult = -1 + 1*ele.electronID("POG_Cuts_ID_SPRING16_25ns_v1_ConvVetoDxyDz_Veto") + 1*ele.electronID("POG_Cuts_ID_SPRING16_25ns_v1_ConvVetoDxyDz_Loose") + 1*ele.electronID("POG_Cuts_ID_SPRING16_25ns_v1_ConvVetoDxyDz_Medium") + 1*ele.electronID("POG_Cuts_ID_SPRING16_25ns_v1_ConvVetoDxyDz_Tight")
            elif self.cfg_ana.ele_tightId=="Cuts_FALL17_94X_v1_ConvVetoDxyDz" :
                ele.tightIdResult = -1 + 1*ele.electronID("POG_Cuts_ID_FALL17_94X_v1_ConvVetoDxyDz_Veto") + 1*ele.electronID("POG_Cuts_ID_FALL17_94X_v1_ConvVetoDxyDz_Loose") + 1*ele.electronID("POG_Cuts_ID_FALL17_94X_v1_ConvVetoDxyDz_Medium") + 1*ele.electronID("POG_Cuts_ID_FALL17_94X_v1_ConvVetoDxyDz_Tight")
            elif self.cfg_ana.ele_tightId.startswith("mvaEleID-") and ("-wp" not in self.cfg_ana.ele_tightId):
                ele.tightIdResult = ele.countWP(self.cfg_ana.ele_tightId, WPs=["wpLoose", "wp90", "wp80"])
            elif self.cfg_ana.ele_tightId.startswith("cutBasedElectronID-") and (self.cfg_ana.ele_tightId.split("-")[-1] in ["V1","V2"]):
                ele.tightIdResult = ele.countWP(self.cfg_ana.ele_tightId, WPs=["veto", "loose", "medium", "tight"])
            else :
                # Fall back to whatever id string Electron.electronID understands.
                try:
                    ele.tightIdResult = ele.electronID(self.cfg_ana.ele_tightId)
                except RuntimeError:
                    raise RuntimeError("Unsupported ele_tightId name '" + str(self.cfg_ana.ele_tightId) + "'! For now only 'MVA' and 'Cuts_2012' are supported, in addition to what provided in Electron.py.")
            # HLT-safe id is always computed with the SPRING16 cut set
            ele.hltSafeIdResult = ele.electronID("POG_Cuts_ID_SPRING16_25ns_v1_HLT")
        return allelectrons
def attachMiniIsolation(self, mu):
mu.miniIsoR = 10.0/min(max(mu.pt(), 50),200)
# -- version with increasing cone at low pT, gives slightly better performance for tight cuts and low pt leptons
# mu.miniIsoR = 10.0/min(max(mu.pt(), 50),200) if mu.pt() > 20 else 4.0/min(max(mu.pt(),10),20)
what = "mu" if (abs(mu.pdgId()) == 13) else ("eleB" if mu.isEB() else "eleE")
if self.doMiniIsolation == "precomputed":
mu.miniAbsIsoCharged = mu.miniPFIsolation().chargedHadronIso()
elif what == "mu":
mu.miniAbsIsoCharged = self.IsolationComputer.chargedAbsIso(mu.physObj, mu.miniIsoR, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0);
if self.doFixedConeIsoWithMiniIsoVeto:
mu.AbsIsoMIVCharged03 = self.IsolationComputer.chargedAbsIso(mu.physObj, 0.3, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0);
mu.AbsIsoMIVCharged04 = self.IsolationComputer.chargedAbsIso(mu.physObj, 0.4, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0);
else:
mu.miniAbsIsoCharged = self.IsolationComputer.chargedAbsIso(mu.physObj, mu.miniIsoR, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0,self.IsolationComputer.selfVetoNone);
if self.doFixedConeIsoWithMiniIsoVeto:
mu.AbsIsoMIVCharged03 = self.IsolationComputer.chargedAbsIso(mu.physObj, 0.3, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0,self.IsolationComputer.selfVetoNone);
mu.AbsIsoMIVCharged04 = self.IsolationComputer.chargedAbsIso(mu.physObj, 0.4, {"mu":0.0001,"eleB":0,"eleE":0.015}[what], 0.0,self.IsolationComputer.selfVetoNone);
if self.miniIsolationPUCorr == None: puCorr = self.cfg_ana.mu_isoCorr if what=="mu" else self.cfg_ana.ele_isoCorr
else: puCorr = self.miniIsolationPUCorr
if puCorr == "weights":
if what == "mu":
mu.miniAbsIsoNeutral = self.IsolationComputer.neutralAbsIsoWeighted(mu.physObj, mu.miniIsoR, 0.01, 0.5);
else:
mu.miniAbsIsoNeutral = ( self.IsolationComputer.photonAbsIsoWeighted( mu.physObj, mu.miniIsoR, 0.08 if what == "eleE" else 0.0, 0.0, self.IsolationComputer.selfVetoNone) +
self.IsolationComputer.neutralHadAbsIsoWeighted(mu.physObj, mu.miniIsoR, 0.0, 0.0, self.IsolationComputer.selfVetoNone) )
else:
if self.doMiniIsolation == "precomputed":
mu.miniAbsIsoPho = mu.miniPFIsolation().photonIso()
mu.miniAbsIsoNHad = mu.miniPFIsolation().neutralHadronIso()
mu.miniAbsIsoNeutral = mu.miniAbsIsoPho + mu.miniAbsIsoNHad
elif what == "mu":
mu.miniAbsIsoNeutral = self.IsolationComputer.neutralAbsIsoRaw(mu.physObj, mu.miniIsoR, 0.01, 0.5);
else:
mu.miniAbsIsoPho = self.IsolationComputer.photonAbsIsoRaw( mu.physObj, mu.miniIsoR, 0.08 if what == "eleE" else 0.0, 0.0, self.IsolationComputer.selfVetoNone)
mu.miniAbsIsoNHad = self.IsolationComputer.neutralHadAbsIsoRaw(mu.physObj, mu.miniIsoR, 0.0, 0.0, self.IsolationComputer.selfVetoNone)
mu.miniAbsIsoNeutral = mu.miniAbsIsoPho + mu.miniAbsIsoNHad
# -- version relying on PF candidate vetos; apparently less performant, and the isolation computed at RECO level doesn't have them
#mu.miniAbsIsoPhoSV = self.IsolationComputer.photonAbsIsoRaw( mu.physObj, mu.miniIsoR, 0.0, 0.0)
#mu.miniAbsIsoNHadSV = self.IsolationComputer.neutralHadAbsIsoRaw(mu.physObj, mu.miniIsoR, 0.0, 0.0)
#mu.miniAbsIsoNeutral = mu.miniAbsIsoPhoSV + mu.miniAbsIsoNHadSV
if puCorr == "rhoArea":
mu.miniAbsIsoNeutral = max(0.0, mu.miniAbsIsoNeutral - mu.rho * mu.EffectiveArea03 * (mu.miniIsoR/0.3)**2)
elif puCorr == "deltaBeta":
if self.doMiniIsolation == "precomputed":
mu.miniAbsIsoPU = mu.miniPFIsolation().puChargedHadronIso()
elif what == "mu":
mu.miniAbsIsoPU = self.IsolationComputer.puAbsIso(mu.physObj, mu.miniIsoR, 0.01, 0.5);
else:
mu.miniAbsIsoPU = self.IsolationComputer.puAbsIso(mu.physObj, mu.miniIsoR, 0.015 if what == "eleE" else 0.0, 0.0,self.IsolationComputer.selfVetoNone);
mu.miniAbsIsoNeutral = max(0.0, mu.miniAbsIsoNeutral - 0.5*mu.miniAbsIsoPU)
elif puCorr != 'raw':
raise RuntimeError("Unsupported miniIsolationCorr name '" + puCorr + "'! For now only 'rhoArea', 'deltaBeta', 'raw', 'weights' are supported (and 'weights' is not | |
<reponame>dmivilensky/highlight
import json
# Create your views here.
import os
import asyncio
import sys
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from bson.objectid import ObjectId
# from .forms import UploadFileForm
import docx
import time
import datetime
from .logger import Logger
from .forms import UploadFileForm
# HTTP method the endpoints are designed for (the per-view method checks are
# currently commented out in the views below).
HTTPMETHOD: str = "POST"
if __name__ != '__main__':
    # Imported as part of the Django package: use package-relative imports.
    from . import registration as rg
    from . import get_functions as gf
    from . import find_functions as ff
    from . import main as mn
    from .utils import doc_ids_replace, users_replace_ids, handle_uploaded_file, hashCode, get_params, replace_pieces_id, file_loader_module
    # Module-level logger; the two log calls record whether the external
    # scripts directory is visible from the current working directory.
    lgr = Logger()
    lgr.log("log", "is dir", os.path.isdir("../../python_scripts"))
    lgr.log("log", "in dir", os.listdir("../../python_scripts"))
    # Make the sibling python_scripts directory importable (relative path:
    # depends on the process CWD — TODO confirm deployment layout).
    sys.path.insert(1, '../../python_scripts')
    import python_mailer as p_m
if __name__ == '__main__':
    # Executed as a plain script: fall back to absolute imports.
    # import registration as rg
    import get_functions as gf
    import find_functions as ff
    import main as mn
# Admin access key checked by the *_cover admin endpoints (redacted here).
ADKEY = "<KEY>"
ADHASH = 75953932291808146177936
@csrf_exempt
def index(request):
    """
    :description: health-check endpoint that greets the caller
    """
    return HttpResponse("HELLO, USER")
@csrf_exempt
def registration_cover(request):
    """
    :description: registers user by name, surname, mi, email, languages, login, password, status (translator/chief/verif), vk account, tg account and fb account
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        resp = rg.register(
            params["name"], params["surname"], params["mi"], params["email"],
            params["languages"], params["login"], params["password"],
            params["status"], params["vk"], params["tg"], params["fb"])
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def update_account(request):
    """
    :description: (requires old password) updates user account with name, surname, mi, email, languages, login, password, status (translator/chief/verif), vk account, tg account and fb account. (all parameters are optional except old password)
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        resp = rg.update_acc(params)
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
class JSONEncoder(json.JSONEncoder):
    """JSON encoder that renders BSON ObjectId and datetime values as strings."""

    def default(self, o):
        # Both types serialize via their str() form; everything else falls
        # through to the base class (which raises TypeError).
        if isinstance(o, (ObjectId, datetime.datetime)):
            return str(o)
        return super().default(o)
@csrf_exempt
def get_account(request):
    """
    :description: returns account information for the user described by the request parameters
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        resp = rg.get_acc(params)
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    # Custom encoder: the account record may contain ObjectId/datetime values.
    return HttpResponse(JSONEncoder().encode(resp))
@csrf_exempt
def login_cover(request):
    """
    :description: logins user by login, password and status (called type) (translator/chief/verif)

    Required parameters: ``login`` and ``password``; ``type`` is optional
    (``None`` when absent).  Responds with code "5001" when a required
    parameter is missing.
    """
    result = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        login1 = params["login"]
        pwd = params["password"]
        # `dict.get` replaces the former `"type" in params.keys()` check and
        # avoids shadowing the builtin `type`.
        acc_type = params.get("type")
        result = rg.log_in(login1, pwd, type=acc_type)
    except KeyError:
        result = {'code': "5001"}
    text = json.dumps(result)
    return HttpResponse(text)
@csrf_exempt
def verify_cover(request):
    """
    :description: verifies user account by admin (key, decision, user account login)
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        key = params["key"]
        decision = params["decision"]
        ulog = params["login"]
        if key != ADKEY:
            resp = {'code': "2004"}  # wrong admin key
        else:
            verdict = "ADMITTED" if decision == "1" else "NOT"
            resp = rg.verify(ulog, verdict)
            resp["document"] = verdict
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def find_pieces_cover(request):
    """
    :description: finds all pieces taken by user by user id
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        user_id = params["id"]
        if not mn.is_there_any_body(user_id):
            resp = {'code': "2003"}  # unknown user
        else:
            resp = ff.find_pieces(user_id)
            # make the piece ids JSON-serializable in place
            replace_pieces_id(resp["document"], find_in_list=True)
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def find_piece(request):
    """
    :description: finds piece by its id and user id
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        user_id = params["id"]
        piece_id = params["piece_id"]
        if not mn.is_there_any_body(user_id):
            resp = {'code': "2003"}  # unknown user
        else:
            resp = ff.find_piece(piece_id)
            doc = resp["document"]
            # stringify non-JSON-serializable fields
            doc["_id"] = str(doc["_id"])
            doc["lastModified"] = str(doc["lastModified"])
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def find_doc_by_lang_cover(request):
    """
    :description: finds all docs waiting for being translated on specific language
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        resp = ff.find_doc_by_lang(params["language"])
        for entry in resp["document"]:
            doc = entry["doc"]
            # stringify non-JSON-serializable fields in place
            doc["_id"] = str(doc["_id"])
            doc["lastModified"] = str(doc["lastModified"])
            # NOTE(review): presumably mutates `entry` in place; the original
            # also rebound the loop variable to the return value, which has no
            # effect on the list — confirm replace_pieces_id mutates its arg.
            replace_pieces_id(entry)
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_from_db_cover(request):
    """
    :description: gets all documents matching search and tags
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        page = params.get("page", -1)  # -1 means "no paging"
        resp = gf.get_from_db(params["search"], params["tags"], page=page)
        resp = doc_ids_replace(resp)  # stringify document ids for JSON
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_from_db_for_chief_cover(request):
    """
    :description: gets all unverified documents matching search and tags for medics to check
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        page = params.get("page", -1)  # -1 means "no paging"
        resp = gf.get_for_chief_from_db(params["search"], params["tags"], page=page)
        resp = doc_ids_replace(resp)  # stringify document ids for JSON
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_from_db_for_verst_cover(request):
    """
    :description: gets all unformated documents matching search and tags for typesetters to check
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        page = params.get("page", -1)  # -1 means "no paging"
        resp = gf.get_for_verst_from_db(params["search"], params["tags"], page=page)
        resp = doc_ids_replace(resp)  # stringify document ids for JSON
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_users_cover(request):
    """
    :description: gets all unverified users for admin
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        if params["key"] != ADKEY:
            resp = {'code': "2004"}  # wrong admin key
        else:
            resp = users_replace_ids(gf.get_users())
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_trans_and_docs_cover(request):
    """
    :description: counts all verified translators and documents both translated and not
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        if params["key"] != ADKEY:
            resp = {'code': "2004"}  # wrong admin key
        else:
            resp = gf.get_docs_and_trans()
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_translator_stats_cover(request):
    """
    :description: gets all verified translators and their info
    """
    resp = {'code': "4040"}  # placeholder; always replaced below
    params = get_params(request)
    try:
        if params["key"] != ADKEY:
            resp = {'code': "2004"}  # wrong admin key
        else:
            resp = users_replace_ids(gf.get_translators_stat(), replace_login=True)
    except KeyError:
        resp = {'code': "5001"}  # a required parameter is missing
    return HttpResponse(json.dumps(resp))
@csrf_exempt
def get_user_by_doc_or_piece_cover(request):
    """Return all users working on the same document or piece.

    Expects ``find_id`` (a document or piece id) in the request params;
    a missing param yields error code "5001".

    NOTE(review): unlike the sibling *_cover views there is no key/auth
    check here — confirm this endpoint is intentionally open.
    """
    result = {'code': "4040"}
    params = get_params(request)
    try:
        rid = params["find_id"]
        result = gf.get_users_by_doc_or_piece(rid)
    except KeyError:
        result = {'code': "5001"}
    # Consistency fix: every other *_cover view serialises with json.dumps;
    # JSONEncoder().encode produced the same output but diverged in style.
    text = json.dumps(result)
    return HttpResponse(text)
@csrf_exempt
def get_file_stat_cover(request):
    """Return file info: status, importance, pieces and name; admin key required."""
    params = get_params(request)
    try:
        if params["key"] == ADKEY:
            payload = gf.get_file_stat()
        else:
            payload = {'code': "2004"}  # wrong admin key
    except KeyError:
        payload = {'code': "5001"}  # 'key' param missing
    return HttpResponse(json.dumps(payload))
@csrf_exempt
def get_pieces_stat_cover(request):
    """Return pieces and their translators (who is working on what); admin key required."""
    params = get_params(request)
    try:
        if params["key"] == ADKEY:
            payload = gf.get_pieces_stat()
        else:
            payload = {'code': "2004"}  # wrong admin key
    except KeyError:
        payload = {'code': "5001"}  # 'key' param missing
    return HttpResponse(json.dumps(payload))
@csrf_exempt
def verify_file_cover(request):
    """Verify a document as a medic.

    Any uploaded file is first persisted by ``file_loader_module``; its
    path (or None when no file was sent) is passed on to ``mn.verify_file``.
    Missing ``document_id``/``id`` params yield error code "5001".
    """
    lgr, path = file_loader_module(request)
    params = get_params(request)
    try:
        document_id = params["document_id"]
        user_id = params["id"]
        payload = mn.verify_file(document_id, user_id, path if path != "" else None)
    except KeyError:
        payload = {'code': "5001"}  # required params missing
    return HttpResponse(json.dumps(payload))
@csrf_exempt
def markup_file(request):
    """Verify a document as a markuper.

    NOTE(review): this body is identical to ``verify_file_cover`` and also
    calls ``mn.verify_file`` — confirm a markup-specific routine was not
    intended here.
    """
    lgr, path = file_loader_module(request)
    params = get_params(request)
    try:
        document_id = params["document_id"]
        user_id = params["id"]
        payload = mn.verify_file(document_id, user_id, path if path != "" else None)
    except KeyError:
        payload = {'code': "5001"}  # required params missing
    return HttpResponse(json.dumps(payload))
@csrf_exempt
def update_importance_cover(request):
"""
:description: updates importance
"""
result = {'code': "4040"}
# | |
import functools
import inspect
import logging
import re
import unicodedata
from typing import TYPE_CHECKING, Any, Dict, Iterable, Union
from anyascii import anyascii
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.core.signals import setting_changed
from django.db.models import Model
from django.db.models.base import ModelBase
from django.dispatch import receiver
from django.http import HttpRequest
from django.utils.encoding import force_str
from django.utils.text import slugify
from django.utils.translation import check_for_language, get_supported_language_variant
if TYPE_CHECKING:
from wagtail.models import Site
logger = logging.getLogger(__name__)
WAGTAIL_APPEND_SLASH = getattr(settings, "WAGTAIL_APPEND_SLASH", True)
def camelcase_to_underscore(str):
    """Convert a CamelCase string to snake_case.

    Based on https://djangosnippets.org/snippets/585/
    (The parameter name shadows the ``str`` builtin; it is kept for
    backward compatibility with keyword callers.)
    """
    underscored = re.sub("(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))", "_\\1", str)
    return underscored.lower().strip("_")
def string_to_ascii(value):
    """Transliterate *value* into a plain-ASCII ``str`` using anyascii."""
    transliterated = anyascii(value)
    return str(transliterated)
def get_model_string(model):
    """
    Return an ``app_label.ModelName`` identifier string for *model*.

    The inverse operation is `resolve_model_string`.
    """
    return "{}.{}".format(model._meta.app_label, model.__name__)
def resolve_model_string(model_string, default_app=None):
    """
    Resolve an 'app_label.model_name' string into an actual model class.
    If a model class is passed in, just return that.

    Raises a LookupError if a model can not be found, or ValueError if passed
    something that is neither a model or a string.
    """
    if isinstance(model_string, type) and issubclass(model_string, Model):
        # Already a model class; nothing to resolve.
        return model_string
    if not isinstance(model_string, str):
        raise ValueError(
            "Can not resolve {0!r} into a model".format(model_string), model_string
        )
    try:
        app_label, model_name = model_string.split(".")
    except ValueError:
        # The string did not split into exactly two parts; with a default
        # app configured, treat the whole string as a model in that app.
        if default_app is None:
            raise ValueError(
                "Can not resolve {0!r} into a model. Model names "
                "should be in the form app_label.model_name".format(model_string),
                model_string,
            )
        app_label, model_name = default_app, model_string
    return apps.get_model(app_label, model_name)
SCRIPT_RE = re.compile(r"<(-*)/script>")
def escape_script(text):
    """
    Escape `</script>` tags in 'text' so that it can be placed within a `<script>` block without
    accidentally closing it. A '-' character will be inserted for each time it is escaped:
    `<-/script>`, `<--/script>` etc.
    """
    escaped = SCRIPT_RE.sub(r"<-\1/script>", text)
    return escaped
SLUGIFY_RE = re.compile(r"[^\w\s-]", re.UNICODE)
def cautious_slugify(value):
    """
    Convert a string to ASCII exactly as Django's slugify does, except that
    non-ASCII alphanumeric characters which cannot be ASCIIfied under Unicode
    normalisation are escaped into codes like 'u0421' rather than deleted.
    Slugifying e.g. Cyrillic text therefore never yields an empty string, so
    the result can safely be used as an (opaque) identifier.
    """
    text = force_str(value)
    # NFKD splits accented Latin characters into base character + combining
    # accent; the combining marks are dropped by SLUGIFY_RE below, leaving a
    # clean ASCII character that needs no escaping.
    text = unicodedata.normalize("NFKD", text)
    # Drop anything that is not a word character, whitespace or hyphen (the
    # same pattern slugify itself uses), so accent modifiers and fancy
    # punctuation get stripped rather than escaped.
    text = SLUGIFY_RE.sub("", text)
    # Escape the remaining non-ASCII characters as backslash codes, then
    # decode back to a unicode string, which is what slugify expects.
    text = text.encode("ascii", "backslashreplace").decode("ascii")
    # slugify performs the final conversion (whitespace stripping, mark_safe)
    # and also strips out the backslashes from 'backslashreplace'.
    return slugify(text)
def safe_snake_case(value):
    """
    Convert a string to an ASCII snake_case identifier.

    Applies `cautious_slugify` (careful handling of non-ASCII alphanumerics),
    then turns the hyphens it produces into underscores, making the result
    safe for Django template or filename usage.
    """
    return cautious_slugify(value).replace("-", "_")
def get_content_type_label(content_type):
    """
    Return a human-readable label for a content type object, suitable for
    display in the admin in place of the default 'wagtailcore | page'
    representation.
    """
    model = content_type.model_class()
    if model is None:
        # No corresponding model class found (e.g. a stale content type);
        # fall back on the name field of the ContentType.
        return content_type.model.capitalize()
    return model._meta.verbose_name.capitalize()
def accepts_kwarg(func, kwarg):
    """
    Return True if the callable *func* can accept the keyword argument
    *kwarg*, determined by attempting a partial bind of its signature.
    """
    try:
        inspect.signature(func).bind_partial(**{kwarg: None})
    except TypeError:
        return False
    return True
class InvokeViaAttributeShortcut:
    """
    Expose an object's named single-argument method through simple
    attribute access. For example, after:

        obj.page_url = InvokeViaAttributeShortcut(obj, 'get_page_url')

    the expression ``obj.page_url.terms_and_conditions`` is equivalent to
    calling ``obj.get_page_url('terms_and_conditions')``.
    """

    __slots__ = ("obj", "method_name")

    def __init__(self, obj, method_name):
        self.obj = obj
        self.method_name = method_name

    def __getattr__(self, name):
        # Look the target method up lazily so the shortcut always reflects
        # the current binding on the underlying object.
        bound_method = getattr(self.obj, self.method_name)
        return bound_method(name)
def find_available_slug(parent, requested_slug, ignore_page_id=None):
    """
    Find a slug that is unused among *parent*'s children.

    If *requested_slug* is taken, '-1', '-2', ... is appended until a free
    slug is found:

    - 'requested-slug'
    - 'requested-slug-1'
    - 'requested-slug-2'

    Pass the id of a page being updated as *ignore_page_id* so that page's
    own current slug does not count as taken.
    """
    candidates = parent.get_children().filter(slug__startswith=requested_slug)
    if ignore_page_id:
        candidates = candidates.exclude(id=ignore_page_id)
    taken = set(candidates.values_list("slug", flat=True))

    suffix = 1
    candidate = requested_slug
    while candidate in taken:
        candidate = "{}-{}".format(requested_slug, suffix)
        suffix += 1
    return candidate
@functools.lru_cache()
def get_content_languages():
    """
    Return settings.WAGTAIL_CONTENT_LANGUAGES as a {code: display_name} dict
    for easy lookups by key.

    When the setting is absent, falls back to a single entry derived from
    settings.LANGUAGE_CODE. Cached with lru_cache; `reset_cache` clears it
    when the relevant settings change.

    Raises ImproperlyConfigured if a configured content language is not
    listed in settings.LANGUAGES.
    """
    content_languages = getattr(settings, "WAGTAIL_CONTENT_LANGUAGES", None)
    languages = dict(settings.LANGUAGES)
    if content_languages is None:
        # Default to a single language based on LANGUAGE_CODE
        default_language_code = get_supported_language_variant(settings.LANGUAGE_CODE)
        try:
            language_name = languages[default_language_code]
        except KeyError:
            # get_supported_language_variant on the 'null' translation backend (used for
            # USE_I18N=False) returns settings.LANGUAGE_CODE unchanged without accounting for
            # language variants (en-us versus en), so retry with the generic version.
            default_language_code = default_language_code.split("-")[0]
            try:
                language_name = languages[default_language_code]
            except KeyError:
                # Can't extract a display name, so fall back on displaying LANGUAGE_CODE instead
                language_name = settings.LANGUAGE_CODE
                # Also need to tweak the languages dict to get around the check below
                languages[default_language_code] = settings.LANGUAGE_CODE
        content_languages = [
            (default_language_code, language_name),
        ]
    # Check that each content language is in LANGUAGES
    for language_code, name in content_languages:
        if language_code not in languages:
            raise ImproperlyConfigured(
                "The language {} is specified in WAGTAIL_CONTENT_LANGUAGES but not LANGUAGES. "
                "WAGTAIL_CONTENT_LANGUAGES must be a subset of LANGUAGES.".format(
                    language_code
                )
            )
    return dict(content_languages)
@functools.lru_cache(maxsize=1000)
def get_supported_content_language_variant(lang_code, strict=False):
    """
    Return the language code that's listed in supported languages, possibly
    selecting a more generic variant. Raise LookupError if nothing is found.

    If `strict` is False (the default), look for a country-specific variant
    when neither the language code nor its generic variant is found.

    lru_cache should have a maxsize to prevent from memory exhaustion attacks,
    as the provided language codes are taken from the HTTP request. See also
    <https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.

    This is equivalent to Django's
    `django.utils.translation.get_supported_language_variant`, but reads the
    `WAGTAIL_CONTENT_LANGUAGES` setting instead.
    """
    if lang_code:
        # If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
        possible_lang_codes = [lang_code]
        try:
            # LANG_INFO may declare explicit fallbacks (e.g. 'zh-hans' for 'zh')
            possible_lang_codes.extend(LANG_INFO[lang_code]["fallback"])
        except KeyError:
            pass
        generic_lang_code = lang_code.split("-")[0]
        possible_lang_codes.append(generic_lang_code)
        supported_lang_codes = get_content_languages()
        for code in possible_lang_codes:
            # check_for_language verifies Django actually has translations
            if code in supported_lang_codes and check_for_language(code):
                return code
        if not strict:
            # if fr-fr is not supported, try fr-ca.
            for supported_code in supported_lang_codes:
                if supported_code.startswith(generic_lang_code + "-"):
                    return supported_code
    raise LookupError(lang_code)
@functools.lru_cache()
def get_locales_display_names() -> dict:
    """
    Return a cached mapping of locale id -> locale display name.
    """
    from wagtail.models import Locale  # inlined to avoid circular imports

    return {
        locale.pk: locale.get_display_name() for locale in Locale.objects.all()
    }
@receiver(setting_changed)
def reset_cache(**kwargs):
    """
    Clear the language caches whenever one of the global
    WAGTAIL_CONTENT_LANGUAGES / LANGUAGES / LANGUAGE_CODE settings changes.
    """
    watched = ("WAGTAIL_CONTENT_LANGUAGES", "LANGUAGES", "LANGUAGE_CODE")
    if kwargs["setting"] in watched:
        get_content_languages.cache_clear()
        get_supported_content_language_variant.cache_clear()
def multigetattr(item, accessor):
"""
Like getattr, but accepts a dotted path as the accessor to be followed to any depth.
At each step, the lookup on the object can be a dictionary lookup (foo['bar']) or an attribute
lookup (foo.bar), and if it results in a callable, will be called (provided we can do so with
no arguments, and it does not have an 'alters_data' property).
Modelled on the variable resolution logic in Django templates:
https://github.com/django/django/blob/f331eba6d576752dd79c4b37c41d981daa537fe6/django/template/base.py#L838
"""
current = item
for bit in accessor.split("."):
try: # dictionary lookup
current = current[bit]
# ValueError/IndexError are for numpy.array lookup | |
from allauth.socialaccount.adapter import get_adapter
from rest_framework.response import Response
from rest_framework import viewsets, status
from django.contrib.auth.models import User
from rest_framework.decorators import action
from rest_framework.authentication import TokenAuthentication
from .models import Profile,Loan_Record
from .serializers import UserRegistrationSerializers, ProfileSerializer, EditProfileSerilizer,LoanSerializer , DeleteAccountSerializer
from rest_framework.permissions import AllowAny, IsAuthenticated
import jwt
import os
from datetime import datetime, timedelta
# favour django-mailer but fall back to django.core.mail
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from allauth.socialaccount.providers.oauth2.client import OAuth2Client
from rest_auth.registration.views import SocialLoginView
from django.http import Http404
# from rest_framework.parsers import FileUploadParser
#login was
class UserViewSet(viewsets.ModelViewSet):
    """Registration endpoint for users.

    Every default read/update/delete action is deliberately blocked;
    only the serializer-driven registration flow is exposed.
    """

    queryset = User.objects.all()
    serializer_class = UserRegistrationSerializers
    authentication_classes = (TokenAuthentication,)
    permission_classes = (AllowAny,)
    versions = ['v1']

    def _rejected(self, message):
        # Shared helper: all blocked actions answer with the same 400 shape.
        return Response({'message': message}, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def update(self, request, *args, **kwargs):
        return self._rejected('You cant Update your Profile like that')

    # pylint: disable=R0201
    def destroy(self, request, *args, **kwargs):
        return self._rejected('You cant delete Profile like this')

    # pylint: disable=R0201
    def list(self, request, *args, **kwargs):
        return self._rejected('You cant list or retrieve users Profile like this')

    # pylint: disable=R0201
    def retrieve(self, request, pk=None, *args, **kwargs):
        return self._rejected('You cant list or retrieve users Profile like this')
class ProfileViewSet(viewsets.ModelViewSet):
    """Read/update endpoint for the authenticated user's Profile.

    The default ModelViewSet actions (update/create/destroy/retrieve) are
    blocked; access goes through ``list`` (the current user's profile) and
    the custom ``update_profile`` action, both keyed off the auth token.
    """

    queryset = Profile.objects.all()
    serializer_class = ProfileSerializer
    # TokenAuthentication lets DRF resolve request.user from the token.
    authentication_classes = (TokenAuthentication,)
    permission_classes = (IsAuthenticated,)
    versions = ['v1']

    # pylint: disable=R0201
    def update(self, request, *args, **kwargs):
        response = {'message': 'You cant edit your Profile like that'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def create(self, request, *args, **kwargs):
        response = {'message': 'You cant create Profile like that'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def destroy(self, request, *args, **kwargs):
        response = {'message': 'You cant delete Profile like this'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def list(self, request, version="v1", *args, **kwargs):
        """Return the profile belonging to the requesting user."""
        if version not in self.versions:
            response = {'message': 'API version not identified!'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if request.user:
            try:
                profile = Profile.objects.get(user=request.user.id)
            # BUG FIX: Profile.objects.get raises Profile.DoesNotExist, not
            # IndexError — the old handler could never fire, so a user with
            # no profile row produced an unhandled 500.
            except Profile.DoesNotExist:
                response = {'message': 'User not Authenticated! '}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
            serializer = ProfileSerializer(profile, many=False)
            response = {'message': 'User profile ', 'result': serializer.data}
            return Response(response, status=status.HTTP_200_OK)
        # NOTE(review): as in the original, a falsy request.user falls
        # through and returns None — confirm IsAuthenticated makes this
        # branch unreachable.

    # pylint: disable=R0201
    def retrieve(self, request, pk=None, *args, **kwargs):
        response = {'message': 'You cant retrieve users Profile like this'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    @action(detail=False, methods=['PUT'])
    def update_profile(self, request, version="v1"):
        """Update the facebook_user/phone/profile fields for the current user."""
        if version not in self.versions:
            response = {'message': 'API version not identified!'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if request.data:
            fetched_data = request.data
            user = request.user
            try:
                Profile.objects.filter(user=user.id).update(
                    facebook_user=fetched_data['facebook_user'],
                    phone=fetched_data['phone'],
                    profile=fetched_data['profile'])
                get_profile = Profile.objects.get(user=user.id)
                serializer = EditProfileSerilizer(get_profile, many=False)
                response = {'message': 'User profile Updated', 'result': serializer.data}
                return Response(response, status=status.HTTP_200_OK)
            except Profile.DoesNotExist:
                response = {'message': 'user profile does not exit'}
                return Response(response, status=status.HTTP_200_OK)
            # BUG FIX: a request body missing one of the expected fields
            # previously raised an unhandled KeyError (500).
            except KeyError:
                response = {'message': 'missing profile fields in request body'}
                return Response(response, status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): an empty request body falls through and returns None
        # (kept from the original) — consider returning a 400 here.
class RecoveryViewSet(viewsets.ModelViewSet):
    """Password recovery flow.

    ``create`` e-mails the user a reset link carrying a short-lived JWT,
    ``validate_token`` checks that token, and ``confirm`` sets the new
    password. All default ModelViewSet actions are blocked.

    NOTE(review): a second ``RecoveryViewSet`` defined further down this
    module shadows this class — confirm the duplication is intentional.
    """

    queryset = User.objects.all()  # used by serializers output
    authentication_classes = (TokenAuthentication,)
    permission_classes = (AllowAny,)
    versions = ['v1']

    # pylint: disable=R0201
    def update(self, request, *args, **kwargs):
        response = {'message': 'You cant edit your Profile like that'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def list(self, request, *args, **kwargs):
        response = {'message': 'You cant create Profile like that'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def destroy(self, request, *args, **kwargs):
        response = {'message': 'You cant delete Profile like this'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    # pylint: disable=R0201
    def retrieve(self, request, pk=None, *args, **kwargs):
        response = {'message': 'You cant retrieve users Profile like this'}
        return Response(response, status=status.HTTP_400_BAD_REQUEST)

    def create(self, request, version="v1", *args, **kwargs):
        """Send a password-reset e-mail containing a 5-minute JWT link."""
        if version not in self.versions:
            response = {'message': 'API version not identified!'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if request.data:
            fetched_data = request.data
            email = fetched_data['email']
            try:
                # verify a user with this email exists
                user = User.objects.get(email=email)
                secret = os.getenv("SECRETKEY")
                # token expires 5 minutes from now
                dt = datetime.now() + timedelta(minutes=5)
                encoded = jwt.encode({'email': email, 'exp': dt}, secret, algorithm='HS256')
                # NOTE(review): .decode() assumes PyJWT < 2.0, where encode()
                # returns bytes; PyJWT >= 2.0 returns str — confirm the
                # pinned dependency version.
                reset_link = f'{os.getenv("RESETPASS_URL")}/{encoded.decode("utf-8")}'
                # SECURITY FIX: removed the debug print() of reset_link,
                # which leaked the recovery token to server logs/stdout.
                context = {
                    'user': user,
                    'reset_link': reset_link,
                }
                msg_plain = render_to_string('../templates/password_reset_email.txt', context)
                msg_html = render_to_string('../templates/password_reset_email.html', context)
                subject = 'Debt notification account recovery request.'
                send_mail(
                    subject,
                    msg_plain,
                    settings.EMAIL_HOST_USER,
                    [email],
                    fail_silently=False,
                    html_message=msg_html,
                )
                response = {'token': 'email sent!'}
                return Response(response, status=status.HTTP_200_OK)
            except User.DoesNotExist:
                response = {'message': 'No user associated with this email exits!'}
                return Response(response, status=status.HTTP_404_NOT_FOUND)

    @action(detail=False, methods=['POST'])
    def validate_token(self, request, version="v1"):
        """Check whether a reset token is well-formed and not yet expired."""
        if version not in self.versions:
            response = {'message': 'API version not identified!'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if request.data:
            fetched_data = request.data
            encoded_token = fetched_data['token']
            try:
                secret = os.getenv("SECRETKEY")
                jwt.decode(encoded_token, secret, leeway=10, algorithms=['HS256'])
                response = {'message': 'Token is still valid and active :)'}
            # ExpiredSignatureError is a subclass of InvalidTokenError, so it
            # must be caught first to keep the distinct message.
            except jwt.ExpiredSignatureError:
                response = {'message': 'Token expired. Get new one'}
            except jwt.InvalidTokenError:
                response = {'message': 'Invalid Token'}
            # All outcomes deliberately return 200 so the frontend branches
            # on the message (kept from the original behaviour).
            return Response(response, status=status.HTTP_200_OK)

    @action(detail=False, methods=['POST'])
    def confirm(self, request, version="v1"):
        """Set a new password for the user identified by a valid reset token."""
        if version not in self.versions:
            response = {'message': 'API version not identified!'}
            return Response(response, status=status.HTTP_400_BAD_REQUEST)
        if request.data:
            try:
                fetched_data = request.data
                encoded_token = fetched_data['token']
                new_password = fetched_data['password']
                secret = os.getenv("SECRETKEY")
                decoded = jwt.decode(encoded_token, secret, leeway=10, algorithms=['HS256'])
                user = User.objects.get(email=decoded['email'])
                # BUG FIX: the original read user.set_password(<PASSWORD>) —
                # an invalid anonymised placeholder; the password supplied in
                # the request body is what must be set.
                user.set_password(new_password)
                user.save()
                response = {'success': 'Password reset was successful!'}
                return Response(response, status=status.HTTP_200_OK)
            except jwt.InvalidTokenError:
                response = {'message': 'Invalid Token'}
                return Response(response, status=status.HTTP_200_OK)
            except User.DoesNotExist:
                response = {'message': 'No user associated with this email exits!'}
                return Response(response, status=status.HTTP_200_OK)
class RecoveryViewSet(viewsets.ModelViewSet):
queryset = User.objects.all() #used by serializers output
authentication_classes = (TokenAuthentication,)
permission_classes = (AllowAny,)
versions =['v1']
# pylint: disable=R0201
def update(self, request, *args, **kwargs):
response = {'message': 'You cant edit your Profile like that'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
# pylint: disable=R0201
def list(self, request, *args, **kwargs):
response = {'message': 'You cant create Profile like that'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
# pylint: disable=R0201
def destroy(self, request, *args, **kwargs):
response = {'message': 'You cant delete Profile like this'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
# pylint: disable=R0201
def retrieve(self, request, pk=None, *args, **kwargs):
response = {'message': 'You cant retrieve users Profile like this'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
def create(self, request, version="v1", *args, **kwargs):
# check if the version argument exists in the versions list
if version in self.versions:
if request.data :
fetched_data = request.data
email= fetched_data['email']
try :
# check in fetch email exits
user = User.objects.get(email=email)
# create jwt token
secret = os.getenv("SECRETKEY")
# minutes=1
dt = datetime.now() + timedelta(minutes=1)
encoded = jwt.encode({'email': email, 'exp': dt}, secret , algorithm='HS256')
reset_link = f'{os.getenv("RESETPASS_URL")}/{encoded.decode("utf-8")}'
# send an e-mail to the user
context = {
'user': user,
'reset_link': reset_link
}
print(reset_link)
msg_plain = render_to_string('../templates/password_reset_email.txt', context)
msg_html = render_to_string('../templates/password_reset_email.html', context)
subject = 'Debt notification account recovery request.'
from_email = settings.EMAIL_HOST_USER
message = msg_plain
recipient_list = [email]
send_mail(subject, message, from_email, recipient_list, fail_silently=False, html_message=msg_html)
response= {'token': 'email sent!'}
return Response(response, status=status.HTTP_200_OK)
except User.DoesNotExist:
response = {'message': 'No user associated with this email exits!'}
return Response(response, status=status.HTTP_404_NOT_FOUND)
else:
response = {'message': 'API version not identified!'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False, methods=['POST'])
def validate_token(self,request, version="v1"):
if version in self.versions:
if request.data :
fetched_data =request.data
encoded_token= fetched_data['token']
try:
secret = os.getenv("SECRETKEY")
jwt.decode(encoded_token, secret, leeway=10, algorithms=['HS256'])
response= {'message': 'Token is still valid and active :)'}
return Response(response, status=status.HTTP_200_OK)
except jwt.ExpiredSignatureError:
response= {'message': 'Token expired. Get new one'}
return Response(response, status=status.HTTP_200_OK)
except jwt.InvalidTokenError:
response= {'message': 'Invalid Token'}
return Response(response, status=status.HTTP_200_OK)
else:
response = {'message': 'API version not identified!'}
return Response(response, status=status.HTTP_400_BAD_REQUEST)
@action(detail=False, methods=['POST'])
def confirm(self,request, version="v1"):
if version in self.versions:
if request.data :
try:
# user token and password
fetched_data =request.data
encoded_token= fetched_data['token']
new_password = fetched_data['password']
secret = os.getenv("SECRETKEY")
decode_token = jwt.decode(encoded_token, secret, leeway=10, algorithms=['HS256'])
email = decode_token['email']
# modify existing user
user = User.objects.get(email=email)
user.set_password(<PASSWORD>)
user.save()
response = {'success': 'Password reset was successful!'}
return Response(response, status=status.HTTP_200_OK)
except jwt.InvalidTokenError:
response= {'message': 'Invalid Token'}
return | |
iterhelper = context.make_helper(builder, resty)
iterhelper.parent = d
iterhelper.state = iterhelper.state.type(None)
return impl_ret_borrowed(
context,
builder,
resty,
iterhelper._getvalue(),
)
return codegen
@intrinsic
def _dict_items(typingctx, d):
    """Get dictionary iterator for .items()"""
    # Wrap the dict in an items-iterable type; the actual iteration state
    # setup is shared via _iterator_codegen.
    resty = types.DictItemsIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen
@intrinsic
def _dict_keys(typingctx, d):
    """Get dictionary iterator for .keys()"""
    # Same pattern as _dict_items, with a keys-iterable result type.
    resty = types.DictKeysIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen
@intrinsic
def _dict_values(typingctx, d):
    """Get dictionary iterator for .values()"""
    # Same pattern as _dict_items, with a values-iterable result type.
    resty = types.DictValuesIterableType(d)
    sig = resty(d)
    codegen = _iterator_codegen(resty)
    return sig, codegen
@intrinsic
def _make_dict(typingctx, keyty, valty, ptr):
    """Make a dictionary struct with the given *ptr*

    Parameters
    ----------
    keyty, valty: Type
        Type of the key and value, respectively.
    ptr : llvm pointer value
        Points to the dictionary object.
    """
    dict_ty = types.DictType(keyty.instance_type, valty.instance_type)

    def codegen(context, builder, signature, args):
        [_, _, ptr] = args
        ctor = cgutils.create_struct_proxy(dict_ty)
        dstruct = ctor(context, builder)
        dstruct.data = ptr
        # Allocate NRT-managed memory sized to hold one pointer, with a
        # destructor attached so the native dict is torn down when the
        # refcount reaches zero.
        alloc_size = context.get_abi_sizeof(
            context.get_value_type(types.voidptr),
        )
        dtor = _imp_dtor(context, builder.module)
        meminfo = context.nrt.meminfo_alloc_dtor(
            builder,
            context.get_constant(types.uintp, alloc_size),
            dtor,
        )
        # Store the native dict pointer into the meminfo payload so the
        # destructor can find it later.
        data_pointer = context.nrt.meminfo_data(builder, meminfo)
        data_pointer = builder.bitcast(data_pointer, ll_dict_type.as_pointer())
        builder.store(ptr, data_pointer)
        dstruct.meminfo = meminfo
        return dstruct._getvalue()
    sig = dict_ty(keyty, valty, ptr)
    return sig, codegen
@overload(new_dict)
def impl_new_dict(key, value):
    """Creates a new dictionary with *key* and *value* as the type
    of the dictionary key and value, respectively.
    """
    # Both arguments must be numba Type instances (e.g. types.int64).
    if any([
        not isinstance(key, Type),
        not isinstance(value, Type),
    ]):
        raise TypeError("expecting *key* and *value* to be a numba Type")
    keyty, valty = key, value

    def imp(key, value):
        # Allocate the native dict at minimal size, install the
        # type-specialised method table, then wrap the raw pointer in a
        # managed dict struct.
        dp = _dict_new_minsize(keyty, valty)
        _dict_set_method_table(dp, keyty, valty)
        d = _make_dict(keyty, valty, dp)
        return d
    return imp
@overload(len)
def impl_len(d):
    """len(dict)
    """
    # Returning None tells numba this overload does not apply to `d`.
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        return _dict_length(d)
    return impl
@overload_method(types.DictType, '__setitem__')
@overload(operator.setitem)
def impl_setitem(d, key, value):
    """Implement d[key] = value for typed dicts."""
    if not isinstance(d, types.DictType):
        return
    keyty, valty = d.key_type, d.value_type

    def impl(d, key, value):
        # Cast key and value to the dict's declared types before inserting.
        castedkey = _cast(key, keyty)
        castedval = _cast(value, valty)
        status = _dict_insert(d, castedkey, hash(castedkey), castedval)
        if status == Status.OK:
            return
        elif status == Status.OK_REPLACED:
            # replaced
            # XXX handle refcount
            return
        elif status == Status.ERR_CMP_FAILED:
            raise ValueError('key comparison failed')
        else:
            raise RuntimeError('dict.__setitem__ failed unexpectedly')

    if d.is_precise():
        # Handle the precise case.
        return impl
    else:
        # Handle the imprecise case: refine the dict type from the observed
        # key/value argument types, then return an explicit signature so the
        # typing machinery picks up the refined types.
        d = d.refine(key, value)
        # Re-bind the key type and value type to match the arguments.
        keyty, valty = d.key_type, d.value_type
        # Create the signature that we wanted this impl to have.
        sig = typing.signature(types.void, d, keyty, valty)
        return sig, impl
@overload_method(types.DictType, 'get')
def impl_get(dct, key, default=None):
    """Implement dict.get(key, default) for typed dicts."""
    if not isinstance(dct, types.DictType):
        return
    keyty = dct.key_type
    valty = dct.value_type
    # Validate that `default` can be safely cast to the value type.
    _sentry_safe_cast_default(default, valty)

    def impl(dct, key, default=None):
        castedkey = _cast(key, keyty)
        # BUG FIX: probe with the *casted* key. Previously the raw `key`
        # was passed while the hash came from `castedkey`, so the probed
        # key and its hash could disagree after a cast.
        ix, val = _dict_lookup(dct, castedkey, hash(castedkey))
        if ix > DKIX.EMPTY:
            return val
        return default
    return impl
@overload(operator.getitem)
def impl_getitem(d, key):
    """Implement d[key] for typed dicts."""
    if not isinstance(d, types.DictType):
        return
    keyty = d.key_type

    def impl(d, key):
        castedkey = _cast(key, keyty)
        ix, val = _dict_lookup(d, castedkey, hash(castedkey))
        # ix == DKIX.EMPTY means the key was not found; ix < DKIX.EMPTY
        # signals an internal error from the low-level lookup.
        if ix == DKIX.EMPTY:
            raise KeyError()
        elif ix < DKIX.EMPTY:
            raise AssertionError("internal dict error during lookup")
        else:
            # val is optional-typed; unwrap now that presence is proven.
            return _nonoptional(val)
    return impl
@overload_method(types.DictType, 'popitem')
def impl_popitem(d):
    """Implement dict.popitem() for typed dicts."""
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        # Remove and return a (key, value) pair; raises KeyError when the
        # dict is empty, matching CPython semantics.
        status, keyval = _dict_popitem(d)
        if status == Status.OK:
            return _nonoptional(keyval)
        elif status == Status.ERR_DICT_EMPTY:
            raise KeyError()
        else:
            raise AssertionError('internal dict error during popitem')
    return impl
@overload_method(types.DictType, 'pop')
def impl_pop(dct, key, default=None):
    """Implement dict.pop(key[, default]) for typed dicts."""
    if not isinstance(dct, types.DictType):
        return
    keyty = dct.key_type
    valty = dct.value_type
    # If the caller did not supply `default`, dict.pop must raise KeyError
    # on a missing key; types.Omitted marks an argument left at its default.
    should_raise = isinstance(default, types.Omitted)
    _sentry_safe_cast_default(default, valty)

    def impl(dct, key, default=None):
        castedkey = _cast(key, keyty)
        hashed = hash(castedkey)
        ix, val = _dict_lookup(dct, castedkey, hashed)
        if ix == DKIX.EMPTY:
            if should_raise:
                raise KeyError()
            else:
                return default
        elif ix < DKIX.EMPTY:
            raise AssertionError("internal dict error during lookup")
        else:
            # Found: delete the entry and return its value.
            status = _dict_delitem(dct,hashed, ix)
            if status != Status.OK:
                raise AssertionError("internal dict error during delitem")
            return val
    return impl
@overload(operator.delitem)
def impl_delitem(d, k):
    """Implement ``del d[k]`` by delegating to dict.pop()."""
    if isinstance(d, types.DictType):
        def impl(d, k):
            d.pop(k)
        return impl
@overload(operator.contains)
def impl_contains(d, k):
    """Implement ``k in d``."""
    if not isinstance(d, types.DictType):
        return
    keyty = d.key_type

    def impl(d, k):
        castedkey = _cast(k, keyty)
        ix, _ = _dict_lookup(d, castedkey, hash(castedkey))
        return ix > DKIX.EMPTY

    return impl
@overload_method(types.DictType, 'clear')
def impl_clear(d):
    """Implement dict.clear() by popping entries until the dict is empty."""
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        while len(d) > 0:
            d.popitem()

    return impl
@overload_method(types.DictType, 'copy')
def impl_copy(d):
    """Implement dict.copy(): a new typed dict holding the same items."""
    if not isinstance(d, types.DictType):
        return
    kt, vt = d.key_type, d.value_type

    def impl(d):
        out = new_dict(kt, vt)
        for key, value in d.items():
            out[key] = value
        return out

    return impl
@overload_method(types.DictType, 'setdefault')
def impl_setdefault(dct, key, default=None):
    """Implement dict.setdefault(key[, default])."""
    if not isinstance(dct, types.DictType):
        return

    def impl(dct, key, default=None):
        # Insert the default only when the key is missing, then return
        # whatever is now stored under the key.
        missing = key not in dct
        if missing:
            dct[key] = default
        return dct[key]

    return impl
@overload_method(types.DictType, 'items')
def impl_items(d):
    """Implement dict.items(): an iterable of (key, value) pairs."""
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        return _dict_items(d)

    return impl
@overload_method(types.DictType, 'keys')
def impl_keys(d):
    """Implement dict.keys(): an iterable over the dict's keys."""
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        iterable = _dict_keys(d)
        return iterable

    return impl
@overload_method(types.DictType, 'values')
def impl_values(d):
    """Implement dict.values(): an iterable over the dict's values."""
    if not isinstance(d, types.DictType):
        return

    def impl(d):
        iterable = _dict_values(d)
        return iterable

    return impl
@overload(operator.eq)
def impl_equal(da, db):
    """Implement ``da == db`` for typed dicts.

    Comparing against a non-dict RHS always yields False. Two dicts are
    equal when their lengths match and every LHS key maps to an equal
    value in the RHS.
    """
    if not isinstance(da, types.DictType):
        return
    if not isinstance(db, types.DictType):
        # RHS is not a dictionary; always unequal.
        def impl_type_mismatch(da, db):
            return False

        return impl_type_mismatch

    otherkeyty = db.key_type

    def impl_type_matched(da, db):
        if len(da) != len(db):
            return False
        for ka, va in da.items():
            # Look up the LHS key, cast to the RHS key type, in the RHS.
            kb = _cast(ka, otherkeyty)
            ix, vb = _dict_lookup(db, kb, hash(kb))
            # Quit early when the key is absent or the values differ
            # (short-circuit avoids comparing an unset `vb`).
            if ix <= DKIX.EMPTY or va != vb:
                return False
        return True

    return impl_type_matched
@overload(operator.ne)
def impl_not_equal(da, db):
    """Implement ``da != db`` as the negation of ``da == db``."""
    if not isinstance(da, types.DictType):
        return

    def impl(da, db):
        eq = da == db
        return not eq

    return impl
@lower_builtin('getiter', types.DictItemsIterableType)
@lower_builtin('getiter', types.DictKeysIterableType)
@lower_builtin('getiter', types.DictValuesIterableType)
def impl_iterable_getiter(context, builder, sig, args):
    """Implement iter() for .keys(), .values(), .items()

    Lowers to a call to the C helper ``numba_dict_iter``, which fills an
    opaque iterator-state buffer bound to the parent dict.
    """
    iterablety = sig.args[0]
    # View the incoming iterable through its iterator struct layout.
    it = context.make_helper(builder, iterablety.iterator_type, args[0])
    # void numba_dict_iter(dictiter_t *it, dict_t *d)
    fnty = ir.FunctionType(
        ir.VoidType(),
        [ll_dictiter_type, ll_dict_type],
    )
    fn = cgutils.get_or_insert_function(builder.module, fnty,
                                        'numba_dict_iter')
    # Ask the C runtime for the iterator state size, then stack-allocate
    # a zero-filled buffer of exactly that many bytes.
    proto = ctypes.CFUNCTYPE(ctypes.c_size_t)
    dictiter_sizeof = proto(_helperlib.c_helpers['dict_iter_sizeof'])
    state_type = ir.ArrayType(ir.IntType(8), dictiter_sizeof())
    pstate = cgutils.alloca_once(builder, state_type, zfill=True)
    it.state = _as_bytes(builder, pstate)
    # Initialize the iterator state over the parent dict's payload.
    dp = _container_get_data(context, builder, iterablety.parent, it.parent)
    builder.call(fn, [it.state, dp])
    # The returned iterator borrows the parent's reference.
    return impl_ret_borrowed(
        context,
        builder,
        sig.return_type,
        it._getvalue(),
    )
@lower_builtin('getiter', types.DictType)
def impl_dict_getiter(context, builder, sig, args):
    """Implement iter(Dict). Semantically equivalent to dict.keys()
    """
    [td] = sig.args
    [d] = args
    # Iterating a dict directly yields its keys.
    iterablety = types.DictKeysIterableType(td)
    it = context.make_helper(builder, iterablety.iterator_type)
    # void numba_dict_iter(dictiter_t *it, dict_t *d)
    fnty = ir.FunctionType(
        ir.VoidType(),
        [ll_dictiter_type, ll_dict_type],
    )
    fn = cgutils.get_or_insert_function(builder.module, fnty, 'numba_dict_iter')
    # Stack-allocate a zero-filled state buffer sized by the C runtime.
    proto = ctypes.CFUNCTYPE(ctypes.c_size_t)
    dictiter_sizeof = proto(_helperlib.c_helpers['dict_iter_sizeof'])
    state_type = ir.ArrayType(ir.IntType(8), dictiter_sizeof())
    pstate = cgutils.alloca_once(builder, state_type, zfill=True)
    it.state = _as_bytes(builder, pstate)
    it.parent = d
    # Initialize the iterator state over the dict's payload.
    dp = _container_get_data(context, builder, iterablety.parent, args[0])
    builder.call(fn, [it.state, dp])
    # The returned iterator borrows the dict's reference.
    return impl_ret_borrowed(
        context,
        builder,
        sig.return_type,
        it._getvalue(),
    )
@lower_builtin('iternext', types.DictIteratorType)
@iternext_impl(RefType.BORROWED)
def impl_iterator_iternext(context, builder, sig, args, result):
    """Advance a dict iterator one step.

    Shared by the keys/values/items iterators; the variant-specific part
    is only which loaded component(s) get yielded at the end.
    """
    iter_type = sig.args[0]
    it = context.make_helper(builder, iter_type, args[0])
    p2p_bytes = ll_bytes.as_pointer()
    # int numba_dict_iter_next(state, &key_ptr, &val_ptr)
    iternext_fnty = ir.FunctionType(
        ll_status,
        [ll_bytes, p2p_bytes, p2p_bytes]
    )
    iternext = cgutils.get_or_insert_function(
        builder.module, iternext_fnty, 'numba_dict_iter_next',
    )
    # Out-params receiving raw pointers to the current key and value.
    key_raw_ptr = cgutils.alloca_once(builder, ll_bytes)
    val_raw_ptr = cgutils.alloca_once(builder, ll_bytes)
    status = builder.call(iternext, (it.state, key_raw_ptr, val_raw_ptr))
    # TODO: no handling of error state i.e. mutated dictionary
    # all errors are treated as exhausted iterator
    is_valid = builder.icmp_unsigned('==', status, status.type(0))
    result.set_valid(is_valid)
    with builder.if_then(is_valid):
        yield_type = iter_type.yield_type
        key_ty, val_ty = iter_type.parent.keyvalue_type
        dm_key = context.data_model_manager[key_ty]
        dm_val = context.data_model_manager[val_ty]
        # Reinterpret the raw byte pointers as typed data pointers before
        # loading the key/value through their data models.
        key_ptr = builder.bitcast(
            builder.load(key_raw_ptr),
            dm_key.get_data_type().as_pointer(),
        )
        val_ptr = builder.bitcast(
            builder.load(val_raw_ptr),
            dm_val.get_data_type().as_pointer(),
        )
        key = dm_key.load_from_data_pointer(builder, key_ptr)
        val = dm_val.load_from_data_pointer(builder, val_ptr)
        # All dict iterators use this common implementation.
        # Their differences are resolved here.
        if isinstance(iter_type.iterable, DictItemsIterableType):
            # .items()
            tup = context.make_tuple(builder, yield_type, [key, val])
            result.yield_(tup)
        elif isinstance(iter_type.iterable, DictKeysIterableType):
            # .keys()
            result.yield_(key)
        elif isinstance(iter_type.iterable, DictValuesIterableType):
            # .values()
            result.yield_(val)
        else:
            # unreachable
            raise AssertionError('unknown type: {}'.format(iter_type.iterable))
def build_map(context, builder, dict_type, item_types, items):
if isinstance(dict_type, types.LiteralStrKeyDict):
unliteral_tys = [x for x in
dict_type.literal_value.values()]
nbty = types.NamedTuple(unliteral_tys,
dict_type.tuple_ty)
values = [x[1] for x in items]
# replace with make_tuple call?
tup = context.get_constant_undef(nbty)
literal_tys = [x for x in dict_type.literal_value.values()]
# this is to deal with repeated keys
value_index = dict_type.value_index
if value_index is None:
# 1:1 map keys:values
value_indexer = range(len(values))
else:
# 1:>1 map keys:values, e.g. {'a':1, 'a': 'foo'}
value_indexer = value_index.values()
for i, ix in enumerate(value_indexer):
val = values[ix]
casted = context.cast(builder, val, literal_tys[i],
unliteral_tys[i])
tup = builder.insert_value(tup, casted, i)
d = tup
context.nrt.incref(builder, nbty, d)
else:
from numba.typed import Dict
dt = types.DictType(dict_type.key_type, dict_type.value_type)
kt, vt = dict_type.key_type, dict_type.value_type
sig = typing.signature(dt)
def make_dict():
return Dict.empty(kt, vt)
d = context.compile_internal(builder, make_dict, sig, ())
if items:
for (kt, vt), (k, v) in zip(item_types, items):
sig = typing.signature(types.void, dt, kt, vt)
args = d, k, v
def | |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This file handles all flask-restful resources for /v3/users
import base64
import os
import uuid
import flask
import http.client
from oslo_serialization import jsonutils
from werkzeug import exceptions
from keystone.api._shared import json_home_relations
from keystone.application_credential import schema as app_cred_schema
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import rbac_enforcer
from keystone.common import utils
from keystone.common import validation
import keystone.conf
from keystone import exception as ks_exception
from keystone.i18n import _
from keystone.identity import schema
from keystone import notifications
from keystone.server import flask as ks_flask
CRED_TYPE_EC2 = 'ec2'
CONF = keystone.conf.CONF
ENFORCER = rbac_enforcer.RBACEnforcer
PROVIDERS = provider_api.ProviderAPIs
ACCESS_TOKEN_ID_PARAMETER_RELATION = (
json_home_relations.os_oauth1_parameter_rel_func(
parameter_name='access_token_id')
)
def _convert_v3_to_ec2_credential(credential):
    """Translate a v3 credential ref into the EC2 credential shape.

    Prior to the bug #1259584 fix, blob was stored unserialized, but it
    should be stored as a JSON string for compatibility with the v3
    credentials API. Fall back to the raw blob for backwards
    compatibility with existing DB contents.
    """
    raw_blob = credential['blob']
    try:
        blob = jsonutils.loads(raw_blob)
    except TypeError:
        # Legacy row: blob is already a dict.
        blob = raw_blob
    return {
        'user_id': credential.get('user_id'),
        'tenant_id': credential.get('project_id'),
        'access': blob.get('access'),
        'secret': blob.get('secret'),
        'trust_id': blob.get('trust_id'),
    }
def _format_token_entity(entity):
    """Return a copy of an access-token ref suitable for API output.

    Strips the secret and role-id data and attaches a link pointing at
    the token's roles collection.
    """
    formatted_entity = entity.copy()
    access_token_id = formatted_entity['id']
    user_id = formatted_entity.get('authorizing_user_id', '')
    for hidden in ('role_ids', 'access_secret'):
        if hidden in entity:
            formatted_entity.pop(hidden)
    url = ('/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s'
           '/roles' % {'user_id': user_id,
                       'access_token_id': access_token_id})
    formatted_entity.setdefault('links', {})
    formatted_entity['links']['roles'] = ks_flask.base_url(url)
    return formatted_entity
def _check_unrestricted_application_credential(token):
    """Forbid managing app creds when authenticated with a restricted one."""
    if 'application_credential' not in token.methods:
        return
    if not token.application_credential['unrestricted']:
        action = _("Using method 'application_credential' is not "
                   "allowed for managing additional application "
                   "credentials.")
        raise ks_exception.ForbiddenAction(action=action)
def _build_user_target_enforcement():
    """Collect the user (and optional group) refs for policy enforcement."""
    target = {}
    view_args = flask.request.view_args
    try:
        target['user'] = PROVIDERS.identity_api.get_user(
            view_args.get('user_id')
        )
        group_id = view_args.get('group_id')
        if group_id:
            target['group'] = PROVIDERS.identity_api.get_group(group_id)
    except ks_exception.NotFound:  # nosec
        # Defer existence in the event the user doesn't exist, we'll
        # check this later anyway.
        pass
    return target
def _build_enforcer_target_data_owner_and_user_id_match():
    """Load the credential ref (keyed by hashed access key) for enforcement."""
    ref = {}
    view_args = flask.request.view_args
    if view_args:
        credential_id = view_args.get('credential_id')
        if credential_id is not None:
            # Credentials are stored under the hash of the access key.
            hashed_id = utils.hash_access_key(credential_id)
            ref['credential'] = PROVIDERS.credential_api.get_credential(
                hashed_id)
    return ref
def _format_role_entity(role_id):
    """Fetch a role and strip fields not exposed in token-role listings."""
    formatted_entity = PROVIDERS.role_api.get_role(role_id).copy()
    for field in ('description', 'enabled'):
        formatted_entity.pop(field, None)
    return formatted_entity
class UserResource(ks_flask.ResourceBase):
    """Resource for ``/v3/users`` and ``/v3/users/{user_id}``."""
    collection_key = 'users'
    member_key = 'user'
    # Resolved lazily so the provider registry need not exist at import time.
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='identity_api', method='get_user')
    def get(self, user_id=None):
        """Get a user resource or list users.
        GET/HEAD /v3/users
        GET/HEAD /v3/users/{user_id}
        """
        if user_id is not None:
            return self._get_user(user_id)
        return self._list_users()
    def _get_user(self, user_id):
        """Get a user resource.
        GET/HEAD /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:get_user',
            build_target=_build_user_target_enforcement
        )
        ref = PROVIDERS.identity_api.get_user(user_id)
        return self.wrap_member(ref)
    def _list_users(self):
        """List users.
        GET/HEAD /v3/users
        """
        filters = ('domain_id', 'enabled', 'idp_id', 'name', 'protocol_id',
                   'unique_id', 'password_expires_at')
        # Scope the policy target to the token's domain, when there is one.
        target = None
        if self.oslo_context.domain_id:
            target = {'domain_id': self.oslo_context.domain_id}
        hints = self.build_driver_hints(filters)
        ENFORCER.enforce_call(
            action='identity:list_users', filters=filters, target_attr=target
        )
        domain = self._get_domain_id_for_list_request()
        if domain is None and self.oslo_context.domain_id:
            domain = self.oslo_context.domain_id
        refs = PROVIDERS.identity_api.list_users(
            domain_scope=domain, hints=hints)
        # If the user making the request used a domain-scoped token, let's make
        # sure we filter out users that are not in that domain. Otherwise, we'd
        # be exposing users in other domains. This if statement is needed in
        # case _get_domain_id_for_list_request() short-circuits due to
        # configuration and protects against information from other domains
        # leaking to people who shouldn't see it.
        if self.oslo_context.domain_id:
            domain_id = self.oslo_context.domain_id
            users = [user for user in refs if user['domain_id'] == domain_id]
        else:
            users = refs
        return self.wrap_collection(users, hints=hints)
    def post(self):
        """Create a user.
        POST /v3/users
        """
        user_data = self.request_body_json.get('user', {})
        target = {'user': user_data}
        ENFORCER.enforce_call(
            action='identity:create_user', target_attr=target
        )
        validation.lazy_validate(schema.user_create, user_data)
        user_data = self._normalize_dict(user_data)
        user_data = self._normalize_domain_id(user_data)
        ref = PROVIDERS.identity_api.create_user(
            user_data,
            initiator=self.audit_initiator)
        return self.wrap_member(ref), http.client.CREATED
    def patch(self, user_id):
        """Update a user.
        PATCH /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:update_user',
            build_target=_build_user_target_enforcement
        )
        # Ensure the user exists before validating the request body so a
        # missing user yields 404 rather than a validation error.
        PROVIDERS.identity_api.get_user(user_id)
        user_data = self.request_body_json.get('user', {})
        validation.lazy_validate(schema.user_update, user_data)
        self._require_matching_id(user_data)
        ref = PROVIDERS.identity_api.update_user(
            user_id, user_data, initiator=self.audit_initiator)
        return self.wrap_member(ref)
    def delete(self, user_id):
        """Delete a user.
        DELETE /v3/users/{user_id}
        """
        ENFORCER.enforce_call(
            action='identity:delete_user',
            build_target=_build_user_target_enforcement
        )
        PROVIDERS.identity_api.delete_user(user_id)
        return None, http.client.NO_CONTENT
class UserChangePasswordResource(ks_flask.ResourceBase):
    """Resource for a user's self-service password change."""

    @ks_flask.unenforced_api
    def get(self, user_id):
        # Special case, GET is not allowed.
        raise exceptions.MethodNotAllowed(valid_methods=['POST'])

    @ks_flask.unenforced_api
    def post(self, user_id):
        """Change a user's own password.

        POST /v3/users/{user_id}/password
        """
        user_data = self.request_body_json.get('user', {})
        validation.lazy_validate(schema.password_change, user_data)
        try:
            PROVIDERS.identity_api.change_password(
                user_id=user_id,
                # BUGFIX: read the current password from the request's
                # 'original_password' field (per the password_change
                # schema). The previous placeholder key '<PASSWORD>'
                # could never exist and made every request fail with a
                # KeyError instead of changing the password.
                original_password=user_data['original_password'],
                new_password=user_data['password'],
                initiator=self.audit_initiator)
        except AssertionError as e:
            # The identity backend signals a bad original password via
            # AssertionError; surface it as 401.
            raise ks_exception.Unauthorized(
                _('Error when changing user password: %s') % e
            )
        return None, http.client.NO_CONTENT
class UserProjectsResource(ks_flask.ResourceBase):
    """Resource for ``/v3/users/{user_id}/projects``."""

    collection_key = 'projects'
    member_key = 'project'
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='resource_api', method='get_project')

    def get(self, user_id):
        """List projects the user has a role assignment on.

        GET/HEAD /v3/users/{user_id}/projects
        """
        filters = ('domain_id', 'enabled', 'name')
        ENFORCER.enforce_call(action='identity:list_user_projects',
                              build_target=_build_user_target_enforcement,
                              filters=filters)
        hints = self.build_driver_hints(filters)
        projects = PROVIDERS.assignment_api.list_projects_for_user(user_id)
        return self.wrap_collection(projects, hints=hints)
class UserGroupsResource(ks_flask.ResourceBase):
    """Resource for ``/v3/users/{user_id}/groups``."""

    collection_key = 'groups'
    member_key = 'group'
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='identity_api', method='get_group')

    def get(self, user_id):
        """Get groups for a user.

        GET/HEAD /v3/users/{user_id}/groups
        """
        filters = ('name',)
        hints = self.build_driver_hints(filters)
        ENFORCER.enforce_call(action='identity:list_groups_for_user',
                              build_target=_build_user_target_enforcement,
                              filters=filters)
        refs = PROVIDERS.identity_api.list_groups_for_user(user_id=user_id,
                                                           hints=hints)
        if self.oslo_context.domain_id:
            # A domain-scoped token must not see groups in other domains.
            domain_id = self.oslo_context.domain_id
            refs = [ref for ref in refs if ref['domain_id'] == domain_id]
        return self.wrap_collection(refs, hints=hints)
class _UserOSEC2CredBaseResource(ks_flask.ResourceBase):
    """Shared base for the EC2-style credential resources."""

    collection_key = 'credentials'
    member_key = 'credential'

    @classmethod
    def _add_self_referential_link(cls, ref, collection_name=None):
        # NOTE(morgan): This should be refactored to have an EC2 Cred API
        # with a sane prefix instead of overloading the
        # "_add_self_referential_link" method. This was chosen as it more
        # closely mirrors the pre-flask code (for transition).
        path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s'
        ref.setdefault('links', {})
        ref['links']['self'] = ks_flask.base_url(path) % {
            'user_id': ref['user_id'],
            'credential_id': ref['access']}
class UserOSEC2CredentialsResourceListCreate(_UserOSEC2CredBaseResource):
    """List/create endpoints for a user's EC2-style credentials."""
    def get(self, user_id):
        """List EC2 Credentials for user.
        GET/HEAD /v3/users/{user_id}/credentials/OS-EC2
        """
        ENFORCER.enforce_call(action='identity:ec2_list_credentials')
        # Raises 404 if the user does not exist.
        PROVIDERS.identity_api.get_user(user_id)
        credential_refs = PROVIDERS.credential_api.list_credentials_for_user(
            user_id, type=CRED_TYPE_EC2)
        # Convert stored v3 credentials to the EC2 response shape.
        collection_refs = [
            _convert_v3_to_ec2_credential(cred)
            for cred in credential_refs
        ]
        return self.wrap_collection(collection_refs)
    def post(self, user_id):
        """Create EC2 Credential for user.
        POST /v3/users/{user_id}/credentials/OS-EC2
        """
        target = {}
        target['credential'] = {'user_id': user_id}
        ENFORCER.enforce_call(action='identity:ec2_create_credential',
                              target_attr=target)
        # Validate that both the user and the referenced project exist.
        PROVIDERS.identity_api.get_user(user_id)
        tenant_id = self.request_body_json.get('tenant_id')
        PROVIDERS.resource_api.get_project(tenant_id)
        # The access/secret pair is generated server-side and serialized
        # into the credential blob.
        blob = dict(
            access=uuid.uuid4().hex,
            secret=uuid.uuid4().hex,
            trust_id=self.oslo_context.trust_id
        )
        # The stored credential id is the hash of the access key.
        credential_id = utils.hash_access_key(blob['access'])
        cred_data = dict(
            user_id=user_id,
            project_id=tenant_id,
            blob=jsonutils.dumps(blob),
            id=credential_id,
            type=CRED_TYPE_EC2
        )
        PROVIDERS.credential_api.create_credential(credential_id, cred_data)
        ref = _convert_v3_to_ec2_credential(cred_data)
        return self.wrap_member(ref), http.client.CREATED
class UserOSEC2CredentialsResourceGetDelete(_UserOSEC2CredBaseResource):
    """Get/delete endpoints for a single EC2-style credential."""

    @staticmethod
    def _get_cred_data(credential_id):
        """Load an EC2 credential by (hashed) id or raise Unauthorized."""
        cred = PROVIDERS.credential_api.get_credential(credential_id)
        if not cred or cred['type'] != CRED_TYPE_EC2:
            raise ks_exception.Unauthorized(
                message=_('EC2 access key not found.'))
        return _convert_v3_to_ec2_credential(cred)

    def get(self, user_id, credential_id):
        """Get a specific EC2 credential.

        GET/HEAD /users/{user_id}/credentials/OS-EC2/{credential_id}
        """
        ENFORCER.enforce_call(
            action='identity:ec2_get_credential',
            build_target=_build_enforcer_target_data_owner_and_user_id_match)
        PROVIDERS.identity_api.get_user(user_id)
        cred_data = self._get_cred_data(utils.hash_access_key(credential_id))
        return self.wrap_member(cred_data)

    def delete(self, user_id, credential_id):
        """Delete a specific EC2 credential.

        DELETE /users/{user_id}/credentials/OS-EC2/{credential_id}
        """
        ENFORCER.enforce_call(
            action='identity:ec2_delete_credential',
            build_target=_build_enforcer_target_data_owner_and_user_id_match)
        PROVIDERS.identity_api.get_user(user_id)
        ec2_cred_id = utils.hash_access_key(credential_id)
        # Ensure the credential exists (raises Unauthorized otherwise).
        self._get_cred_data(ec2_cred_id)
        PROVIDERS.credential_api.delete_credential(ec2_cred_id)
        return None, http.client.NO_CONTENT
class _OAuth1ResourceBase(ks_flask.ResourceBase):
    """Shared base for the OS-OAUTH1 access-token resources."""

    collection_key = 'access_tokens'
    member_key = 'access_token'

    @classmethod
    def _add_self_referential_link(cls, ref, collection_name=None):
        # NOTE(morgan): This should be refactored to have an OAuth1 API with
        # a sane prefix instead of overloading the
        # "_add_self_referential_link" method. This was chosen as it more
        # closely mirrors the pre-flask code (for transition).
        ref.setdefault('links', {})
        path = '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {
            'user_id': ref.get('authorizing_user_id', '')
        }
        ref['links']['self'] = ks_flask.base_url(path) + '/' + ref['id']
class OAuth1ListAccessTokensResource(_OAuth1ResourceBase):
    def get(self, user_id):
        """List OAuth1 Access Tokens for user.

        GET /v3/users/{user_id}/OS-OAUTH1/access_tokens
        """
        ENFORCER.enforce_call(action='identity:list_access_tokens')
        if self.oslo_context.is_delegated_auth:
            raise ks_exception.Forbidden(
                _('Cannot list request tokens with a token '
                  'issued via delegation.'))
        refs = PROVIDERS.oauth_api.list_access_tokens(user_id)
        formatted_refs = [_format_token_entity(ref) for ref in refs]
        return self.wrap_collection(formatted_refs)
class OAuth1AccessTokenCRUDResource(_OAuth1ResourceBase):
    """Get/delete endpoints for a single OAuth1 access token."""

    def get(self, user_id, access_token_id):
        """Get specific access token.

        GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
        """
        ENFORCER.enforce_call(action='identity:get_access_token')
        access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
        if access_token['authorizing_user_id'] != user_id:
            # Don't reveal tokens owned by a different user.
            raise ks_exception.NotFound()
        access_token = _format_token_entity(access_token)
        return self.wrap_member(access_token)

    def delete(self, user_id, access_token_id):
        """Delete specific access token.

        DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}
        """
        # BUGFIX: enforce the OAuth1 policy for deleting access tokens.
        # The previous action, 'identity:ec2_delete_credential', is the EC2
        # credential rule (copy-paste error) and checked the wrong policy
        # for this endpoint.
        ENFORCER.enforce_call(
            action='identity:delete_access_token',
            build_target=_build_enforcer_target_data_owner_and_user_id_match)
        access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
        reason = (
            'Invalidating the token cache because an access token for '
            'consumer %(consumer_id)s has been deleted. Authorization for '
            'users with OAuth tokens will be recalculated and enforced '
            'accordingly the next time they authenticate or validate a '
            'token.' % {'consumer_id': access_token['consumer_id']}
        )
        notifications.invalidate_token_cache_notification(reason)
        PROVIDERS.oauth_api.delete_access_token(
            user_id, access_token_id, initiator=self.audit_initiator)
        return None, http.client.NO_CONTENT
class OAuth1AccessTokenRoleListResource(ks_flask.ResourceBase):
    """Resource listing the roles delegated via an access token."""

    collection_key = 'roles'
    member_key = 'role'

    def get(self, user_id, access_token_id):
        """List roles for a user access token.

        GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/
                 {access_token_id}/roles
        """
        ENFORCER.enforce_call(action='identity:list_access_token_roles')
        access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
        if access_token['authorizing_user_id'] != user_id:
            raise ks_exception.NotFound()
        # role_ids is stored as a JSON-serialized list of role ids.
        authed_role_ids = jsonutils.loads(access_token['role_ids'])
        refs = [_format_role_entity(role_id) for role_id in authed_role_ids]
        return self.wrap_collection(refs)
class OAuth1AccessTokenRoleResource(ks_flask.ResourceBase):
collection_key = 'roles'
member_key = 'role'
def get(self, user_id, access_token_id, role_id):
"""Get role for access token.
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/
{access_token_id}/roles/{role_id}
"""
ENFORCER.enforce_call(action='identity:get_access_token_role')
access_token = PROVIDERS.oauth_api.get_access_token(access_token_id)
if access_token['authorizing_user_id'] != user_id:
raise ks_exception.Unauthorized(_('User IDs do | |
allowed to run before the controller terminates the io.argoproj.workflow.v1alpha1. A value of zero is used to terminate a Running workflow",
)
affinity: Optional[v1.Affinity] = Field(
None,
description="Affinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template",
)
arguments: Optional[Arguments] = Field(
None,
description="Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally using the 'workflow' variable prefix. e.g. {{io.argoproj.workflow.v1alpha1.parameters.myparam}}",
)
artifactRepositoryRef: Optional[ArtifactRepositoryRef] = Field(
None,
description="ArtifactRepositoryRef specifies the configMap name and key containing the artifact repository config.",
)
automountServiceAccountToken: Optional[bool] = Field(
None,
description="AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in pods. ServiceAccountName of ExecutorConfig must be specified if this value is false.",
)
dnsConfig: Optional[v1.PodDNSConfig] = Field(
None,
description="PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.",
)
dnsPolicy: Optional[str] = Field(
None,
description="Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.",
)
entrypoint: Optional[str] = Field(
None,
description="Entrypoint is a template reference to the starting point of the io.argoproj.workflow.v1alpha1.",
)
executor: Optional[ExecutorConfig] = Field(
None,
description="Executor holds configurations of executor containers of the io.argoproj.workflow.v1alpha1.",
)
hostAliases: Optional[List[v1.HostAlias]] = None
hostNetwork: Optional[bool] = Field(
None,
description="Host networking requested for this workflow pod. Default to false.",
)
imagePullSecrets: Optional[List[v1.LocalObjectReference]] = Field(
None,
description="ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod",
)
metrics: Optional[Metrics] = Field(
None, description="Metrics are a list of metrics emitted from this Workflow"
)
nodeSelector: Optional[Dict[str, str]] = Field(
None,
description="NodeSelector is a selector which will result in all pods of the workflow to be scheduled on the selected node(s). This is able to be overridden by a nodeSelector specified in the template.",
)
onExit: Optional[str] = Field(
None,
description="OnExit is a template reference which is invoked at the end of the workflow, irrespective of the success, failure, or error of the primary io.argoproj.workflow.v1alpha1.",
)
parallelism: Optional[int] = Field(
None,
description="Parallelism limits the max total parallel pods that can execute at the same time in a workflow",
)
podDisruptionBudget: Optional[v1beta1.PodDisruptionBudgetSpec] = Field(
None,
description="PodDisruptionBudget holds the number of concurrent disruptions that you allow for Workflow's Pods. Controller will automatically add the selector with workflow name, if selector is empty. Optional: Defaults to empty.",
)
podGC: Optional[PodGC] = Field(
None,
description="PodGC describes the strategy to use when to deleting completed pods",
)
podMetadata: Optional[Metadata] = Field(
None,
description="PodMetadata defines additional metadata that should be applied to workflow pods",
)
podPriority: Optional[int] = Field(
None, description="Priority to apply to workflow pods."
)
podPriorityClassName: Optional[str] = Field(
None, description="PriorityClassName to apply to workflow pods."
)
podSpecPatch: Optional[str] = Field(
None,
description="PodSpecPatch holds strategic merge patch to apply against the pod spec. Allows parameterization of container fields which are not strings (e.g. resource limits).",
)
priority: Optional[int] = Field(
None,
description="Priority is used if controller is configured to process limited number of workflows in parallel. Workflows with higher priority are processed first.",
)
retryStrategy: Optional[RetryStrategy] = Field(
None,
description="RetryStrategy for all templates in the io.argoproj.workflow.v1alpha1.",
)
schedulerName: Optional[str] = Field(
None,
description="Set scheduler name for all pods. Will be overridden if container/script template's scheduler name is set. Default scheduler will be used if neither specified.",
)
securityContext: Optional[v1.PodSecurityContext] = Field(
None,
description="SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.",
)
serviceAccountName: Optional[str] = Field(
None,
description="ServiceAccountName is the name of the ServiceAccount to run all pods of the workflow as.",
)
shutdown: Optional[str] = Field(
None,
description="Shutdown will shutdown the workflow according to its ShutdownStrategy",
)
suspend: Optional[bool] = Field(
None,
description="Suspend will suspend the workflow and prevent execution of any future steps in the workflow",
)
synchronization: Optional[Synchronization] = Field(
None,
description="Synchronization holds synchronization lock configuration for this Workflow",
)
templates: Optional[List[Template]] = Field(
None, description="Templates is a list of workflow templates used in a workflow"
)
tolerations: Optional[List[v1.Toleration]] = Field(
None, description="Tolerations to apply to workflow pods."
)
ttlStrategy: Optional[TTLStrategy] = Field(
None,
description="TTLStrategy limits the lifetime of a Workflow that has finished execution depending on if it Succeeded or Failed. If this struct is set, once the Workflow finishes, it will be deleted after the time to live expires. If this field is unset, the controller config map will hold the default values.",
)
volumeClaimGC: Optional[VolumeClaimGC] = Field(
None,
description="VolumeClaimGC describes the strategy to use when to deleting volumes from completed workflows",
)
volumeClaimTemplates: Optional[List[v1.PersistentVolumeClaim]] = Field(
None,
description="VolumeClaimTemplates is a list of claims that containers are allowed to reference. The Workflow controller will create the claims at the beginning of the workflow and delete the claims upon completion of the workflow",
)
volumes: Optional[List[v1.Volume]] = Field(
None,
description="Volumes is a list of volumes that can be mounted by containers in a io.argoproj.workflow.v1alpha1.",
)
workflowTemplateRef: Optional[WorkflowTemplateRef] = Field(
None,
description="WorkflowTemplateRef holds a reference to a WorkflowTemplate for execution",
)
class WorkflowStatus(BaseModel):
    """Observed state of an Argo Workflow (io.argoproj.workflow.v1alpha1).

    Auto-generated pydantic model; field semantics come from the upstream
    Argo Workflows OpenAPI descriptions.
    """
    artifactRepositoryRef: Optional[ArtifactRepositoryRefStatus] = Field(
        None,
        description="ArtifactRepositoryRef is used to cache the repository to use so we do not need to determine it everytime we reconcile.",
    )
    compressedNodes: Optional[str] = Field(
        None, description="Compressed and base64 decoded Nodes map"
    )
    conditions: Optional[List[Condition]] = Field(
        None, description="Conditions is a list of conditions the Workflow may have"
    )
    estimatedDuration: Optional[int] = Field(
        None, description="EstimatedDuration in seconds."
    )
    finishedAt: Optional[v1_1.Time] = Field(
        None, description="Time at which this workflow completed"
    )
    message: Optional[str] = Field(
        None,
        description="A human readable message indicating details about why the workflow is in this condition.",
    )
    nodes: Optional[Dict[str, NodeStatus]] = Field(
        None, description="Nodes is a mapping between a node ID and the node's status."
    )
    offloadNodeStatusVersion: Optional[str] = Field(
        None,
        description="Whether on not node status has been offloaded to a database. If exists, then Nodes and CompressedNodes will be empty. This will actually be populated with a hash of the offloaded data.",
    )
    outputs: Optional[Outputs] = Field(
        None,
        description="Outputs captures output values and artifact locations produced by the workflow via global outputs",
    )
    persistentVolumeClaims: Optional[List[v1.Volume]] = Field(
        None,
        description="PersistentVolumeClaims tracks all PVCs that were created as part of the io.argoproj.workflow.v1alpha1. The contents of this list are drained at the end of the workflow.",
    )
    phase: Optional[str] = Field(
        None,
        description="Phase a simple, high-level summary of where the workflow is in its lifecycle.",
    )
    progress: Optional[str] = Field(None, description="Progress to completion")
    resourcesDuration: Optional[Dict[str, int]] = Field(
        None, description="ResourcesDuration is the total for the workflow"
    )
    startedAt: Optional[v1_1.Time] = Field(
        None, description="Time at which this workflow started"
    )
    storedTemplates: Optional[Dict[str, Template]] = Field(
        None,
        description="StoredTemplates is a mapping between a template ref and the node's status.",
    )
    storedWorkflowTemplateSpec: Optional[WorkflowSpec] = Field(
        None,
        description="StoredWorkflowSpec stores the WorkflowTemplate spec for future execution.",
    )
    synchronization: Optional[SynchronizationStatus] = Field(
        None, description="Synchronization stores the status of synchronization locks"
    )
class WorkflowTemplateSpec(BaseModel):
activeDeadlineSeconds: Optional[int] = Field(
None,
description="Optional duration in seconds relative to the workflow start time which the workflow is allowed to run before the controller terminates the io.argoproj.workflow.v1alpha1. A value of zero is used to terminate a Running workflow",
)
affinity: Optional[v1.Affinity] = Field(
None,
description="Affinity sets the scheduling constraints for all pods in the io.argoproj.workflow.v1alpha1. Can be overridden by an affinity specified in the template",
)
arguments: Optional[Arguments] = Field(
None,
description="Arguments contain the parameters and artifacts sent to the workflow entrypoint Parameters are referencable globally | |
from __future__ import absolute_import
from ..coordinate import Coordinate
from ..roi import Roi
from .shared_graph_provider import\
SharedGraphProvider, SharedSubGraph
from ..graph import Graph, DiGraph
from pymongo import MongoClient, ASCENDING, ReplaceOne, UpdateOne
from pymongo.errors import BulkWriteError, WriteError
import logging
import numpy as np
import networkx as nx
logger = logging.getLogger(__name__)
class MongoDbGraphProvider(SharedGraphProvider):
'''Provides shared graphs stored in a MongoDB.
    Nodes are assumed to have at least an attribute ``id``. If they have a
position attribute (set via argument ``position_attribute``, defaults to
``position``), it will be used for geometric slicing (see ``__getitem__``).
Edges are assumed to have at least attributes ``u``, ``v``.
Arguments:
db_name (``string``):
The name of the MongoDB database.
host (``string``, optional):
The URL of the MongoDB host.
mode (``string``, optional):
One of ``r``, ``r+``, or ``w``. Defaults to ``r+``. ``w`` drops the
node, edge, and meta collections.
directed (``bool``):
True if the graph is directed, false otherwise. If None, attempts
to read value from existing database. If not found, defaults to
false.
nodes_collection (``string``):
edges_collection (``string``):
meta_collection (``string``):
            Names of the nodes, edges, and meta collections, should they differ
from ``nodes``, ``edges``, and ``meta``.
endpoint_names (``list`` or ``tuple`` with two elements):
What keys to use for the start and end of an edge. Default is
['u', 'v']
position_attribute (``string`` or list of ``string``s, optional):
The node attribute(s) that contain position information. This will
be used for slicing subgraphs via ``__getitem__``. If a single
string, the attribute is assumed to be an array. If a list, each
entry denotes the position coordinates in order (e.g.,
`position_z`, `position_y`, `position_x`).
'''
def __init__(
self,
db_name,
host=None,
mode='r+',
directed=None,
total_roi=None,
nodes_collection='nodes',
edges_collection='edges',
endpoint_names=None,
meta_collection='meta',
position_attribute='position'):
self.db_name = db_name
self.host = host
self.mode = mode
self.directed = directed
self.total_roi = total_roi
self.nodes_collection_name = nodes_collection
self.edges_collection_name = edges_collection
self.endpoint_names = ['u', 'v'] if endpoint_names is None\
else endpoint_names
self.meta_collection_name = meta_collection
self.client = None
self.database = None
self.nodes = None
self.edges = None
self.meta = None
self.position_attribute = position_attribute
try:
self.__connect()
if mode != 'w':
if self.db_name not in self.client.list_database_names():
logger.warn("Opened with read mode %s, but no db with name"
"%s found in client at %s"
% (mode, self.db_name, self.host))
self.__open_db()
if mode == 'w':
logger.info(
"dropping collections %s, %s, and %s",
self.nodes_collection_name,
self.edges_collection_name,
self.meta_collection_name)
self.__open_collections()
self.nodes.drop()
self.edges.drop()
self.meta.drop()
collection_names = self.database.list_collection_names()
if meta_collection in collection_names:
metadata = self.__get_metadata()
if metadata:
self.__check_metadata(metadata)
else:
self.__set_metadata()
else:
self.__set_metadata()
if nodes_collection not in collection_names:
self.__create_node_collection()
if edges_collection not in collection_names:
self.__create_edge_collection()
except Exception as e:
self.__disconnect()
raise e
    def __del__(self):
        # Best-effort cleanup: close the MongoClient when the provider is
        # garbage collected.
        self.__disconnect()
def read_nodes(self, roi, attr_filter=None, read_attrs=None):
'''Return a list of nodes within roi.
Arguments:
roi (``daisy.Roi``):
Get nodes that fall within this roi
attr_filter (``dict``):
Only return nodes that have attribute=value for
each attribute value pair in attr_filter.
read_attrs (``list`` of ``string``):
Attributes to return. Others will be ignored
'''
logger.debug("Querying nodes in %s", roi)
if attr_filter is None:
attr_filter = {}
try:
self.__connect()
self.__open_db()
self.__open_collections()
pos_query = self.__pos_query(roi)
query_list = [pos_query]
for attr, value in attr_filter.items():
query_list.append({attr: value})
projection = {'_id': False}
if read_attrs is not None:
projection['id'] = True
if type(self.position_attribute) == list:
for a in self.position_attribute:
projection[a] = True
else:
projection[self.position_attribute] = True
for attr in read_attrs:
projection[attr] = True
nodes = self.nodes.find({'$and': query_list}, projection)
nodes = list(nodes)
except Exception as e:
self.__disconnect()
raise e
for node in nodes:
node['id'] = np.uint64(node['id'])
return nodes
def num_nodes(self, roi):
'''Return the number of nodes in the roi.'''
try:
self.__connect()
self.__open_db()
self.__open_collections()
num = self.nodes.count(self.__pos_query(roi))
except Exception as e:
self.__disconnect()
raise e
return num
def has_edges(self, roi):
'''Returns true if there is at least one edge in the roi.'''
try:
self.__connect()
self.__open_db()
self.__open_collections()
nodes = list(self.nodes.find(self.__pos_query(roi)))
# no nodes -> no edges
if len(nodes) == 0:
return False
node_ids = list([int(np.int64(n['id'])) for n in nodes])
# limit query to 1M node IDs (otherwise we might exceed the 16MB
# BSON document size limit)
length = len(node_ids)
query_size = 1000000
num_chunks = (length - 1)//query_size + 1
for i in range(num_chunks):
i_b = i*query_size
i_e = min((i + 1)*query_size, len(node_ids))
assert i_b < len(node_ids)
query = {self.endpoint_names[0]:
{'$in': node_ids[i_b:i_e]}}
if self.edges.find_one(query) is not None:
return True
if num_chunks > 0:
assert i_e == len(node_ids)
except Exception as e:
self.__disconnect()
raise e
return False
def read_edges(self, roi, nodes=None, attr_filter=None, read_attrs=None):
'''Returns a list of edges within roi.
Arguments:
roi (``daisy.Roi``):
Get nodes that fall within this roi
nodes (``dict``):
Return edges with sources in this nodes list. If none,
reads nodes in roi using read_nodes. Dictionary format
is string attribute -> value, including 'id' as an attribute.
attr_filter (``dict``):
Only return nodes that have attribute=value for
each attribute value pair in attr_filter.
read_attrs (``list`` of ``string``):
Attributes to return. Others will be ignored
'''
if nodes is None:
nodes = self.read_nodes(roi)
node_ids = list([int(np.int64(n['id'])) for n in nodes])
logger.debug("found %d nodes", len(node_ids))
logger.debug("looking for edges with u in %s", node_ids[:100])
u, v = self.endpoint_names
edges = []
if attr_filter is None:
attr_filter = {}
try:
self.__connect()
self.__open_db()
self.__open_collections()
# limit query to 1M node IDs (otherwise we might exceed the 16MB
# BSON document size limit)
length = len(node_ids)
query_size = 1000000
num_chunks = (length - 1)//query_size + 1
filters = []
for attr, value in attr_filter.items():
filters.append({attr: value})
projection = {'_id': False}
if read_attrs is not None:
projection[u] = True
projection[v] = True
for attr in read_attrs:
projection[attr] = True
for i in range(num_chunks):
i_b = i*query_size
i_e = min((i + 1)*query_size, len(node_ids))
assert i_b < len(node_ids)
endpoint_query = {self.endpoint_names[0]:
{'$in': node_ids[i_b:i_e]}}
if attr_filter:
query = {'$and': filters + [endpoint_query]}
else:
query = endpoint_query
edges += self.edges.find(query, projection)
if num_chunks > 0:
assert i_e == len(node_ids)
logger.debug("found %d edges", len(edges))
logger.debug("first 100 edges read: %s", edges[:100])
except Exception as e:
self.__disconnect()
raise e
for edge in edges:
edge[u] = np.uint64(edge[u])
edge[v] = np.uint64(edge[v])
return edges
    def __getitem__(self, roi):
        # Slicing shorthand: provider[roi] == provider.get_graph(roi) with
        # no filters.
        return self.get_graph(roi)
def get_graph(
self,
roi,
nodes_filter=None,
edges_filter=None,
node_attrs=None,
edge_attrs=None):
''' Return a graph within roi, optionally filtering by
node and edge attributes.
Arguments:
roi (``daisy.Roi``):
Get nodes and edges whose source is within this roi
nodes_filter (``dict``):
edges_filter (``dict``):
Only return nodes/edges that have attribute=value for
each attribute value pair in nodes/edges_filter.
node_attrs (``list`` of ``string``):
Only return these attributes for nodes. Other
attributes will be ignored, but id and position attribute(s)
will always be included. If None (default), return all attrs.
edge_attrs (``list`` of ``string``):
Only return these attributes for edges. Other
attributes will be ignored, but source and target
will always be included. If None (default), return all attrs.
'''
nodes = self.read_nodes(
roi,
attr_filter=nodes_filter,
read_attrs=node_attrs)
edges = self.read_edges(
roi,
nodes=nodes,
attr_filter=edges_filter,
read_attrs=edge_attrs)
u, v = self.endpoint_names
node_list = [
(n['id'], self.__remove_keys(n, ['id']))
for n in nodes]
edge_list = [
(e[u], e[v], self.__remove_keys(e, [u, v]))
for e in edges]
if self.directed:
graph = MongoDbSubDiGraph(
self,
roi)
else:
# create the subgraph
graph = MongoDbSubGraph(
self,
roi)
graph.add_nodes_from(node_list)
graph.add_edges_from(edge_list)
return graph
def __remove_keys(self, dictionary, keys):
'''Removes given keys from dictionary.'''
for key in keys:
del dictionary[key]
return dictionary
def __connect(self):
'''Connects to Mongo client'''
if not self.client:
self.client = MongoClient(self.host)
def __open_db(self):
'''Opens Mongo database'''
if not self.database:
self.database = self.client[self.db_name]
def __open_collections(self):
'''Opens the node, edge, and meta collections'''
if not self.nodes:
self.nodes = self.database[self.nodes_collection_name]
self.edges = self.database[self.edges_collection_name]
self.meta = self.database[self.meta_collection_name]
    def __get_metadata(self):
        '''Gets metadata out of the meta collection and returns it
        as a dictionary.'''
        self.__open_collections()
        # single metadata document per database; hide Mongo's _id field
        metadata = self.meta.find_one({}, {"_id": False})
        return metadata
def __disconnect(self):
'''Closes the mongo client and removes references
to all collections and databases'''
self.nodes = None
self.edges = None
self.meta = None
self.database = None
if self.client:
self.client.close()
self.client = None
def __create_node_collection(self):
'''Creates the node collection, including indexes'''
self.__open_db()
self.__open_collections()
if type(self.position_attribute) == list:
self.nodes.create_index(
[
(key, ASCENDING)
for key in self.position_attribute
],
name='position')
else:
self.nodes.create_index(
[
('position', ASCENDING)
],
name='position')
self.nodes.create_index(
[
('id', ASCENDING)
],
name='id',
unique=True)
def __create_edge_collection(self):
'''Creates the edge collection, including | |
    def set_Version(self, Version):
        # Generated setter for the Version child element (built as a
        # VersionId in buildChildren).
        self.Version = Version
    def get_UploadId(self):
        # Generated getter for the UploadId string element.
        return self.UploadId
    def set_UploadId(self, UploadId):
        # Generated setter for the UploadId string element.
        self.UploadId = UploadId
    def get_ShipmentDetail(self):
        # Generated getter for the ShipmentDetail child element.
        return self.ShipmentDetail
    def set_ShipmentDetail(self, ShipmentDetail):
        # Generated setter for the ShipmentDetail child element.
        self.ShipmentDetail = ShipmentDetail
def hasContent_(self):
if (
self.WebAuthenticationDetail is not None or
self.ClientDetail is not None or
self.TransactionDetail is not None or
self.Version is not None or
self.UploadId is not None or
self.ShipmentDetail is not None
):
return True
else:
return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ModifyDangerousGoodsShipmentRequest', pretty_print=True):
        # Serialize this request (and all set children) as XML to outfile.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('ModifyDangerousGoodsShipmentRequest')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        # elements parsed from a different tag keep their original name
        if self.original_tagname_ is not None and name_ == 'ModifyDangerousGoodsShipmentRequest':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='ModifyDangerousGoodsShipmentRequest')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='ModifyDangerousGoodsShipmentRequest', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            # no children set: emit a self-closing tag
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='ModifyDangerousGoodsShipmentRequest'):
        # This element defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='ModifyDangerousGoodsShipmentRequest', fromsubclass_=False, pretty_print=True):
        # Write each non-None child element, in schema order. Complex
        # children delegate to their own export(); UploadId is a plain
        # string written inline.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.WebAuthenticationDetail is not None:
            namespaceprefix_ = self.WebAuthenticationDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.WebAuthenticationDetail_nsprefix_) else ''
            self.WebAuthenticationDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='WebAuthenticationDetail', pretty_print=pretty_print)
        if self.ClientDetail is not None:
            namespaceprefix_ = self.ClientDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.ClientDetail_nsprefix_) else ''
            self.ClientDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ClientDetail', pretty_print=pretty_print)
        if self.TransactionDetail is not None:
            namespaceprefix_ = self.TransactionDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.TransactionDetail_nsprefix_) else ''
            self.TransactionDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='TransactionDetail', pretty_print=pretty_print)
        if self.Version is not None:
            namespaceprefix_ = self.Version_nsprefix_ + ':' if (UseCapturedNS_ and self.Version_nsprefix_) else ''
            self.Version.export(outfile, level, namespaceprefix_, namespacedef_='', name_='Version', pretty_print=pretty_print)
        if self.UploadId is not None:
            namespaceprefix_ = self.UploadId_nsprefix_ + ':' if (UseCapturedNS_ and self.UploadId_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUploadId>%s</%sUploadId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.UploadId), input_name='UploadId')), namespaceprefix_ , eol_))
        if self.ShipmentDetail is not None:
            namespaceprefix_ = self.ShipmentDetail_nsprefix_ + ':' if (UseCapturedNS_ and self.ShipmentDetail_nsprefix_) else ''
            self.ShipmentDetail.export(outfile, level, namespaceprefix_, namespacedef_='', name_='ShipmentDetail', pretty_print=pretty_print)
    def build(self, node, gds_collector_=None):
        # Populate this object from a parsed XML element node.
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            # strip the namespace from the tag to get the local element name
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element defines no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Dispatch on the child element's local name and store the parsed
        # value on the matching attribute.
        if nodeName_ == 'WebAuthenticationDetail':
            obj_ = WebAuthenticationDetail.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.WebAuthenticationDetail = obj_
            obj_.original_tagname_ = 'WebAuthenticationDetail'
        elif nodeName_ == 'ClientDetail':
            obj_ = ClientDetail.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ClientDetail = obj_
            obj_.original_tagname_ = 'ClientDetail'
        elif nodeName_ == 'TransactionDetail':
            obj_ = TransactionDetail.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.TransactionDetail = obj_
            obj_.original_tagname_ = 'TransactionDetail'
        elif nodeName_ == 'Version':
            obj_ = VersionId.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.Version = obj_
            obj_.original_tagname_ = 'Version'
        elif nodeName_ == 'UploadId':
            # simple string content, parsed and validated in place
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'UploadId')
            value_ = self.gds_validate_string(value_, node, 'UploadId')
            self.UploadId = value_
            self.UploadId_nsprefix_ = child_.prefix
        elif nodeName_ == 'ShipmentDetail':
            obj_ = UploadedDangerousGoodsShipmentDetail.factory(parent_object_=self)
            obj_.build(child_, gds_collector_=gds_collector_)
            self.ShipmentDetail = obj_
            obj_.original_tagname_ = 'ShipmentDetail'
# end class ModifyDangerousGoodsShipmentRequest
class NetExplosiveDetail(GeneratedsSuper):
    """Generated binding for the NetExplosiveDetail complex type: a
    classification Type (a NetExplosiveClassificationType enumeration
    value), a decimal Amount, and a Units string.

    Fix over the generated original: removed the dead no-op statement
    ``value = value`` inside the Type validator.
    """
    __hash__ = GeneratedsSuper.__hash__
    subclass = None
    superclass = None
    def __init__(self, Type=None, Amount=None, Units=None, gds_collector_=None, **kwargs_):
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.Type = Type
        # report invalid enumeration values to the collector up front
        self.validate_NetExplosiveClassificationType(self.Type)
        self.Type_nsprefix_ = None
        self.Amount = Amount
        self.Amount_nsprefix_ = None
        self.Units = Units
        self.Units_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate NetExplosiveDetail, or a registered subclass if one
        # was provided via CurrentSubclassModule_ / .subclass.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, NetExplosiveDetail)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if NetExplosiveDetail.subclass:
            return NetExplosiveDetail.subclass(*args_, **kwargs_)
        else:
            return NetExplosiveDetail(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        self.ns_prefix_ = ns_prefix
    def get_Type(self):
        return self.Type
    def set_Type(self, Type):
        self.Type = Type
    def get_Amount(self):
        return self.Amount
    def set_Amount(self, Amount):
        self.Amount = Amount
    def get_Units(self):
        return self.Units
    def set_Units(self, Units):
        self.Units = Units
    def validate_NetExplosiveClassificationType(self, value):
        result = True
        # Validate type NetExplosiveClassificationType, a restriction on xs:string.
        if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
            if not isinstance(value, str):
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
                return False
            enumerations = ['NET_EXPLOSIVE_CONTENT', 'NET_EXPLOSIVE_MASS', 'NET_EXPLOSIVE_QUANTITY', 'NET_EXPLOSIVE_WEIGHT']
            if value not in enumerations:
                lineno = self.gds_get_node_lineno_()
                self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on NetExplosiveClassificationType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
                result = False
        return result
    def hasContent_(self):
        # True if any child element is set (element is not self-closing).
        if (
            self.Type is not None or
            self.Amount is not None or
            self.Units is not None
        ):
            return True
        else:
            return False
    def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NetExplosiveDetail', pretty_print=True):
        # Serialize this element and its children as XML to outfile.
        imported_ns_def_ = GenerateDSNamespaceDefs_.get('NetExplosiveDetail')
        if imported_ns_def_ is not None:
            namespacedef_ = imported_ns_def_
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.original_tagname_ is not None and name_ == 'NetExplosiveDetail':
            name_ = self.original_tagname_
        if UseCapturedNS_ and self.ns_prefix_:
            namespaceprefix_ = self.ns_prefix_ + ':'
        showIndent(outfile, level, pretty_print)
        outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
        already_processed = set()
        self.exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='NetExplosiveDetail')
        if self.hasContent_():
            outfile.write('>%s' % (eol_, ))
            self.exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='NetExplosiveDetail', pretty_print=pretty_print)
            showIndent(outfile, level, pretty_print)
            outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
        else:
            outfile.write('/>%s' % (eol_, ))
    def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='NetExplosiveDetail'):
        # This element defines no XML attributes.
        pass
    def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='NetExplosiveDetail', fromsubclass_=False, pretty_print=True):
        # Write each non-None child element in schema order.
        if pretty_print:
            eol_ = '\n'
        else:
            eol_ = ''
        if self.Type is not None:
            namespaceprefix_ = self.Type_nsprefix_ + ':' if (UseCapturedNS_ and self.Type_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sType>%s</%sType>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Type), input_name='Type')), namespaceprefix_ , eol_))
        if self.Amount is not None:
            namespaceprefix_ = self.Amount_nsprefix_ + ':' if (UseCapturedNS_ and self.Amount_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sAmount>%s</%sAmount>%s' % (namespaceprefix_ , self.gds_format_decimal(self.Amount, input_name='Amount'), namespaceprefix_ , eol_))
        if self.Units is not None:
            namespaceprefix_ = self.Units_nsprefix_ + ':' if (UseCapturedNS_ and self.Units_nsprefix_) else ''
            showIndent(outfile, level, pretty_print)
            outfile.write('<%sUnits>%s</%sUnits>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Units), input_name='Units')), namespaceprefix_ , eol_))
    def build(self, node, gds_collector_=None):
        # Populate this object from a parsed XML element node.
        self.gds_collector_ = gds_collector_
        if SaveElementTreeNode:
            self.gds_elementtree_node_ = node
        already_processed = set()
        self.ns_prefix_ = node.prefix
        self.buildAttributes(node, node.attrib, already_processed)
        for child in node:
            nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
            self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
        return self
    def buildAttributes(self, node, attrs, already_processed):
        # This element defines no XML attributes.
        pass
    def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
        # Dispatch on the child element name and store the parsed value.
        if nodeName_ == 'Type':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Type')
            value_ = self.gds_validate_string(value_, node, 'Type')
            self.Type = value_
            self.Type_nsprefix_ = child_.prefix
            # validate type NetExplosiveClassificationType
            self.validate_NetExplosiveClassificationType(self.Type)
        elif nodeName_ == 'Amount' and child_.text:
            sval_ = child_.text
            fval_ = self.gds_parse_decimal(sval_, node, 'Amount')
            fval_ = self.gds_validate_decimal(fval_, node, 'Amount')
            self.Amount = fval_
            self.Amount_nsprefix_ = child_.prefix
        elif nodeName_ == 'Units':
            value_ = child_.text
            value_ = self.gds_parse_string(value_, node, 'Units')
            value_ = self.gds_validate_string(value_, node, 'Units')
            self.Units = value_
            self.Units_nsprefix_ = child_.prefix
# end class NetExplosiveDetail
class Notification(GeneratedsSuper):
"""The descriptive data regarding the result of the submitted
transaction."""
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
    def __init__(self, Severity=None, Source=None, Code=None, Message=None, LocalizedMessage=None, MessageParameters=None, gds_collector_=None, **kwargs_):
        # Generated constructor: store bookkeeping state and child values.
        self.gds_collector_ = gds_collector_
        self.gds_elementtree_node_ = None
        self.original_tagname_ = None
        self.parent_object_ = kwargs_.get('parent_object_')
        self.ns_prefix_ = None
        self.Severity = Severity
        # report invalid enumeration values to the collector up front
        self.validate_NotificationSeverityType(self.Severity)
        self.Severity_nsprefix_ = None
        self.Source = Source
        self.Source_nsprefix_ = None
        self.Code = Code
        self.Code_nsprefix_ = None
        self.Message = Message
        self.Message_nsprefix_ = None
        self.LocalizedMessage = LocalizedMessage
        self.LocalizedMessage_nsprefix_ = None
        # repeated element: default to a fresh list per instance
        if MessageParameters is None:
            self.MessageParameters = []
        else:
            self.MessageParameters = MessageParameters
        self.MessageParameters_nsprefix_ = None
    def factory(*args_, **kwargs_):
        # Instantiate Notification, or a registered subclass if one was
        # provided via CurrentSubclassModule_ / Notification.subclass.
        if CurrentSubclassModule_ is not None:
            subclass = getSubclassFromModule_(
                CurrentSubclassModule_, Notification)
            if subclass is not None:
                return subclass(*args_, **kwargs_)
        if Notification.subclass:
            return Notification.subclass(*args_, **kwargs_)
        else:
            return Notification(*args_, **kwargs_)
    factory = staticmethod(factory)
    def get_ns_prefix_(self):
        # Return the XML namespace prefix captured during build().
        return self.ns_prefix_
    def set_ns_prefix_(self, ns_prefix):
        # Set the XML namespace prefix used when exporting this element.
        self.ns_prefix_ = ns_prefix
    def get_Severity(self):
        # Generated getter for the Severity child element value.
        return self.Severity
    def set_Severity(self, Severity):
        # Generated setter for the Severity child element value.
        self.Severity = Severity
    def get_Source(self):
        # Generated getter for the Source child element value.
        return self.Source
    def set_Source(self, Source):
        # Generated setter for the Source child element value.
        self.Source = Source
    def get_Code(self):
        # Generated getter for the Code child element value.
        return self.Code
    def set_Code(self, Code):
        # Generated setter for the Code child element value.
        self.Code = Code
    def get_Message(self):
        # Generated getter for the Message child element value.
        return self.Message
    def set_Message(self, Message):
        # Generated setter for the Message child element value.
        self.Message = Message
    def get_LocalizedMessage(self):
        # Generated getter for the LocalizedMessage child element value.
        return self.LocalizedMessage
    def set_LocalizedMessage(self, LocalizedMessage):
        # Generated setter for the LocalizedMessage child element value.
        self.LocalizedMessage = LocalizedMessage
    def get_MessageParameters(self):
        # Generated getter for the repeated MessageParameters element (list).
        return self.MessageParameters
    def set_MessageParameters(self, MessageParameters):
        # Generated setter replacing the whole MessageParameters list.
        self.MessageParameters = MessageParameters
    def add_MessageParameters(self, value):
        # Append one value to the repeated MessageParameters element.
        self.MessageParameters.append(value)
    def insert_MessageParameters_at(self, index, value):
        # Insert a value at the given index in MessageParameters.
        self.MessageParameters.insert(index, value)
    def replace_MessageParameters_at(self, index, value):
        # Replace the value at the given index in MessageParameters.
        self.MessageParameters[index] = value
def validate_NotificationSeverityType(self, value):
result = True
# Validate type NotificationSeverityType, a restriction on xs:string.
if value is not | |
the higher, the more strict
        - plot: (optional) True if results are to be plotted
RETURNS:
- segmentLimits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that
the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
'''
if Weight >= 1:
Weight = 0.99
if Weight <= 0:
Weight = 0.01
# Step 1: feature extraction
x = audioBasicIO.stereo2mono(x) # convert to mono
ShortTermFeatures = aF.stFeatureExtraction(x, Fs, stWin * Fs, stStep * Fs) # extract short-term features
# Step 2: train binary SVM classifier of low vs high energy frames
EnergySt = ShortTermFeatures[1, :] # keep only the energy short-term sequence (2nd feature)
E = numpy.sort(EnergySt) # sort the energy feature values:
L1 = int(len(E) / 10) # number of 10% of the total short-term windows
T1 = numpy.mean(E[0:L1]) + 0.000000000000001 # compute "lower" 10% energy threshold
T2 = numpy.mean(E[-L1:-1]) + 0.000000000000001 # compute "higher" 10% energy threshold
Class1 = ShortTermFeatures[:, numpy.where(EnergySt <= T1)[0]] # get all features that correspond to low energy
Class2 = ShortTermFeatures[:, numpy.where(EnergySt >= T2)[0]] # get all features that correspond to high energy
featuresSS = [Class1.T, Class2.T] # form the binary classification task and ...
[featuresNormSS, MEANSS, STDSS] = aT.normalizeFeatures(featuresSS) # normalize and ...
SVM = aT.trainSVM(featuresNormSS, 1.0) # train the respective SVM probabilistic model (ONSET vs SILENCE)
# Step 3: compute onset probability based on the trained SVM
ProbOnset = []
for i in range(ShortTermFeatures.shape[1]): # for each frame
curFV = (ShortTermFeatures[:, i] - MEANSS) / STDSS # normalize feature vector
ProbOnset.append(SVM.predict_proba(curFV.reshape(1,-1))[0][1]) # get SVM probability (that it belongs to the ONSET class)
ProbOnset = numpy.array(ProbOnset)
ProbOnset = smoothMovingAvg(ProbOnset, smoothWindow / stStep) # smooth probability
# Step 4A: detect onset frame indices:
ProbOnsetSorted = numpy.sort(ProbOnset) # find probability Threshold as a weighted average of top 10% and lower 10% of the values
Nt = ProbOnsetSorted.shape[0] / 10
T = (numpy.mean((1 - Weight) * ProbOnsetSorted[0:Nt]) + Weight * numpy.mean(ProbOnsetSorted[-Nt::]))
MaxIdx = numpy.where(ProbOnset > T)[0] # get the indices of the frames that satisfy the thresholding
i = 0
timeClusters = []
segmentLimits = []
# Step 4B: group frame indices to onset segments
while i < len(MaxIdx): # for each of the detected onset indices
curCluster = [MaxIdx[i]]
if i == len(MaxIdx)-1:
break
while MaxIdx[i+1] - curCluster[-1] <= 2:
curCluster.append(MaxIdx[i+1])
i += 1
if i == len(MaxIdx)-1:
break
i += 1
timeClusters.append(curCluster)
segmentLimits.append([curCluster[0] * stStep, curCluster[-1] * stStep])
# Step 5: Post process: remove very small segments:
minDuration = 0.2
segmentLimits2 = []
for s in segmentLimits:
if s[1] - s[0] > minDuration:
segmentLimits2.append(s)
segmentLimits = segmentLimits2
if plot:
timeX = numpy.arange(0, x.shape[0] / float(Fs), 1.0 / Fs)
plt.subplot(2, 1, 1)
plt.plot(timeX, x)
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.subplot(2, 1, 2)
plt.plot(numpy.arange(0, ProbOnset.shape[0] * stStep, stStep), ProbOnset)
plt.title('Signal')
for s in segmentLimits:
plt.axvline(x=s[0])
plt.axvline(x=s[1])
plt.title('SVM Probability')
plt.show()
return segmentLimits
def speakerDiarization(fileName, numOfSpeakers, mtSize=2.0, mtStep=0.2, stWin=0.05, LDAdim=35, PLOT=False):
'''
ARGUMENTS:
- fileName: the name of the WAV file to be analyzed
- numOfSpeakers the number of speakers (clusters) in the recording (<=0 for unknown)
- mtSize (opt) mid-term window size
- mtStep (opt) mid-term window step
- stWin (opt) short-term window size
- LDAdim (opt) LDA dimension (0 for no LDA)
        - PLOT (opt)     0 for not plotting the results 1 for plotting
'''
[Fs, x] = audioBasicIO.readAudioFile(fileName)
print fileName
x = audioBasicIO.stereo2mono(x)
Duration = len(x) / Fs
[Classifier1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.loadKNNModel(os.path.join("data","knnSpeakerAll"))
[Classifier2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.loadKNNModel(os.path.join("data","knnSpeakerFemaleMale"))
[MidTermFeatures, ShortTermFeatures] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, mtStep * Fs, round(Fs * stWin), round(Fs*stWin * 0.5))
MidTermFeatures2 = numpy.zeros((MidTermFeatures.shape[0] + len(classNames1) + len(classNames2), MidTermFeatures.shape[1]))
for i in range(MidTermFeatures.shape[1]):
curF1 = (MidTermFeatures[:, i] - MEAN1) / STD1
curF2 = (MidTermFeatures[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
MidTermFeatures2[0:MidTermFeatures.shape[0], i] = MidTermFeatures[:, i]
MidTermFeatures2[MidTermFeatures.shape[0]:MidTermFeatures.shape[0]+len(classNames1), i] = P1 + 0.0001
MidTermFeatures2[MidTermFeatures.shape[0] + len(classNames1)::, i] = P2 + 0.0001
MidTermFeatures = MidTermFeatures2 # TODO
# SELECT FEATURES:
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20]; # SET 0A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 99,100]; # SET 0B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,
# 97,98, 99,100]; # SET 0C
iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] # SET 1A
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 1B
#iFeaturesSelect = [8,9,10,11,12,13,14,15,16,17,18,19,20,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 1C
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53]; # SET 2A
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 99,100]; # SET 2B
#iFeaturesSelect = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53, 68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98, 99,100]; # SET 2C
#iFeaturesSelect = range(100); # SET 3
#MidTermFeatures += numpy.random.rand(MidTermFeatures.shape[0], MidTermFeatures.shape[1]) * 0.000000010
MidTermFeatures = MidTermFeatures[iFeaturesSelect, :]
(MidTermFeaturesNorm, MEAN, STD) = aT.normalizeFeatures([MidTermFeatures.T])
MidTermFeaturesNorm = MidTermFeaturesNorm[0].T
numOfWindows = MidTermFeatures.shape[1]
# remove outliers:
DistancesAll = numpy.sum(distance.squareform(distance.pdist(MidTermFeaturesNorm.T)), axis=0)
MDistancesAll = numpy.mean(DistancesAll)
iNonOutLiers = numpy.nonzero(DistancesAll < 1.2 * MDistancesAll)[0]
# TODO: Combine energy threshold for outlier removal:
#EnergyMin = numpy.min(MidTermFeatures[1,:])
#EnergyMean = numpy.mean(MidTermFeatures[1,:])
#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0
#iNonOutLiers = numpy.nonzero(MidTermFeatures[1,:] > Thres)[0]
#print iNonOutLiers
perOutLier = (100.0 * (numOfWindows - iNonOutLiers.shape[0])) / numOfWindows
MidTermFeaturesNormOr = MidTermFeaturesNorm
MidTermFeaturesNorm = MidTermFeaturesNorm[:, iNonOutLiers]
# LDA dimensionality reduction:
if LDAdim > 0:
#[mtFeaturesToReduce, _] = aF.mtFeatureExtraction(x, Fs, mtSize * Fs, stWin * Fs, round(Fs*stWin), round(Fs*stWin));
# extract mid-term features with minimum step:
mtWinRatio = int(round(mtSize / stWin))
mtStepRatio = int(round(stWin / stWin))
mtFeaturesToReduce = []
numOfFeatures = len(ShortTermFeatures)
numOfStatistics = 2
#for i in range(numOfStatistics * numOfFeatures + 1):
for i in range(numOfStatistics * numOfFeatures):
mtFeaturesToReduce.append([])
for i in range(numOfFeatures): # for each of the short-term features:
curPos = 0
N = len(ShortTermFeatures[i])
while (curPos < N):
N1 = curPos
N2 = curPos + mtWinRatio
if N2 > N:
N2 = N
curStFeatures = ShortTermFeatures[i][N1:N2]
mtFeaturesToReduce[i].append(numpy.mean(curStFeatures))
mtFeaturesToReduce[i+numOfFeatures].append(numpy.std(curStFeatures))
curPos += mtStepRatio
mtFeaturesToReduce = numpy.array(mtFeaturesToReduce)
mtFeaturesToReduce2 = numpy.zeros((mtFeaturesToReduce.shape[0] + len(classNames1) + len(classNames2), mtFeaturesToReduce.shape[1]))
for i in range(mtFeaturesToReduce.shape[1]):
curF1 = (mtFeaturesToReduce[:, i] - MEAN1) / STD1
curF2 = (mtFeaturesToReduce[:, i] - MEAN2) / STD2
[Result, P1] = aT.classifierWrapper(Classifier1, "knn", curF1)
[Result, P2] = aT.classifierWrapper(Classifier2, "knn", curF2)
mtFeaturesToReduce2[0:mtFeaturesToReduce.shape[0], i] = mtFeaturesToReduce[:, i]
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]:mtFeaturesToReduce.shape[0] + len(classNames1), i] = P1 + 0.0001
mtFeaturesToReduce2[mtFeaturesToReduce.shape[0]+len(classNames1)::, i] = P2 + 0.0001
mtFeaturesToReduce = mtFeaturesToReduce2
mtFeaturesToReduce = mtFeaturesToReduce[iFeaturesSelect, :]
#mtFeaturesToReduce += numpy.random.rand(mtFeaturesToReduce.shape[0], mtFeaturesToReduce.shape[1]) * 0.0000010
(mtFeaturesToReduce, MEAN, STD) = aT.normalizeFeatures([mtFeaturesToReduce.T])
mtFeaturesToReduce = mtFeaturesToReduce[0].T
#DistancesAll = numpy.sum(distance.squareform(distance.pdist(mtFeaturesToReduce.T)), axis=0)
#MDistancesAll = numpy.mean(DistancesAll)
#iNonOutLiers2 = numpy.nonzero(DistancesAll < 3.0*MDistancesAll)[0]
#mtFeaturesToReduce = mtFeaturesToReduce[:, iNonOutLiers2]
Labels = numpy.zeros((mtFeaturesToReduce.shape[1], ));
LDAstep = 1.0
LDAstepRatio = LDAstep / stWin
#print LDAstep, LDAstepRatio
for i in range(Labels.shape[0]):
Labels[i] = int(i*stWin/LDAstepRatio);
clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=LDAdim)
clf.fit(mtFeaturesToReduce.T, Labels)
MidTermFeaturesNorm = (clf.transform(MidTermFeaturesNorm.T)).T
if numOfSpeakers <= 0:
sRange = range(2, 10)
else:
sRange = [numOfSpeakers]
clsAll = []
silAll = []
centersAll = []
for iSpeakers in sRange:
k_means = sklearn.cluster.KMeans(n_clusters = iSpeakers)
k_means.fit(MidTermFeaturesNorm.T)
cls = k_means.labels_
means = k_means.cluster_centers_
# Y = distance.squareform(distance.pdist(MidTermFeaturesNorm.T))
clsAll.append(cls)
centersAll.append(means)
silA = []; silB = []
for c in range(iSpeakers): # for each speaker (i.e. for each extracted cluster)
clusterPerCent = numpy.nonzero(cls==c)[0].shape[0] / float(len(cls))
if clusterPerCent < 0.020:
silA.append(0.0)
silB.append(0.0)
else:
MidTermFeaturesNormTemp = MidTermFeaturesNorm[:,cls==c] # get subset of feature vectors
Yt = distance.pdist(MidTermFeaturesNormTemp.T) # compute average distance between samples that belong to the cluster (a values)
silA.append(numpy.mean(Yt)*clusterPerCent)
silBs = []
for c2 in range(iSpeakers): # compute distances from samples of other clusters
if c2!=c:
clusterPerCent2 = numpy.nonzero(cls==c2)[0].shape[0] / float(len(cls))
MidTermFeaturesNormTemp2 = MidTermFeaturesNorm[:,cls==c2]
Yt = distance.cdist(MidTermFeaturesNormTemp.T, MidTermFeaturesNormTemp2.T)
silBs.append(numpy.mean(Yt)*(clusterPerCent+clusterPerCent2)/2.0)
silBs = numpy.array(silBs)
silB.append(min(silBs)) # ... and keep the minimum value (i.e. the distance from the "nearest" cluster)
silA = numpy.array(silA);
silB = numpy.array(silB);
sil = []
for c in range(iSpeakers): # for each cluster (speaker)
sil.append( ( silB[c] - silA[c]) / (max(silB[c], silA[c])+0.00001) ) # compute silhouette
silAll.append(numpy.mean(sil)) # keep the AVERAGE SILLOUETTE
#silAll = silAll * (1.0/(numpy.power(numpy.array(sRange),0.5)))
imax = numpy.argmax(silAll) # position of | |
from __future__ import division, print_function
import math
import itertools
import os
import sys
import subprocess
import shapelib
import numpy
from .config import config
from . import scheduler
def window(iterable, size=2, step=1):
    """
    Iterate over a sliding window of `size` consecutive elements of
    `iterable`, advancing `step` elements at a time.

    Returns an iterator of tuples.
    """
    iterators = itertools.tee(iterable, size)
    # Stagger the tee'd iterators: advance the i-th iterator by i
    # elements so zipping them yields consecutive windows.
    for skip_steps, itr in enumerate(iterators):
        for _ in itertools.islice(itr, skip_steps):
            pass
    # BUG FIX: itertools.izip only exists on Python 2; fall back to the
    # builtin zip (an iterator on Python 3) so this works on both.
    zip_func = getattr(itertools, 'izip', zip)
    window_itr = zip_func(*iterators)
    if step != 1:
        window_itr = itertools.islice(window_itr, 0, 99999999, step)
    return window_itr
def soundpressure_to_soundlevel(Pa, p0=0.00002):
    """
    Convert sound pressure in Pascal to sound level in dB (dBSPL).

    Lp(dBSPL) = 20 * log10(p/p0)

    p0: threshold of hearing, 0.00002 Pa (20uPa)
    """
    pressure_ratio = Pa / p0
    return 20 * math.log10(pressure_ratio)
def soundlevel_to_soundpressure(dB, p0=0.00002):
    """
    Convert sound level in dB (dBSPL) to sound pressure in Pascal.

    p = p0 * e^(1/20 * Lp * ln(10))   (inverse of 20*log10(p/p0))

    p0: threshold of hearing, 0.00002 Pa (20uPa)
    """
    # BUG FIX: the original referenced the undefined name `_math`,
    # which raised NameError on every call.
    return p0 * math.exp(1 / 20 * dB * math.log(10))
# Short aliases for the conversions under their common symbolic names
# (L = level in dB, p = pressure in Pascal).
dB2Pa = L2p = soundlevel_to_soundpressure
Pa2dB = p2L = soundpressure_to_soundlevel
def _findfile(f, possible_folders):
"""
Returns one of the `possible_folders` where `f` is present,
or None
"""
for folder in possible_folders:
if os.path.exists(os.path.join(folder, f)):
return folder
return None
def detect_lambda():
    """
    Returns the path of the Lambda binary or None if not found.

    Search order: the cached `config['lambdabin']`, then `which lambda`,
    then (on OSX only) the standard application folders. A successful
    lookup is cached back into `config`.
    """
    lambdabin = None

    def check_which():
        """
        :rtype : str or None. The path of the file
        """
        try:
            path = subprocess.check_output(["which", "lambda"])
            # BUG FIX: on Python 3 check_output returns bytes, and the
            # output always ends with a newline, so the os.path.exists
            # test below could never succeed. Decode and strip first.
            if not isinstance(path, str):
                path = path.decode(sys.getfilesystemencoding() or "utf-8")
            path = path.strip()
            if os.path.exists(path):
                return path
        except subprocess.CalledProcessError:
            pass
        return None

    if config['lambdabin'] is not None:
        return config['lambdabin']
    if sys.platform == 'darwin':
        lambdabin = check_which()
        if lambdabin is None:
            possible_folders = [
                "/Applications",
                os.path.expanduser("~/Applications")
            ]
            lambda_app = _findfile('Lambda.app', possible_folders)
            if lambda_app:
                lambdabin = os.path.join(lambda_app, 'Lambda.app', 'Contents', 'MacOS', 'Lambda')
                assert os.path.exists(lambdabin), (
                    "Found the lambda app (%s) but the lambda binary was not present" % lambda_app)
    elif sys.platform.startswith('linux'):
        # BUG FIX: Python 3 reports 'linux', Python 2 'linux2'; match both.
        lambdabin = check_which()
    elif sys.platform == 'win32':
        # BUG FIX: the original compared against 'window', which is never
        # a valid sys.platform value; Windows reports 'win32'.
        lambdabin = None
    if lambdabin is not None:
        config['lambdabin'] = lambdabin
    return lambdabin
def geom_rasterize(geom, size_in_meters, size_in_pixels):
    """
    Rasterize the geometry `geom`, defined in real-world coordinates,
    into a binary matrix of `size_in_pixels` pixels.

    If geom is a line (a linestring, a linering), consider applying a
    .buffer before rasterizing, to control the "linesize".

    Returns --> a 2D numpy.ndarray with shape == size_in_pixels
    """
    meters_x, meters_y = size_in_meters
    pixels_x, pixels_y = size_in_pixels
    # pixels per meter
    ratio = pixels_x / meters_x
    raster = shapelib.rasterize(geom, ratio,
                                xrange=(0, meters_x),
                                yrange=(0, meters_y)).array
    rows, cols = raster.shape
    assert (cols, rows) == size_in_pixels
    return raster
def call_lambda(args, stdout=None, stderr=None, wrap=False):
    """
    Call Lambda with the given `args`, as a subprocess

    args: passed to the lambda binary
    stdout, stderr: None, 'pipe' or 'log'
    wrap: wrap the subprocess in a future, to be able to add
          done callbacks

    Returns --> a subprocess

    NB: to add a done_callback:

    def finished(future):
        print("lambda finished!")

    call_lambda([arg1, arg2, ...], wrap=True).add_done_callback(finished)
    """
    binary = detect_lambda()
    if binary is None:
        if sys.platform == 'darwin':
            msg = ("Could not find the 'lambda' binary."
                   "Make sure the Lambda.app was copied to your"
                   "/Applications folder. If you installed it in another"
                   "location, create a symbolic link to the binary inside:"
                   "$ ln -s /path/to/Lambda.app/Contexts/MacOS/Lambda /usr/local/bin"
                   "and make sure that the destination is in your PATH"
                   )
        else:
            msg = ""
        print(msg)
        raise IOError("Could not find the 'lambda' binary")
    args = [str(arg) for arg in args]
    cmds = [binary] + list(args)
    print("Calling Lamda as: %s" % str(cmds))
    if stdout == 'pipe':
        stdout = subprocess.PIPE
    elif stdout == 'log':
        stdout = open("lambda.log", "w")
    # BUG FIX: both branches below assigned to `stdout`, so the caller's
    # `stderr` setting was silently dropped (and stdout clobbered).
    if stderr == 'pipe':
        stderr = subprocess.PIPE
    elif stderr == 'log':
        stderr = open("lambda-error.log", "w")
    if wrap:
        return scheduler.subproc_call(cmds, stdout=stdout, stderr=stderr)
    else:
        return subprocess.Popen(cmds, stdout=stdout, stderr=stderr)
def open_sim_in_lambda(simfile, vis=False, walls=True, contrast=None, cmap=None, fps=None, skip=None, pipe=None):
    """
    Open the simfile in the Lambda application.

    pipe: 'pipe' --> will call the subproc. with stdout=subprocess.PIPE
          'log'  --> will log stdout to 'lambda.log'
          None   --> does not pipe stdout
    """
    simfile = os.path.abspath(simfile)
    # quote the path if it contains spaces
    if " " in simfile:
        simfile = '"%s"' % simfile
    args = ["-file", simfile]
    # boolean flags: appended without a value
    for flag, enabled in (("-vis", vis), ("-walls", walls)):
        if enabled:
            args.append(flag)
    # valued options: appended as "flag value" pairs when set
    for flag, value in (("-contrast", contrast), ("-colormap", cmap),
                        ("-fps", fps), ("-skip", skip)):
        if value is not None:
            args.extend([flag, value])
    return call_lambda(args, stdout=pipe)
def color_distance_rgb_abs(color1, color2):
    """Calculate the euclidian distance between these two colors

    :param color1: (r, g, b) or an image array of shape (Y, X, 3)
    :param color2: (r, g, b)
    :return: the euclidian distance
    """
    if isinstance(color1, numpy.ndarray):
        return _img_color_distance(color1, color2)
    squared_deltas = [(a - b) ** 2 for a, b in zip(color1, color2)]
    return numpy.sqrt(sum(squared_deltas))
def color_distance_rgb(color1, color2, maxvalue=255):
    """Calculate the normalized distance between these two colors
    (the distance will be between 0-1)

    :param color1: (r, g, b) or an image of shape (Y, X, 3)
    :param color2: (r, g, b)
    :return: the distance (0-1) between these two colors

    Example
    =======

    Calculate a mask indicating where the image is near to a certain
    color

    im = png_load(pngfile)
    distances = color_distance_rgb(im, (0, 0, 255))
    mask = (distances < 0.1).astype(int)
    selected_color = im*mask
    """
    # normalize by the largest possible distance: white to black
    max_distance = color_distance_rgb_abs((maxvalue,) * 3, (0, 0, 0))
    return color_distance_rgb_abs(color1, color2) / max_distance
def _img_color_distance(img, color):
"""calculate the distance of each pixel to a given color
:param img: a numpy.array of shape (y, x, 3)
:param color: a color tuple (r, g, b)
:return: a numpy.array of shape (y, x) where each value is a float 0-1
representing the distance to this color
"""
return numpy.sqrt(numpy.sum((img - color)**2, 2))
def _pypng_load(pngfile):
    """
    Read a PNG with pyPNG, returning a numpy array of shape (Y, X, 3),
    dtype uint8. The alpha channel, if present, is discarded.

    :raises ImportError: when pyPNG is not installed
    :raises ValueError: for greyscale or non-8-bit images
    """
    import png
    print("using backend: pyPNG")
    X, Y, rows_iter, info = png.Reader(filename=pngfile).asDirect()
    # only 24-bit RGB (optionally with alpha) images are supported
    if info['greyscale'] or info['bitdepth'] != 8:
        raise ValueError("only 24-bit color PNGs are supported")
    rows = [numpy.asarray(row, dtype=numpy.uint8) for row in rows_iter]
    alpha = info['alpha']
    rows2 = []
    for row in rows:
        if not alpha:
            row.shape = (X, 3)
        else:
            # reshape to RGBA, then drop the alpha plane
            row.shape = (X, 4)
            row = row[:, 0:3]
        rows2.append(row)
    mat = numpy.vstack(rows2)
    # NOTE: removed a leftover debug print of mat.shape here
    mat.shape = (Y, X, 3)
    return mat
def _pil_load(pngfile):
    """
    Read a PNG with PIL/Pillow, returning a numpy array of shape (Y, X, 3).

    :raises ImportError: when PIL is not installed
    """
    from PIL import Image

    def fromimage(image, flatten=0):
        # convert a PIL image into a numpy array
        if not Image.isImageType(image):
            raise TypeError("NOT a PIL Image")
        if flatten:
            image = image.convert('F')
        elif image.mode == '1':
            # BUG FIX: the original discarded the result of convert()
            image = image.convert('L')
        return numpy.array(image)

    im = Image.open(pngfile)
    arr = fromimage(im)
    # BUG FIX: PIL Image objects have no .shape attribute (the original
    # `Y, X, planes = im.shape` raised AttributeError); check the plane
    # count on the converted array instead.
    assert arr.ndim == 3 and arr.shape[2] == 3
    return arr
def png_load(pngfile):
    """
    Load a PNG file. Returns a numpy matrix with shape (y, x, 3)

    png = png_load(pngfile)
    pix = png[4, 5]
    r, g, b = pix

    :param pngfile: the path of the png file
    :return: a numpy array of shape (y, x, 3)
    """
    # Try pyPNG first, then PIL; each backend raises ImportError when
    # its library is unavailable, in which case we try the next one.
    for loader in (_pypng_load, _pil_load):
        try:
            return loader(pngfile)
        except ImportError:
            continue
    raise ImportError("needs either scipy or pypng to load a png")
def png_create(x, y, path, color=(0, 0, 0)):
    """
    create a PNG of given size

    :param x: x size (pixels)
    :param y: y size (pixels)
    :param path: path of generated png
    :param color: color (r,g,b) to fill the generated png
    """
    if color is None:
        color = (0, 0, 0)
    canvas = numpy.ones((y, x))
    red, green, blue = color
    # colormap scaling each pixel value by the fill color
    fill = lambda v: (v * red, v * green, v * blue)
    return png_save(canvas, path, fill)
def _get_colormap(colormap=None):
    """
    Resolve `colormap` into a callable f(v) -> (r, g, b).

    Accepted forms: None (greyscale), a callable, an (r, g, b) tuple,
    or a preset name ('greyscale', 'wall').
    """
    wall_r, wall_g, wall_b = config['pngcolors']['wall']
    presets = {
        'greyscale': lambda v: (v*255, v*255, v*255),
        'wall': lambda v: (v*wall_r, v*wall_g, v*wall_b)
    }
    if colormap is None:
        return _get_colormap('greyscale')
    if callable(colormap):
        return colormap
    if isinstance(colormap, tuple):
        r, g, b = colormap
        return lambda v: (v*r, v*g, v*b)
    if colormap in presets:
        return presets[colormap]
    raise ValueError("Colormap not understood"
                     "Expecting a color (r, g, b) tuple, a callable f(x) -> (r, g, b)"
                     "or a preset: 'greyscale', 'wall', etc.")
def png_save(mat, path, colormap=None):
    """Save a monochrome image matrix as a png

    :param mat: a numpy array with shape=(height, width), with float values from 0-1
    :param path: the path to save the matrix to
    :param colormap: a function(x) -> (r, g, b), or the name of one
                     a preset ('grey', 'wall')
                     a color (r, g, b)
    :return: None
    """
    # already a 3-plane color image: save directly
    if len(mat.shape) == 3:
        return _png_save_color(mat, path)
    mapped = apply_colormap(mat, _get_colormap(colormap))
    return _png_save_color(mapped, path)
def _png_save_color(mat, path):
import png
Y, X, b | |
<gh_stars>1-10
#!/usr/bin/env python3
import argparse
import ast
import datetime
import fnmatch
import hashlib
import os
import shutil
import subprocess
import sys
# Python 2 compatibility fix.
try:
    FileNotFoundError
except NameError:
    # Python 2 has no FileNotFoundError; IOError is its closest superset.
    FileNotFoundError = IOError
class Error(Exception):
    """Base class for all makeapt errors."""
class _Path(object):
def __init__(self, path=[]):
if not path:
self._comps = []
elif isinstance(path, _Path):
self._comps = path._comps
elif isinstance(path, list):
self._comps = path
else:
assert isinstance(path, str), repr(path)
self._comps = [path]
def get_as_string(self):
return os.path.join(*tuple(self._comps))
def __add__(self, other):
return _Path(self._comps + _Path(other)._comps)
def get_dirname(self):
return _Path(self._comps[:-1])
def get_basename(self):
return self._comps[-1]
def add_extension(self, ext):
new_basename = self.get_basename() + ext
return self.get_dirname() + new_basename
def __iter__(self):
for comp in self._comps:
yield comp
class _Package(object):
def __init__(self, filehash, filename):
self._filehash = filehash
self._filename = filename
def get_filehash(self):
return self._filehash
def get_filename(self):
return self._filename
class _RepositoryIndex(object):
def __init__(self, collect_files=False):
self._collect_files = collect_files
if collect_files:
self._index = dict()
def add_file_path(self, path):
if not self._collect_files:
return
i = self._index
for comp in path.get_dirname():
i = i.setdefault(comp, dict())
i[path.get_basename()] = None
def get(self):
assert self._collect_files
index_copy = dict(self._index)
return index_copy
class _ComponentArch(object):
def __init__(self, arch_id, component):
self._id = arch_id
self._component = component
def get_id(self):
return self._id
def get_component(self):
return self._component
def get_component_id(self):
return self._component.get_id()
def get_distribution(self):
return self._component.get_distribution()
def get_packages(self):
return self._component.get_packages_in_arch(self)
def get_path_in_distribution(self):
return self._component.get_path_in_distribution()
class _Component(object):
def __init__(self, component_id, dist):
self._id = component_id
self._dist = dist
def get_id(self):
return self._id
def get_distribution(self):
return self._dist
def get_archs(self):
return self._dist.get_archs_in_component(self)
def get_packages_in_arch(self, arch):
assert isinstance(arch, _ComponentArch)
assert arch.get_component() is self
return self._dist.get_packages_in_component_arch(arch)
def get_path_in_distribution(self):
return _Path([self._id])
class _DistributionArch(object):
def __init__(self, arch_id, dist):
self._id = arch_id
self._dist = dist
def get_id(self):
return self._id
def get_distribution(self):
return self._dist
def get_packages(self):
return self._dist.get_packages_in_arch(self)
def get_path_in_distribution(self):
return _Path()
class _DistributionIndex(object):
def __init__(self):
self._index = dict()
def get(self):
return self._index
def add(self, path, hashes, filesize):
assert path not in self._index
self._index[path] = hashes, filesize
class _Distribution(object):
def __init__(self, dist_id, repo_index):
self._id = dist_id
self._index = _DistributionIndex()
self._repo_index = repo_index
self._packages = dict()
def get_id(self):
return self._id
def get_index(self):
return self._index
def get_repository_index(self):
return self._repo_index
def add_package(self, component_id, arch_id, package):
key = component_id, arch_id
filenames = self._packages.setdefault(key, dict())
filename = package.get_filename()
if filename in filenames:
full_component_id = '%s:%s' % (self._dist_id, component_id)
raise Error('More than one package %r in component %r, '
'architecture %r.' % (filename, full_component_id,
arch_id))
filenames[filename] = package.get_filehash()
# Returns components of this distribution.
def get_components(self):
ids = {component_id for component_id, arch_id in self._packages}
for id in ids:
yield _Component(id, self)
# Returns architectures of this distribution's component.
def get_archs_in_component(self, component):
assert component.get_distribution() is self
component_id = component.get_id()
ids = {arch_id for comp_id, arch_id in self._packages
if comp_id == component_id}
for id in ids:
yield _ComponentArch(id, component)
# Returns architectures of all components in this distribution.
def get_archs_in_all_components(self):
for component in self.get_components():
for arch in component.get_archs():
yield arch
# Returns architectures of this distribution.
def get_archs(self):
ids = {arch_id for component_id, arch_id in self._packages}
for id in ids:
yield _DistributionArch(id, self)
# Returns packages for specific component architecture in
# this distribution.
def get_packages_in_component_arch(self, arch):
assert isinstance(arch, _ComponentArch)
assert arch.get_distribution() is self
target_key = arch.get_component_id(), arch.get_id()
for key, filenames in self._packages.items():
if key == target_key:
for filename, filehash in filenames.items():
yield _Package(filehash, filename)
# Returns packages for a specific architecture in this distribution.
def get_packages_in_arch(self, arch):
assert isinstance(arch, _DistributionArch)
assert arch.get_distribution() is self
target_arch_id = arch.get_id()
for (component_id, arch_id), filenames in self._packages.items():
if arch_id == target_arch_id:
for filename, filehash in filenames.items():
yield _Package(filehash, filename)
class Repository(object):
_DEFAULT_CONFIG = {
'origin': 'Default Origin',
'label': 'Default Label',
'gpg_key_id': 'none',
}
_PACKAGE_FIELD = 'Package'
_SECTION_FIELD = 'Section'
_ARCH_FIELD = 'Architecture'
_MAKEAPT_FIELD_PREFIX = '__'
_CONTENTS_FIELD = '%scontents' % _MAKEAPT_FIELD_PREFIX
_FILESIZE_FIELD = '%sfilesize' % _MAKEAPT_FIELD_PREFIX
# We prefer these fields always be specified in this order.
_DEB_INFO_FIELDS = [
_PACKAGE_FIELD,
'Version',
_SECTION_FIELD,
'Priority',
_ARCH_FIELD,
'Installed-Size',
'Depends',
'Maintainer',
'Uploaders',
'Homepage',
'Description',
]
# The name of the directory where we store .deb files.
POOL_DIR_NAME = 'pool'
# The directory where we store distribution index files.
DISTS_DIR = _Path('dists')
# Canonical makeapt names of hash algorithms. APT
# repositories use different names for the same hash
# algorithms, so for internal use we have to define their
# canonical names.
_CANONICAL_HASH_NAMES = {
'md5': 'md5',
'MD5Sum': 'md5',
'MD5sum': 'md5',
'sha1': 'sha1',
'SHA1': 'sha1',
'sha256': 'sha256',
'SHA256': 'sha256',
'sha512': 'sha512',
'SHA512': 'sha512',
}
# The map of known hash algorithms. The keys are their
# canonical names.
_HASH_ALGOS = {
'md5': hashlib.md5,
'sha1': hashlib.sha1,
'sha256': hashlib.sha256,
'sha512': hashlib.sha512,
}
# The hash algorithm used to find identical packages.
_KEY_HASH_NAME = _CANONICAL_HASH_NAMES['sha1']
# The names of hashes used in main distribution indexes. Come
# in order we want them in the index files.
_DISTRIBUTION_INDEX_HASH_NAMES = [
'MD5Sum', # Note the uppercase 'S' in 'MD5Sum'.
'SHA1', 'SHA256']
# Buffer size for file I/O, in bytes.
_BUFF_SIZE = 4096
# Names of various makeapt files.
_CONFIG_FILENAME = 'config'
_INDEX_FILENAME = 'index'
_CACHE_FILENAME = 'cache'
    def __init__(self, path='', use_makeapt_dir=True):
        # path: root of the APT repository; use_makeapt_dir: whether
        # makeapt's own state files live in a '.makeapt' subdirectory.
        # No filesystem access happens here.
        self._apt_path = _Path(path)
        self._use_makeapt_dir = use_makeapt_dir
        if use_makeapt_dir:
            self._makeapt_path = self._apt_path + '.makeapt'
        self._pool_path = self._apt_path + self.POOL_DIR_NAME
    def __enter__(self):
        """Load repository state; Repository is used as a context manager."""
        # TODO: Lock the repository.
        self._load_config()
        self._load_index()
        self._load_cache()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Flush all repository state back to disk."""
        # NOTE: flushes run even if an exception is propagating.
        self._flush_index()
        self._flush_cache()
        self._flush_config()
        # TODO: Unlock the repository.
def get_config(self):
# Always return a copy of the actual config.
config_copy = dict(self._config)
return config_copy
def get_config_field(self, field):
config = self.get_config()
return config[field]
    def set_config_field(self, field, value):
        # Overwrite (or create) a single configuration field.
        self._config[field] = value
def _make_dir(self, path):
path_string = path.get_as_string()
if not os.path.exists(path_string):
os.makedirs(path_string)
    def _make_file_dir(self, path):
        # Make sure the parent directory of `path` exists.
        self._make_dir(path.get_dirname())
def _save_file(self, path, content):
self._make_file_dir(path)
with open(path.get_as_string(), 'wb') as f:
for chunk in content:
if isinstance(chunk, str):
chunk = chunk.encode('utf-8')
f.write(chunk)
    def _gzip(self, path):
        # Compress `path` with gzip, yielding the compressed output.
        # '--no-name' keeps output deterministic (no timestamp/filename).
        # TODO: Can _run_shell() return a generator?
        # TODO: Should we do that with a Python library?
        yield self._run_shell(['gzip', '--keep', '--best', '--no-name',
                               '--stdout', path.get_as_string()])
    def _bzip2(self, path):
        # Compress `path` with bzip2, yielding the compressed output.
        # TODO: Can _run_shell() return a generator?
        # TODO: Should we do that with a Python library?
        yield self._run_shell(['bzip2', '--keep', '--best',
                               '--stdout', path.get_as_string()])
    def init(self):
        '''Initializes APT repository.

        Creates the makeapt state directory (when enabled) and the
        package pool directory.
        '''
        if self._use_makeapt_dir:
            self._make_dir(self._makeapt_path)
        self._make_dir(self._pool_path)
        # TODO: Should we make the 'dists' directory?
    def _load_makeapt_file(self, filename, default):
        # Read a state file from the makeapt directory and parse it as a
        # Python literal. Returns `default` when state is disabled or
        # the file does not exist yet.
        if not self._use_makeapt_dir:
            return default
        try:
            path = self._makeapt_path + filename
            with open(path.get_as_string(), 'r') as f:
                # literal_eval only evaluates literals (safe, unlike eval)
                return ast.literal_eval(f.read())
        except FileNotFoundError:
            return default
    # Writes a given value in consistent and human-readable way.
    def _emit_literal(self, value, level=0):
        # Yields string chunks forming a deterministic literal repr of
        # `value`: dict keys and set elements are emitted sorted, so the
        # on-disk state files are reproducible and diff-friendly.
        indent = ' '
        nested_level = level + 1
        if isinstance(value, dict):
            yield '{\n'
            for key in sorted(value):
                yield indent * nested_level
                yield '%r: ' % key
                for chunk in self._emit_literal(value[key], nested_level):
                    yield chunk
            yield indent * level + '}'
        elif isinstance(value, set):
            yield '{\n'
            for element in sorted(value):
                yield indent * nested_level
                for chunk in self._emit_literal(element, nested_level):
                    yield chunk
            yield indent * level + '}'
        else:
            yield repr(value)
        # nested values get a trailing comma; the top-level value does not
        if level > 0:
            yield ','
        yield '\n'
def _save_makeapt_file(self, filename, value):
if not self._use_makeapt_dir:
return
path = self._makeapt_path + filename
with open(path.get_as_string(), 'w') as f:
for chunk in self._emit_literal(value):
f.write(chunk)
def _load_config(self):
default_config_copy = dict(self._DEFAULT_CONFIG)
config = self._load_makeapt_file(self._CONFIG_FILENAME,
default_config_copy)
# Make sure all fields are in place.
for field, default_value in self._DEFAULT_CONFIG.items():
if field not in config:
config[field] = default_value
self._config = config
    def _flush_config(self):
        # Persist the config and drop it; the attribute only exists
        # between __enter__ and __exit__.
        self._save_makeapt_file(self._CONFIG_FILENAME, self._config)
        del self._config
    def _load_index(self):
        # Load the package index: {filehash: {filename: set(groups)}}.
        index = self._load_makeapt_file(self._INDEX_FILENAME, dict())
        # Fix the type of empty groups that 'literal_eval()'
        # reads as dict's and not set's.
        for filehash, filenames in index.items():
            for filename, groups in filenames.items():
                if isinstance(groups, dict):
                    assert not groups
                    groups = set()
                    filenames[filename] = groups
        self._index = index
    def _flush_index(self):
        # Persist the package index and drop the in-memory copy.
        self._save_makeapt_file(self._INDEX_FILENAME, self._index)
        del self._index
    def _load_cache(self):
        # Cache of previously computed values; starts empty when absent.
        self._cache = self._load_makeapt_file(self._CACHE_FILENAME, dict())
    def _flush_cache(self):
        # Persist the cache and drop the in-memory copy.
        self._save_makeapt_file(self._CACHE_FILENAME, self._cache)
        del self._cache
# Hashes a given file with a set of specified algorithms.
def _hash_file(self, path, hash_names):
# Handle the case when only one algorithm is specified.
if isinstance(hash_names, str):
hash_name = hash_names
return self._hash_file(path, {hash_name})[hash_name]
# Initialize messages.
msgs = {name: self._HASH_ALGOS[self._CANONICAL_HASH_NAMES[name]]()
for name in hash_names}
# Read out the file by chunks and update messages.
with open(path.get_as_string(), 'rb') as f:
for chunk in iter(lambda: f.read(self._BUFF_SIZE), b''):
for hash_name, msg in msgs.items():
msg.update(chunk)
return {hash_name: msg.hexdigest() for hash_name, msg | |
self.delta).astype(bool)
distance_factors.eliminate_zeros()
kernel_matrix = distance_factors
# return dist_knn, which is required for cdist_k_nearest_neighbor in
# order to do a follow-up cdist request (then as reference_dist_knn as input).
if is_pdist:
ret_cdist: Optional[Dict[str, np.ndarray]] = dict(
reference_dist_knn=dist_knn
)
else:
ret_cdist = None
return kernel_matrix, ret_cdist
class DmapKernelFixed(BaseManifoldKernel):
"""Diffusion map kernel with fixed kernel bandwidth.
This kernel wraps an kernel to describe a diffusion process.
Parameters
----------
internal_kernel
Kernel that describes the proximity between data points.
is_stochastic
If True, the kernel matrix is row-normalized.
alpha
Degree of re-normalization of sampling density in point cloud. `alpha` must be
inside the interval [0, 1] (inclusive).
symmetrize_kernel
If True, performs a conjugate transformation which can improve numerical
stability for matrix operations (such as computing eigenpairs). The matrix to
change the basis back is provided as a quantity of interest (see
possible return values in :meth:`PCManifoldKernel.__call__`).
See Also
--------
:py:class:`DiffusionMaps`
References
----------
:cite:`coifman_diffusion_2006`
"""
    def __init__(
        self,
        internal_kernel: PCManifoldKernel = GaussianKernel(epsilon=1.0),
        is_stochastic: bool = True,
        alpha: float = 1.0,
        symmetrize_kernel: bool = True,
    ):
        """Initialize the kernel; see the class docstring for parameters."""
        self.is_stochastic = is_stochastic
        if not (0 <= alpha <= 1):
            raise ValueError(f"alpha has to be between [0, 1]. Got alpha={alpha}")
        self.alpha = alpha
        self.symmetrize_kernel = symmetrize_kernel
        self.internal_kernel = internal_kernel
        # NOTE(review): commented-out remnants of the old symmetry logic;
        # `is_symmetric` is now a property derived from `symmetrize_kernel`
        # and `is_stochastic`. A necessary condition for symmetrizing the
        # kernel is pairwise evaluation (i.e. is_pdist = True).
        # self._is_symmetric = True
        # else:
        #     self._is_symmetric = False
        # row sums placeholder -- presumably filled during fit; not
        # written anywhere in the visible code (TODO confirm usage)
        self.row_sums_init = None
        super(DmapKernelFixed, self).__init__()
    @property
    def is_symmetric(self):
        """bool: whether the produced kernel matrix is symmetric.

        A non-stochastic kernel is always symmetric; a stochastic one is
        only treated as symmetric when `symmetrize_kernel` is enabled.
        """
        return self.symmetrize_kernel or not self.is_stochastic
def is_symmetric_transform(self) -> bool:
"""Indicates whether a symmetric conjugate kernel matrix was computed.
Returns
-------
"""
# If the kernel is made stochastic, it looses the symmetry, if symmetric_kernel
# is set to True, then apply the the symmetry transformation
return self.is_stochastic and self.is_symmetric
    def _normalize_sampling_density(
        self,
        kernel_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
        row_sums_alpha_fit: np.ndarray,
    ) -> Tuple[Union[np.ndarray, scipy.sparse.csr_matrix], Optional[np.ndarray]]:
        """Normalize (sparse/dense) kernels with positive `alpha` value. This is also
        referred to a 'renormalization' of sampling density.

        Returns the normalized kernel and the row sums raised to alpha
        (the latter is None in the cdist case)."""
        if row_sums_alpha_fit is None:
            # pdist case: the kernel must be square/symmetric
            assert is_symmetric_matrix(kernel_matrix)
        else:
            # cdist case: the fit row sums must match the column count
            assert row_sums_alpha_fit.shape[0] == kernel_matrix.shape[1]
        row_sums = kernel_matrix.sum(axis=1)
        if scipy.sparse.issparse(kernel_matrix):
            # np.matrix to np.ndarray
            # (np.matrix is deprecated but still used in scipy.sparse)
            row_sums = row_sums.A1
        if self.alpha < 1:
            if row_sums.dtype.kind != "f":
                # This is required for case when 'row_sums' contains boolean or integer
                # values; for inplace operations the type has to be the same
                row_sums = row_sums.astype(float)
            # in-place power via out= to avoid an extra allocation
            row_sums_alpha = np.power(row_sums, self.alpha, out=row_sums)
        else:  # no need to power with 1
            row_sums_alpha = row_sums
        normalized_kernel = _symmetric_matrix_division(
            matrix=kernel_matrix,
            vec=row_sums_alpha,
            vec_right=row_sums_alpha_fit,
        )
        if row_sums_alpha_fit is not None:
            # Set row_sums_alpha to None for security, because in a cdist-case (if
            # row_sums_alpha_fit) there is no need to further process row_sums_alpha, yet.
            row_sums_alpha = None
        return normalized_kernel, row_sums_alpha
    def _normalize(
        self,
        internal_kernel: KernelType,
        row_sums_alpha_fit: np.ndarray,
        is_pdist: bool,
    ):
        """Apply density renormalization (alpha) and, if configured,
        row-stochastic normalization / conjugate symmetrization to the
        internal kernel matrix.

        Returns (kernel, basis_change_matrix, row_sums_alpha); the
        latter two are None whenever they are not applicable."""
        # only required for symmetric kernel, return None if not used
        basis_change_matrix = None
        # required if alpha>0 and _normalize is called later for a cdist case
        # set in the pdist, alpha > 0 case
        row_sums_alpha = None
        if self.is_stochastic:
            if self.alpha > 0:
                # if pdist: kernel is still symmetric after this function call
                (internal_kernel, row_sums_alpha,) = self._normalize_sampling_density(
                    internal_kernel, row_sums_alpha_fit
                )
            if is_pdist and self.is_symmetric_transform():
                # Increases numerical stability when solving the eigenproblem.
                # Note1: when using the (symmetric) conjugate matrix, the
                # eigenvectors have to be transformed back to match the original.
                # Note2: the similarity transform only works for the is_pdist
                # case (for cdist, there is no symmetric kernel in the first
                # place, because it is generally rectangular and does not
                # include self points).
                (
                    internal_kernel,
                    basis_change_matrix,
                ) = _conjugate_stochastic_kernel_matrix(internal_kernel)
            else:
                internal_kernel = _stochastic_kernel_matrix(internal_kernel)
        # invariant: "symmetric pdist" <-> a basis change matrix exists
        assert not (
            (is_pdist and self.is_symmetric_transform())
            ^ (basis_change_matrix is not None)
        )
        if is_pdist and self.is_symmetric:
            assert is_symmetric_matrix(internal_kernel)
        return internal_kernel, basis_change_matrix, row_sums_alpha
def _validate_row_alpha_fit(self, is_pdist, row_sums_alpha_fit):
if (
self.is_stochastic
and self.alpha > 0
and not is_pdist
and row_sums_alpha_fit is None
):
raise ValueError(
"cdist request can not be carried out, if 'row_sums_alpha_fit=None'"
"Please consider to report bug."
)
def _eval(self, kernel_output, is_pdist, row_sums_alpha_fit):
self._validate_row_alpha_fit(
is_pdist=is_pdist, row_sums_alpha_fit=row_sums_alpha_fit
)
kernel_matrix, internal_ret_cdist, _ = PCManifoldKernel.read_kernel_output(
kernel_output=kernel_output
)
if isinstance(kernel_matrix, pd.DataFrame):
# store indices and cast to same type later
_type = type(kernel_matrix)
rows_idx, columns_idx = kernel_matrix.index, kernel_matrix.columns
kernel_matrix = kernel_matrix.to_numpy()
else:
_type, rows_idx, columns_idx = None, None, None
kernel_matrix, basis_change_matrix, row_sums_alpha = self._normalize(
kernel_matrix,
row_sums_alpha_fit=row_sums_alpha_fit,
is_pdist=is_pdist,
)
if rows_idx is not None and columns_idx is not None:
kernel_matrix = _type(kernel_matrix, index=rows_idx, columns=columns_idx)
if is_pdist:
ret_cdist = dict(
row_sums_alpha_fit=row_sums_alpha,
internal_kernel_kwargs=internal_ret_cdist,
)
ret_extra = dict(basis_change_matrix=basis_change_matrix)
else:
# no need for row_sums_alpha or the basis change matrix in the cdist case
ret_cdist = None
ret_extra = None
return kernel_matrix, ret_cdist, ret_extra
    def __call__(
        self,
        X: np.ndarray,
        Y: Optional[np.ndarray] = None,
        *,
        dist_kwargs: Optional[Dict] = None,
        **kernel_kwargs,
    ) -> Tuple[
        Union[np.ndarray, scipy.sparse.csr_matrix], Optional[Dict], Optional[Dict]
    ]:
        """Compute the diffusion map kernel.

        Parameters
        ----------
        X
            Reference point cloud of shape `(n_samples_X, n_features_X)`.
        Y
            Query point cloud of shape `(n_samples_Y, n_features_Y)`. If not given,
            then `Y=X`.
        dist_kwargs
            Keyword arguments passed to the internal distance matrix computation. See
            :py:meth:`datafold.pcfold.compute_distance_matrix` for parameter arguments.
        **kernel_kwargs: Dict[str, object]
            - internal_kernel_kwargs: Optional[Dict]
                Keyword arguments passed to the set internal kernel.
            - row_sums_alpha_fit: Optional[np.ndarray]
                Row sum values during re-normalization computed during pair-wise kernel
                computation. The parameter is mandatory for the component-wise kernel
                computation and if `alpha>0`.

        Returns
        -------
        `numpy.ndarray`, `scipy.sparse.csr_matrix`
            kernel matrix (or conjugate of it) with same type and shape as
            `distance_matrix`
        Optional[Dict[str, numpy.ndarray]]
            Row sums from re-normalization in key 'row_sums_alpha_fit', only returned for
            pairwise computations. The values are required for follow up out-of-sample
            kernel evaluations (`Y is not None`).
        Optional[Dict[str, scipy.sparse.dia_matrix]]
            Basis change matrix (sparse diagonal) if `is_symmetrize=True` and only
            returned if the kernel matrix is a symmetric conjugate of the true
            diffusion kernel matrix. Required to recover the diffusion map eigenvectors
            from the symmetric conjugate matrix.
        """
        # pairwise evaluation iff no query points were given
        is_pdist = Y is None
        internal_kernel_kwargs, row_sums_alpha_fit = self._read_kernel_kwargs(
            attrs=["internal_kernel_kwargs", "row_sums_alpha_fit"],
            kernel_kwargs=kernel_kwargs,
        )
        kernel_output = self.internal_kernel(
            X, Y=Y, dist_kwargs=dist_kwargs or {}, **internal_kernel_kwargs or {}
        )
        return self._eval(
            kernel_output=kernel_output,
            is_pdist=is_pdist,
            row_sums_alpha_fit=row_sums_alpha_fit,
        )
    def eval(
        self,
        distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix],
        is_pdist=False,
        row_sums_alpha_fit=None,
    ):
        """Evaluate kernel on pre-computed distance matrix.

        For return values see :meth:`.__call__`.

        Parameters
        ----------
        distance_matrix
            Matrix of shape `(n_samples_Y, n_samples_X)`.
        is_pdist:
            If True, the distance matrix must be square
        row_sums_alpha_fit:
            Row sums from a previous pairwise computation; mandatory for a
            component-wise (cdist) evaluation if `alpha>0`.

        Returns
        -------
        See :meth:`.__call__`.
        """
        kernel_output = self.internal_kernel.eval(distance_matrix)
        return self._eval(
            kernel_output=kernel_output,
            is_pdist=is_pdist,
            row_sums_alpha_fit=row_sums_alpha_fit,
        )
class ConeKernel(TSCManifoldKernel):
r"""Compute a dynamics adapted cone kernel for time series collection data.
The equations below describe the kernel evaluation and are taken from the referenced
paper below.
A single kernel evaluation between samples :math:`x` and :math:`y` is computed with
.. math::
K(x, y) = \exp
\left(
-\frac{\vert\vert \omega_{ij}\vert\vert^2}
{\varepsilon \delta t^2 \vert\vert \xi_i \vert\vert \vert\vert \xi_j \vert\vert }
\left[ (1-\zeta \cos^2 \theta_i)(1-\zeta \cos^2 \theta_j) \right]^{0.5}
\right)
where,
.. math::
\cos \theta_i =
\frac{(\xi_i, \omega_{ij})}
{\vert\vert \xi_i \vert\vert \vert\vert \omega_{ij} \vert\vert}
is the angle between samples,
.. math::
\omega_{ij} = y - x
is a difference vector between the point pairs,
.. math::
\delta t
is the (constant) time sampling in the time series,
.. math::
\varepsilon
is an additional scaling parameter of the kernel bandwidth,
.. math::
\zeta
is the parameter to control the angular influence, and
.. math::
\xi_i = \delta_p x_i = \sum_{j=-p/2}^{p/2} w_j x_{i+j}
is the approximation of the dynamical vector field. The approximation is carried
out with :math:`\delta_p`, a :math:`p`-th order accurate central finite difference
(in a sense that :math:`\frac{\xi}{\delta t} + \mathcal{O}(\delta t^p)`) with
associated weights :math:`w`.
.. note::
In the centered finite difference the time values are shifted such that no
samples are taken | |
<gh_stars>1-10
from datafs.managers.manager_dynamo import DynamoDBManager
from datafs.datafs import cli
from datafs._compat import u
from datafs import DataAPI, get_api, to_config_file
import os
from click.testing import CliRunner
import pytest
import traceback
import ast
import re
@pytest.fixture(scope='module')
def manager_table():
    """Yield the name of a DynamoDB archive table backed by DynamoDB Local.

    The table is created against a local endpoint (dummy credentials) and
    deleted again on teardown.

    Note: ``pytest.yield_fixture`` is deprecated and removed in modern pytest;
    ``pytest.fixture`` supports yield-style fixtures directly.
    """
    # setup manager table
    table_name = 'my-cli-test-table'
    manager = DynamoDBManager(
        table_name,
        session_args={
            'aws_access_key_id': "access-key-id-of-your-choice",
            'aws_secret_access_key': "secret-key-of-your-choice"},
        resource_args={
            'endpoint_url': 'http://localhost:8000/',
            'region_name': 'us-east-1'})
    # raise_on_err=False tolerates a table left over from an aborted run
    manager.create_archive_table(table_name, raise_on_err=False)
    try:
        yield table_name
    finally:
        manager.delete_table(table_name)
@pytest.fixture(scope='module')
def sample_config(manager_table, temp_dir_mod, temp_file):
    """Write a datafs YAML config pointing at the test manager table and a
    local OSFS authority, then yield ``(profile_name, config_path)``.

    Note: replaces deprecated ``pytest.yield_fixture`` with ``pytest.fixture``.
    """
    my_test_yaml = r'''
default-profile: myapi
profiles:
  myapi:
    api:
      user_config:
        username: 'My Name'
        contact: '<EMAIL>'
    authorities:
      local:
        args:
          - "{dir}"
        service: OSFS
    manager:
      class: "DynamoDBManager"
      kwargs:
        resource_args:
          endpoint_url: "http://localhost:8000/"
          region_name: "us-east-1"
        session_args:
          aws_access_key_id: "access-key-id-of-your-choice"
          aws_secret_access_key: "secret-key-of-your-choice"
        table_name: "{table}"
'''.format(table=manager_table, dir=temp_dir_mod)
    with open(temp_file, 'w+') as f:
        f.write(my_test_yaml)
    yield 'myapi', temp_file
@pytest.fixture(scope='module')
def preloaded_config(sample_config):
    '''
    Prepare a manager/auth config with 3 archives, each having 3 versions

    .. note::
        To save on test runtime, scope == module. Tests should not modify
        these archives.

    Replaces deprecated ``pytest.yield_fixture`` with ``pytest.fixture``
    (yield-style fixtures are natively supported).
    '''
    profile, temp_file = sample_config
    api = get_api(profile=profile, config_file=temp_file)
    # Set up a couple archives with multiple versions
    arch1 = api.create('/req/arch1')
    arch2 = api.create('/req/arch2')
    arch3 = api.create('/req/arch3')
    # arch1: three regular versions (minor, major, minor bumps)
    with arch1.open('w+', bumpversion='minor', message='bumping to 0.1') as f:
        f.write(u'this is archive /req/arch1 version 0.1')
    with arch1.open('w+', bumpversion='major', message='bumping to 1.0') as f:
        f.write(u'this is archive /req/arch1 version 1.0')
    with arch1.open('w+', bumpversion='minor', message='bumping to 1.1') as f:
        f.write(u'this is archive /req/arch1 version 1.1')
    arch1_versions = arch1.get_versions()
    assert '0.1' in arch1_versions
    assert '1.0' in arch1_versions
    assert '1.1' in arch1_versions
    # arch2: two alpha prereleases followed by a patch release
    with arch2.open('w+', prerelease='alpha') as f:
        f.write(u'this is archive /req/arch2 version 0.0.1a1')
    with arch2.open('w+', prerelease='alpha') as f:
        f.write(u'this is archive /req/arch2 version 0.0.1a2')
    with arch2.open('w+', bumpversion='patch') as f:
        f.write(u'this is archive /req/arch2 version 0.0.1')
    arch2_versions = arch2.get_versions()
    assert '0.0.1a1' in arch2_versions
    assert '0.0.1a2' in arch2_versions
    assert '0.0.1' in arch2_versions
    # arch3: mix of regular versions and a prerelease
    with arch3.open('w+', bumpversion='major') as f:
        f.write(u'this is archive /req/arch3 version 1.0')
    with arch3.open('w+', bumpversion='minor', prerelease='alpha') as f:
        f.write(u'this is archive /req/arch3 version 1.1a1')
    with arch3.open('w+', bumpversion='minor') as f:
        f.write(u'this is archive /req/arch3 version 1.1')
    arch3_versions = arch3.get_versions()
    assert '1.0' in arch3_versions
    assert '1.1a1' in arch3_versions
    assert '1.1' in arch3_versions
    # Set up an unversioned archive with multiple versions
    arch_uver = api.create('uver1', versioned=False)
    with arch_uver.open('w+', message='bumping to 0.1') as f:
        f.write(u'this is archive uver1 version 0.1')
    with arch_uver.open('w+', message='bumping to 1.0') as f:
        f.write(u'this is archive uver1 version 1.0')
    with arch_uver.open('w+', message='bumping to 1.1') as f:
        f.write(u'this is archive uver1 version 1.1')
    arch_uver_versions = arch_uver.get_history()
    assert len(arch_uver_versions) == 3
    try:
        yield profile, temp_file
    finally:
        arch1.delete()
        arch2.delete()
        arch3.delete()
        arch_uver.delete()
@pytest.mark.cli
def test_cli_local(sample_config):
    """End-to-end CLI test against a local archive authority.

    Covers archive creation, metadata, upload, re-upload (no version bump for
    identical content), versioned update, cat, download of current/previous/
    nonexistent versions, and deletion.

    Fixes: two membership checks were bare expressions (no ``assert``), so
    they silently checked nothing.
    """
    profile, temp_file = sample_config
    prefix = ['--config-file', temp_file, '--profile', 'myapi']
    api2 = get_api(profile=profile, config_file=temp_file)
    runner = CliRunner()

    def invoke_ok(args):
        # invoke the CLI and fail loudly (printing the traceback) on a
        # non-zero exit code; returns the click Result on success
        res = runner.invoke(cli, prefix + args)
        if res.exit_code != 0:
            traceback.print_exception(*res.exc_info)
            raise OSError('Errors encountered during execution')
        return res

    # test for configure and create archive
    result = invoke_ok(['create', 'my_first_archive',
                        '--description', 'My test data archive'])
    res = 'created versioned archive <DataArchive local://my_first_archive>'
    assert result.output.strip() == res

    result = invoke_ok(['filter'])
    assert 'my_first_archive' in result.output.strip().split('\n')
    assert len(result.output.strip().split('\n')) == len(list(api2.filter()))

    archive = api2.get_archive('my_first_archive')
    assert archive.archive_name == 'my_first_archive'

    # testing the `metadata` option
    result = invoke_ok(['metadata', 'my_first_archive'])
    metadata = ast.literal_eval(result.output)
    assert metadata['description'] == 'My test data archive'
    # test the api side of the operation
    assert u'My test data archive' == archive.get_metadata()['description']

    with runner.isolated_filesystem():
        with open('hello.txt', 'w') as f:
            f.write('Hoo Yah! Stay Stoked!')

        # update using CLI
        result = invoke_ok(['update', 'my_first_archive', 'hello.txt',
                            '--source', 'Surfers Journal'])
        # assert that we get update feedback
        expected = 'uploaded data to <DataArchive local://my_first_archive>'
        assert expected in result.output

        # lets read the file to make sure it remains unchanged
        with open('hello.txt', 'r') as f:
            data = f.read()
        assert data == 'Hoo Yah! Stay Stoked!'

        # Try re-upload: identical content must not bump the version
        result = invoke_ok(['update', 'my_first_archive', 'hello.txt',
                            '--source', 'Surfers Journal'])
        intended_output = ('uploaded data to <DataArchive '
                           'local://my_first_archive>. version remains 0.0.1.')
        assert intended_output == result.output.strip()

    # this is testing the feed through on the api
    with api2.get_archive(list(api2.filter())[0]).open('r') as f:
        data = f.read()
    assert data == 'Hoo Yah! Stay Stoked!'

    # lets check to make sure our metadata update also passed through
    assert 'Surfers Journal' == api2.get_archive(
        list(api2.filter())[0]).get_metadata()['source']

    # test metadata update and file content change
    with runner.isolated_filesystem():
        invoke_ok(['update', 'my_first_archive',
                   '--bumpversion', 'minor', '--string', 'new version data'])

        result = invoke_ok(['cat', 'my_first_archive'])
        # BUGFIX: was a bare (no-op) expression without `assert`
        assert 'new version data' in result.output

        # download the first version and verify its contents on disk
        invoke_ok(['download', 'my_first_archive', 'here.txt',
                   '--version', '0.0.1'])
        # BUGFIX: was a no-op expression checking result.output; the
        # downloaded file contents are the meaningful check
        with open('here.txt', 'r') as downloaded:
            assert downloaded.read() == 'Hoo Yah! Stay Stoked!'

        # test download of previous version
        invoke_ok(['download', 'my_first_archive', 'here.txt',
                   '--version', '0.0.1'])
        with open('here.txt', 'r') as downloaded:
            assert downloaded.read() == 'Hoo Yah! Stay Stoked!'

        # test download of nonexistant version (should fail without
        # overwriting file)
        result = runner.invoke(cli, prefix + ['download', 'my_first_archive',
                                              'here.txt', '--version', '3.0'])
        assert result.exit_code != 0
        with open('here.txt', 'r') as downloaded:
            assert downloaded.read() == 'Hoo Yah! Stay Stoked!'

        os.remove('here.txt')

    # teardown (delete's exit code is not checked; the filter below verifies
    # that the archive is gone)
    runner.invoke(cli, prefix + ['delete', 'my_first_archive'])
    result = invoke_ok(['filter'])
    assert result.output.strip() == ''
    assert len(list(api2.filter())) == 0
@pytest.mark.cli
def test_cli_unversioned(sample_config):
    """Exercise CLI create/update/download/delete for an unversioned archive."""
    profile, temp_file = sample_config
    prefix = ['--config-file', temp_file, '--profile', 'myapi']
    api2 = get_api(profile=profile, config_file=temp_file)
    runner = CliRunner()

    def run_checked(args):
        # run a CLI command; surface the traceback if it exits non-zero
        outcome = runner.invoke(cli, prefix + args)
        if outcome.exit_code != 0:
            traceback.print_exception(*outcome.exc_info)
            raise OSError('Errors encountered during execution')
        return outcome

    # create an archive with versioning disabled
    result = run_checked(['create', 'unversioned', '--not-versioned'])
    assert result.output.strip() == (
        'created archive <DataArchive local://unversioned>')

    result = run_checked(['filter'])
    assert [result.output.strip()] == ['unversioned']

    # test the actual creation of the object from the api side
    assert len(list(api2.filter())) == 1
    archive = api2.get_archive('unversioned')
    assert archive.archive_name == 'unversioned'

    with runner.isolated_filesystem():
        with open('hello.txt', 'w') as f:
            f.write('un-versioned data')

        # upload through the CLI, attaching two dependencies
        result = run_checked(['update', 'unversioned', 'hello.txt',
                              '--dependency', 'arch1',
                              '--dependency', 'arch2'])
        assert result.output.strip() == (
            'uploaded data to <DataArchive local://unversioned>.')

        # re-upload with new content; feedback carries no version suffix
        result = run_checked(['update', 'unversioned',
                              '--string', 'new content'])
        assert result.output.strip() == (
            'uploaded data to <DataArchive local://unversioned>.')

    with runner.isolated_filesystem():
        # download the (only) copy and verify its contents
        run_checked(['download', 'unversioned', 'here.txt'])
        with open('here.txt', 'r') as downloaded:
            assert downloaded.read() == 'new content'

        # any --version argument must be rejected for unversioned archives
        result = runner.invoke(cli, prefix + [
            'download', 'unversioned', 'here.txt', '--version', 'latest'])
        assert result.exit_code != 0
        result = runner.invoke(cli, prefix + [
            'download', 'unversioned', 'here.txt', '--version', '0.0.1'])
        assert result.exit_code != 0

        os.remove('here.txt')

    # teardown
    runner.invoke(cli, prefix + ['delete', 'unversioned'])
    result = run_checked(['filter'])
    assert result.output.strip() == ''
    assert len(list(api2.filter())) == 0
@pytest.mark.cli
def test_specified_requirements(preloaded_config):
'''
Test download commands with a mix of requirements file, explicit, and
unspecified version requirements
'''
profile, temp_file = preloaded_config
# Create a requirements file and
runner = CliRunner()
prefix = [
'--config-file', '{}'.format(temp_file),
'--profile', 'myapi',
'--requirements', 'requirements_data_test1.txt']
with runner.isolated_filesystem():
# Create requirements file
with open('requirements_data_test1.txt', 'w+') as reqs:
reqs.write('/req/arch1==1.0\n')
reqs.write('/req/arch2==0.0.1a2\n\n')
# Download /req/arch1 with version from requirements file
result = runner.invoke(
cli,
prefix + ['download', '/req/arch1', 'local_req_1.txt'])
if result.exit_code != 0:
traceback.print_exception(*result.exc_info)
raise OSError('Errors encountered during execution')
with open('local_req_1.txt', 'r') as f:
assert f.read() == 'this is | |
<reponame>cstone112/content<gh_stars>1-10
'''IMPORTS'''
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401 # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa: F401
import base64
import json
import requests
import re
from datetime import datetime, timezone, timedelta
from typing import Any, Dict, Union
from requests.models import HTTPError
'''CONSTANTS'''
# HTTP / request-building literals
URL = 'url'
POST = 'post'
GET = 'get'
AUTHORIZATION = 'Authorization'
BEARER = 'Bearer '
CONTENT_TYPE_JSON = 'application/json'
EMPTY_STRING = ''
ASCII = 'ascii'
API_TOKEN = 'apikey'
# Argument / payload key names used by the commands below
VALUE_TYPE = 'value_type'
TARGET_VALUE = 'target_value'
PRODUCT_ID = 'product_id'
DESCRIPTION = 'description'
MESSAGE_ID = 'message_id'
MAILBOX = 'mailbox'
MESSAGE_DELIVERY_TIME = 'message_delivery_time'
COMPUTER_ID = 'computer_id'
FIELD = 'field'
ENDPOINT = 'endpoint'
DATA = 'data'
TYPE = 'type'
VALUE = 'value'
FILESHA = 'file_sha1'
FILENAME = 'filename'
CRITERIA = 'criteria'
EXCEPTION_LIST = 'exceptionList'
SUSPICIOUS_LIST = 'suspiciousObjectList'
LAST_MODIFIED = 'lastModified'
SCAN_ACTION = 'scan_action'
RISK_LEVEL = 'risk_level'
EXPIRYDAY = 'expiry_days'
TASKID = 'task_id'
REPORT_ID = 'report_id'
OS_TYPE = 'os'
FILE_PATH = 'file_path'
FILE_URL = 'file_url'
# NOTE(review): FILE_NAME duplicates FILENAME above (both 'filename') —
# consider consolidating.
FILE_NAME = 'filename'
# NOTE(review): the '<PASSWORD>' values look like sanitized placeholders
# (presumably 'document_password' / 'archive_password') — TODO confirm
# against the upstream integration.
DOCUMENT_PASSWORD = '<PASSWORD>'
ARCHIVE_PASSWORD = '<PASSWORD>'
ACTION_ID = 'actionId'
# End Points
ADD_BLOCKLIST_ENDPOINT = '/v2.0/xdr/response/block'
REMOVE_BLOCKLIST_ENDPOINT = '/v2.0/xdr/response/restoreBlock'
QUARANTINE_EMAIL_ENDPOINT = '/v2.0/xdr/response/quarantineMessage'
DELETE_EMAIL_ENDPOINT = '/v2.0/xdr/response/deleteMessage'
ISOLATE_CONNECTION_ENDPOINT = '/v2.0/xdr/response/isolate'
TERMINATE_PROCESS_ENDPOINT = '/v2.0/xdr/response/terminateProcess'
RESTORE_CONNECTION_ENDPOINT = '/v2.0/xdr/response/restoreIsolate'
ADD_OBJECT_TO_EXCEPTION_LIST = '/v2.0/xdr/threatintel/suspiciousObjects/exceptions'
DELETE_OBJECT_FROM_EXCEPTION_LIST = '/v2.0/xdr/threatintel/suspiciousObjects/exceptions/delete'
ADD_OBJECT_TO_SUSPICIOUS_LIST = '/v2.0/xdr/threatintel/suspiciousObjects'
DELETE_OBJECT_FROM_SUSPICIOUS_LIST = '/v2.0/xdr/threatintel/suspiciousObjects/delete'
TASK_DETAIL_ENDPOINT = '/v2.0/xdr/response/getTask'
GET_COMPUTER_ID_ENDPOINT = '/v2.0/xdr/eiqs/query/agentInfo'
GET_ENDPOINT_INFO_ENDPOINT = '/v2.0/xdr/eiqs/query/endpointInfo'
GET_FILE_STATUS = '/v2.0/xdr/sandbox/tasks/{taskId}'
GET_FILE_REPORT = '/v2.0/xdr/sandbox/reports/{reportId}'
COLLECT_FORENSIC_FILE = '/v2.0/xdr/response/collectFile'
DOWNLOAD_INFORMATION_COLLECTED_FILE = '/v2.0/xdr/response/downloadInfo'
SUBMIT_FILE_TO_SANDBOX = '/v2.0/xdr/sandbox/file'
WORKBENCH_HISTORIES = '/v2.0/xdr/workbench/workbenchHistories'
# Error Messages
RESPONSE_ERROR = 'Error in API call: [%d] - %s'
RETRY_ERROR = 'The max tries exceeded [%d] - %s'
COMMAND_CALLED = 'Command being called is {command}'
COMMAND_EXECUTION_ERROR = 'Failed to execute {error} command. Error'
AUTHORIZATION_ERROR = "Authorization Error: make sure URL/API Key is correctly set. Error - {error}"
# NOTE(review): 'paramter' typo below is in a user-facing runtime string;
# fix deliberately (and update any tests matching it), not as a drive-by.
PARAMETER_ISSUE = '{param} is not a valid paramter. Kindly provide valid parameter'
FILE_TYPE_ERROR = "Kindly provide valid file 'type'"
FILE_NOT_FOUND = 'No such file present in {filepath}'
# General Messages:
RAW_RESPONSE = "The raw response data - {raw_response}"
SUCCESS_RESPONSE = 'success with url {url} and response status {status}'
EXCEPTION_MESSAGE = "Successfully {task} object to exception list with response {code}, Total items in exception list - {length}"
SUCCESS_TEST = 'Successfully connected to the vision one API.'
POLLING_MESSAGE = (
    "The task has not completed, will check status again in 30 seconds"
)
# Table Heading
TABLE_ADD_TO_BLOCKLIST = 'Add to block list '
TABLE_REMOVE_FROM_BLOCKLIST = 'Remove from block list '
TABLE_QUARANTINE_EMAIL_MESSAGE = 'Quarantine email message '
TABLE_DELETE_EMAIL_MESSAGE = 'Delete email message '
TABLE_ISOLATE_ENDPOINT_MESSAGE = 'Isolate endpoint connection '
TABLE_RESTORE_ENDPOINT_MESSAGE = 'Restore endpoint connection '
TABLE_TERMINATE_PROCESS = 'Terminate process '
TABLE_ADD_EXCEPTION_LIST = 'Add object to exception list '
TABLE_DELETE_EXCEPTION_LIST = 'Delete object from exception list '
TABLE_ADD_SUSPICIOUS_LIST = 'Add object to suspicious list '
TABLE_ENDPOINT_INFO = 'Endpoint info '
TABLE_DELETE_SUSPICIOUS_LIST = 'Delete object from suspicious list '
TABLE_GET_FILE_ANALYSIS_STATUS = 'File analysis status '
TABLE_GET_FILE_ANALYSIS_REPORT = 'File analysis report '
TABLE_COLLECT_FILE = 'Collect forensic file '
TABLE_COLLECTED_FORENSIC_FILE_DOWNLOAD_INFORMATION = 'The download information for collected forensic file '
TABLE_SUBMIT_FILE_TO_SANDBOX = 'Submit file to sandbox '
# COMMAND NAMES
ADD_BLOCKLIST_COMMAND = 'trendmicro-visionone-add-to-block-list'
REMOVE_BLOCKLIST_COMMAND = 'trendmicro-visionone-remove-from-block-list'
QUARANTINE_EMAIL_COMMAND = 'trendmicro-visionone-quarantine-email-message'
DELETE_EMAIL_COMMAND = 'trendmicro-visionone-delete-email-message'
ISOLATE_ENDPOINT_COMMAND = 'trendmicro-visionone-isolate-endpoint'
RESTORE_ENDPOINT_COMMAND = 'trendmicro-visionone-restore-endpoint-connection'
TERMINATE_PROCESS_COMMAND = 'trendmicro-visionone-terminate-process'
ADD_EXCEPTION_LIST_COMMAND = 'trendmicro-visionone-add-objects-to-exception-list'
DELETE_EXCEPTION_LIST_COMMAND = 'trendmicro-visionone-delete-objects-from-exception-list'
ADD_SUSPICIOUS_LIST_COMMAND = 'trendmicro-visionone-add-objects-to-suspicious-list'
DELETE_SUSPICIOUS_LIST_COMMAND = 'trendmicro-visionone-delete-objects-from-suspicious-list'
GET_FILE_ANALYSIS_STATUS = 'trendmicro-visionone-get-file-analysis-status'
GET_FILE_ANALYSIS_REPORT = 'trendmicro-visionone-get-file-analysis-report'
COLLECT_FILE = 'trendmicro-visionone-collect-forensic-file'
DOWNLOAD_COLLECTED_FILE = 'trendmicro-visionone-download-information-for-collected-forensic-file'
FILE_TO_SANDBOX = 'trendmicro-visionone-submit-file-to-sandbox'
CHECK_TASK_STATUS = 'trendmicro-visionone-check-task-status'
GET_ENDPOINT_INFO_COMMAND = 'trendmicro-visionone-get-endpoint-info'
FETCH_INCIDENTS = 'fetch-incidents'
# Maps a command name to the heading used when rendering its result table.
table_name = {
    ADD_BLOCKLIST_COMMAND: TABLE_ADD_TO_BLOCKLIST,
    REMOVE_BLOCKLIST_COMMAND: TABLE_REMOVE_FROM_BLOCKLIST,
    QUARANTINE_EMAIL_COMMAND: TABLE_QUARANTINE_EMAIL_MESSAGE,
    DELETE_EMAIL_COMMAND: TABLE_DELETE_EMAIL_MESSAGE,
    ISOLATE_ENDPOINT_COMMAND: TABLE_ISOLATE_ENDPOINT_MESSAGE,
    RESTORE_ENDPOINT_COMMAND: TABLE_RESTORE_ENDPOINT_MESSAGE,
    ADD_EXCEPTION_LIST_COMMAND: TABLE_ADD_EXCEPTION_LIST,
    DELETE_EXCEPTION_LIST_COMMAND: TABLE_DELETE_EXCEPTION_LIST,
    ADD_SUSPICIOUS_LIST_COMMAND: TABLE_ADD_SUSPICIOUS_LIST,
    GET_ENDPOINT_INFO_COMMAND: TABLE_ENDPOINT_INFO,
    DELETE_SUSPICIOUS_LIST_COMMAND: TABLE_DELETE_SUSPICIOUS_LIST
}
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
def check_datetime_aware(d):
    """Return True if *d* is a timezone-aware datetime.

    Per the datetime documentation, an object is aware when ``tzinfo`` is set
    and ``utcoffset()`` does not return ``None``.
    """
    tz = d.tzinfo
    if tz is None:
        return False
    return tz.utcoffset(d) is not None
class Client(BaseClient):
    # Thin wrapper around BaseClient adding bearer-token auth, helper lookups
    # against the Vision One API, and task-status polling support.
    def __init__(self, base_url: str, api_key: str) -> None:
        """
        Inherit the BaseClient class from the demistomock.
        :type base_url: ``str``
        :param base_url: Base server address with suffix, for example: https://example.com/api/v2/.
        :type api_key: ``str``
        :param api_key: api token to access the api data.
        :return: returns None
        :rtype: ``None``
        """
        super().__init__(base_url=base_url)
        # NOTE(review): base_url is also stored by BaseClient; kept here as
        # the rest of this class reads self.base_url directly.
        self.base_url = base_url
        self.api_key = api_key
        # last HTTP status code seen by http_request (None until first call)
        self.status = None
    def http_request(self, method: str, url_suffix: str, json_data=None, params=None, data=None) -> Any:
        """
        Override http_request method from BaseClient class. This method will print an error based on status code
        and exceptions.
        :type method: ``str``
        :param method: The HTTP method, for example: GET, POST, and so on.
        :type url_suffix: ``str``
        :param url_suffix: The API endpoint.
        :type json_data: ``dict``
        :param json_data: The dictionary to send in a 'POST' request.
        :type params: ``dict``
        :param params: URL parameters to specify the query.
        :type data: ``dict``
        :param data: The data to send in a 'POST' request.
        :return: response data
        :rtype: ``dict`` or ``str`` or ``requests.Response``
        """
        header = {
            "Authorization": "Bearer {token}".format(token=self.api_key),
            "Content-Type": f'{CONTENT_TYPE_JSON}{";charset=utf-8"}'
        }
        try:
            response = self._http_request(method=method, full_url=f'{self.base_url}{url_suffix}', retries=3, json_data=json_data,
                                          params=params, headers=header, resp_type='response',
                                          ok_codes=(200, 201), data=data)
        except DemistoException as error:
            demisto.error(error.message)
            # NOTE(review): return_error presumably terminates execution in
            # the demisto runtime; otherwise `response` below is unbound.
            return_error(error.message)
        if response.ok:
            demisto.info(SUCCESS_RESPONSE.format(url=f'{self.base_url}{url_suffix}', status=response.status_code))
            self.status = response.status_code
            content_type = response.headers.get('Content-Type', '')
            # equivalent to: CONTENT_TYPE_JSON in content_type
            if content_type.__contains__(CONTENT_TYPE_JSON):
                # Handle empty response
                if response.text == EMPTY_STRING:
                    return response
                else:
                    return response.json()
            else:
                return response
    def status_check(self, data: Dict[str, Any]) -> Any:
        """
        Check the status of particular task.
        :type data: ``dict``
        :param data: args containing the 'actionId' of the task to query.
        :return: task status response data.
        :rtype: ``Any``
        """
        action_id = data.get(ACTION_ID)
        params = {"actionId": action_id}
        response = self.http_request(GET, TASK_DETAIL_ENDPOINT, params=params)
        message = {
            "actionId": action_id,
            "taskStatus": response.get("data").get("taskStatus")
        }
        return CommandResults(
            readable_output=tableToMarkdown("Status of task ", message, removeNull=True),
            outputs_prefix=(
                "VisionOne.Task_Status"
            ),
            outputs_key_field="actionId",
            outputs=message)
    def lookup_type(self, param: Any) -> str:
        """
        Classify *param* as 'ip', 'ipv6', 'macaddr' or (fallback) 'hostname'.

        NOTE(review): the patterns are applied with re.search and are not
        anchored, so a hostname containing an IP-like substring classifies
        as 'ip' — confirm this is the intended behavior.
        """
        # Regex expression for validating IPv4
        regex = "(([0-9]|[1-9][0-9]|1[0-9][0-9]|"\
                "2[0-4][0-9]|25[0-5])\\.){3}"\
                "([0-9]|[1-9][0-9]|1[0-9][0-9]|"\
                "2[0-4][0-9]|25[0-5])"
        # Regex expression for validating IPv6
        regex1 = "((([0-9a-fA-F]){1,4})\\:){7}"\
                 "([0-9a-fA-F]){1,4}"
        # Regex expression for validating mac
        regex2 = "([0-9A-Fa-f]{2}[:-]){5}"\
                 "([0-9A-Fa-f]{2})"
        p = re.compile(regex)
        p1 = re.compile(regex1)
        p2 = re.compile(regex2)
        # Checking if it is a valid IPv4 addresses
        if (re.search(p, param)):
            return "ip"
        # Checking if it is a valid IPv6 addresses
        elif (re.search(p1, param)):
            return "ipv6"
        # Checking if it is a valid MAC address
        elif (re.search(p2, param)):
            return "macaddr"
        # Otherwise use hostname type
        return "hostname"
    def get_computer_id(self, field: Any, value: Any) -> str:
        """
        Fetch particular computer id using hostname, macaddress or ip.
        :type field: ``str``
        :param field: type of field to search hostname, macaddress or ip.
        :type value: ``str``
        :param value: value of the particular field.
        :return: value of computer id.
        :rtype: ``str``
        """
        body = {
            CRITERIA: {
                FIELD: field,
                VALUE: value
            }
        }
        response = self.http_request(POST, GET_COMPUTER_ID_ENDPOINT, data=json.dumps(body))
        # the endpoint reports lookup failures via a 'status' field
        if response["status"] == 'FAIL':
            return_error("kindly provide valid field value")
        computer_id = response.get("result").get("computerId")
        return computer_id
    def exception_list_count(self) -> int:
        """
        Gets the count of object present in exception list
        :return: number of exception object.
        :rtype: ``int``
        """
        response = self.http_request(GET, ADD_OBJECT_TO_EXCEPTION_LIST)
        list_of_exception = response.get(DATA).get(EXCEPTION_LIST)
        exception_count = len(list_of_exception)
        return exception_count
    def suspicious_list_count(self) -> int:
        """
        Gets the count of object present in suspicious list
        :return: number of suspicious object.
        :rtype: ``int``
        """
        response = self.http_request(GET, ADD_OBJECT_TO_SUSPICIOUS_LIST)
        list_of_exception = response.get(DATA).get(SUSPICIOUS_LIST)
        exception_count = len(list_of_exception)
        return exception_count
    def get_workbench_histories(self, start, end, offset=None, size=None) -> Any:
        """
        Fetch workbench records created between *start* and *end* (datetimes),
        sorted by creation time; *offset*/*size* page the result.
        :return: the 'workbenchRecords' list from the API response.
        """
        # normalize both endpoints to timezone-aware UTC ISO-8601 ('Z' suffix)
        if not check_datetime_aware(start):
            start = start.astimezone()
        if not check_datetime_aware(end):
            end = end.astimezone()
        start = start.astimezone(timezone.utc)
        end = end.astimezone(timezone.utc)
        start = start.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
        end = end.isoformat(timespec='milliseconds').replace('+00:00', 'Z')
        # offset/limit are only sent when explicitly provided
        params = dict([('startTime', start),
                       ('endTime', end),
                       ('sortBy', 'createdTime')]
                      + ([('offset', offset)] if offset is not None else [])
                      + ([('limit', size)] if size is not None else []))
        response = self.http_request(GET, WORKBENCH_HISTORIES, params=params)['data']['workbenchRecords']
        return response
def run_polling_command(
    args: Dict[str, Any],
    cmd: str, client: Client
) -> Union[str, CommandResults]:
    """
    Perform a polling interval to check the status of a task.
    :type args: ``Dict[str, Any]``
    :param args: arguments for the poll; must contain 'actionId' and may
        contain 'interval_in_seconds' (default 30).
    :type cmd: ``str``
    :param cmd: the command to re-schedule while the task is still pending.
    :type client: ``Client``
    :param client: client object to use http_request.
    """
    ScheduledCommand.raise_error_if_not_supported()
    interval_in_secs = int(args.get('interval_in_seconds', 30))
    command_results = client.status_check(args)
    action_id = args.get("actionId")
    # keep polling until the task reaches a terminal state
    if command_results.outputs.get(
            "taskStatus") not in (
            "success", "failed", "timeout", "skipped"):
        # schedule next poll
        polling_args = {
            'actionId': action_id,
            'interval_in_seconds': interval_in_secs,
            'polling': True,
            **args
        }
        scheduled_command = ScheduledCommand(
            command=cmd,
            next_run_in_seconds=interval_in_secs,
            args=polling_args,
            timeout_in_seconds=1500)  # The timeout interval set for 25 minutes.
        command_results = CommandResults(scheduled_command=scheduled_command)
    return command_results
def get_task_status(
    args: Dict[str, Any],
    client: Client
) -> Union[str, CommandResults]:
    """
    Check the status of a task by polling until it reaches a terminal state.
    :type args: ``Dict[str, Any]``
    :param args: polling arguments; must contain 'actionId'.
    :type client: ``Client``
    :param client: client object to use http_request.
    """
    return run_polling_command(args, CHECK_TASK_STATUS, client)
def test_module(client: Client) -> Any:
    """
    Verify connectivity/authentication by fetching the exception list;
    returns 'ok' on success (return_error fires inside http_request on failure).
    :type client: ``Client``
    :param client: client object to use http_request.
    """
    client.http_request('GET', '/v2.0/xdr/threatintel/suspiciousObjects/exceptions')
    return 'ok'
def get_endpoint_info(
client: Client, args: Dict[str, Any]
) -> Union[str, CommandResults]:
"""
Retrieve information abouut the endpoint queried and
sends the result to demisto war room.
:type client: ``Client``
:param client: client object to | |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
to_list = ak._v2.operations.convert.to_list
def test_keep_None_in_place_test():
    """argsort/sort with axis=1 must keep None (missing) list entries in place."""
    v2_array = ak._v2.highlevel.Array([[3, 2, 1], [], None, [4, 5]]).layout
    assert to_list(v2_array.argsort(axis=1)) == [
        [2, 1, 0],
        [],
        None,
        [0, 1],
    ]
    assert to_list(v2_array.sort(axis=1)) == [
        [1, 2, 3],
        [],
        None,
        [4, 5],
    ]
    assert to_list(v2_array.sort(axis=1)) == [[1, 2, 3], [], None, [4, 5]]
    # NOTE(review): compares the *sort* form against the *argsort* form; they
    # can only agree because the content here is int64 — confirm intended.
    assert v2_array.typetracer.sort(axis=1).form == v2_array.argsort(axis=1).form
    assert to_list(v2_array.argsort(axis=1)) == [[2, 1, 0], [], None, [0, 1]]
def test_keep_None_in_place_test_2():
    """Typetracer argsort form must match the concrete argsort form."""
    layout = ak._v2.highlevel.Array([[3, 2, 1], [], None, [4, 5]]).layout
    expected_form = layout.argsort(axis=1).form
    assert layout.typetracer.argsort(axis=1).form == expected_form
@pytest.mark.skip(reason="FIXME: v2 highlevel argsort has not been implemented yet")
def test_empty_slice():
    """Slicing a record array with empty lists, then indexing with argsort
    output, must stay empty and preserve the typetracer form."""
    electron = ak._v2.highlevel.Array(
        ak._v2.contents.ListOffsetArray(
            ak._v2.index.Index64(np.array([0, 0, 1], np.int64)),
            ak._v2.contents.RecordArray(
                [ak._v2.contents.NumpyArray(np.array([1.0]))],
                ["pt"],
                parameters={"__record__": "Electron"},
            ),
        )
    )
    v2_electron = electron.layout[[[], []]]
    assert to_list(v2_electron) == [[], []]
    # Renamed from 'id' to avoid shadowing the builtin id().
    idx = ak._v2.operations.structure.argsort(electron, axis=1)
    assert to_list(v2_electron[idx]) == [[], []]
    assert v2_electron.typetracer[idx].form == v2_electron[idx].form
def test_masked():
    """sort(axis=1) on a ByteMaskedArray keeps masked (None) entries, which
    end up at the end of each sorted sublist."""
    v2_array = ak._v2.highlevel.Array([[0, 1, 2, 3], [3, 3, 3, 2, 1]])
    is_valid = v2_array != 3
    v2_array_mask = ak._v2.highlevel.Array(
        ak._v2.contents.ListOffsetArray(
            v2_array.layout.offsets,
            ak._v2.contents.ByteMaskedArray(
                ak._v2.index.Index8(is_valid.layout.content.data),
                v2_array.layout.content,
                valid_when=True,
            ),
        )
    )
    assert to_list(v2_array_mask) == [
        [0, 1, 2, None],
        [None, None, None, 2, 1],
    ]
    assert to_list(v2_array_mask.layout.sort(axis=1)) == [
        [0, 1, 2, None],
        [1, 2, None, None, None],
    ]
    assert (
        v2_array_mask.layout.typetracer.sort(axis=1).form
        == v2_array_mask.layout.sort(axis=1).form
    )
def test_v1_argsort_and_v2_sort():
    """sort() on an option-type 1-d array pushes None values to the end."""
    layout = ak._v2.highlevel.Array([1, 2, None, 3, 0, None]).layout
    assert to_list(layout.sort()) == [0, 1, 2, 3, None, None]
    assert layout.typetracer.sort().form == layout.sort().form
def test_v1_argsort_2d_and_v2_sort():
    """sort() applies per sublist on a 2-d option-type array; None values go
    to the end of each sublist."""
    v2_array = ak._v2.highlevel.Array(
        [[1, 2, None, 3, 0, None], [1, 2, None, 3, 0, None]]
    ).layout
    assert to_list(v2_array.sort()) == [
        [
            0,
            1,
            2,
            3,
            None,
            None,
        ],
        [
            0,
            1,
            2,
            3,
            None,
            None,
        ],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_nan():
    """NaN values come first in the sorted output for this kernel."""
    layout = ak._v2.highlevel.Array([1, 2, np.nan, 3, 0, np.nan]).layout
    # Compare via str() because nan != nan under ==.
    assert str(to_list(layout.sort())) == "[nan, nan, 0.0, 1.0, 2.0, 3.0]"
    assert layout.typetracer.sort().form == layout.sort().form
def test_sort_strings():
    """Strings sort lexicographically."""
    v2_array = ak._v2.highlevel.Array(
        ["one", "two", "three", "four", "five", "six", "seven", "eight"]
    ).layout
    assert to_list(v2_array) == [
        "one",
        "two",
        "three",
        "four",
        "five",
        "six",
        "seven",
        "eight",
    ]
    assert to_list(v2_array.sort()) == [
        "eight",
        "five",
        "four",
        "one",
        "seven",
        "six",
        "three",
        "two",
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_sort_nested_strings():
    """Sorting applies within each innermost list of a nested string array."""
    v2_array = ak._v2.highlevel.Array(
        [["one", "two"], ["three", "four", "five"], ["six"], ["seven", "eight"]]
    ).layout
    assert to_list(v2_array) == [
        ["one", "two"],
        ["three", "four", "five"],
        ["six"],
        ["seven", "eight"],
    ]
    assert to_list(v2_array.sort()) == [
        ["one", "two"],
        ["five", "four", "three"],
        ["six"],
        ["eight", "seven"],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_sort_invalid_axis():
    """sort() with an axis deeper than the array's nesting raises ValueError."""
    v2_array = ak._v2.operations.convert.from_numpy(
        np.array([[3.3, 2.2], [1.1, 5.5], [4.4, 6.6]]),
        regulararray=True,
        highlevel=False,
    )
    with pytest.raises(ValueError) as err:
        v2_array.sort(axis=3)
    assert str(err.value).startswith(
        "axis=3 exceeds the depth of the nested list structure (which is 2)"
    )
def test_numpy_array_iscontiguous():
    """A strided (non-contiguous) NumPy column view must round-trip and sort.

    The original test built the identical matrix/column twice; the duplicate
    setup is merged into one.
    """
    matrix = np.arange(64).reshape(8, -1)
    v2_layout = ak._v2.contents.NumpyArray(matrix[:, 0])
    assert not v2_layout.is_contiguous
    assert to_list(v2_layout) == [0, 8, 16, 24, 32, 40, 48, 56]
    # The column is already ascending, so sorting must be a no-op.
    assert to_list(v2_layout.sort()) == [0, 8, 16, 24, 32, 40, 48, 56]
    assert v2_layout.typetracer.sort().form == v2_layout.sort().form
def test_numpyarray_sort():
    """sort() on a flat NumpyArray matches np.sort on the same data."""
    v2_array = ak._v2.operations.convert.from_numpy(
        np.array([3.3, 2.2, 1.1, 5.5, 4.4]), regulararray=True, highlevel=False
    )
    assert to_list(np.sort(np.asarray(v2_array))) == [
        1.1,
        2.2,
        3.3,
        4.4,
        5.5,
    ]
    assert to_list(v2_array.sort()) == [
        1.1,
        2.2,
        3.3,
        4.4,
        5.5,
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
@pytest.mark.skip(reason="FIXME: ak._v2.operations.structure.(arg)sort not implemented")
def test_3d():
    """sort/argsort on a 3-d NumpyArray must agree with np.sort/np.argsort
    along every axis (0, 1 and 2)."""
    array = ak._v2.contents.NumpyArray(
        np.array(
            [
                # axis 2:    0       1       2       3       4         # axis 1:
                [
                    [1.1, 2.2, 3.3, 4.4, 5.5],  # 0
                    [6.6, 7.7, 8.8, 9.9, 10.10],  # 1
                    [11.11, 12.12, 13.13, 14.14, 15.15],
                ],  # 2
                [
                    [-1.1, -2.2, -3.3, -4.4, -5.5],  # 3
                    [-6.6, -7.7, -8.8, -9.9, -10.1],  # 4
                    [-11.11, -12.12, -13.13, -14.14, -15.15],
                ],
            ]
        )
    )  # 5
    assert to_list(
        ak._v2.operations.structure.argsort(array, axis=2, ascending=True, stable=False)
    ) == to_list(np.argsort(array, 2))
    assert to_list(
        ak._v2.operations.structure.sort(array, axis=2, ascending=True, stable=False)
    ) == to_list(np.sort(np.asarray(array), 2))
    assert to_list(
        ak._v2.operations.structure.argsort(array, axis=1, ascending=True, stable=False)
    ) == to_list(np.argsort(np.asarray(array), 1))
    assert to_list(
        ak._v2.operations.structure.sort(array, axis=1, ascending=True, stable=False)
    ) == to_list(np.sort(np.asarray(array), 1))
    # Descending sort along axis 1, checked against an explicit expectation.
    assert to_list(
        ak._v2.operations.structure.sort(
            np.asarray(array), axis=1, ascending=False, stable=False
        )
    ) == [
        [
            [11.11, 12.12, 13.13, 14.14, 15.15],
            [6.6, 7.7, 8.8, 9.9, 10.1],
            [1.1, 2.2, 3.3, 4.4, 5.5],
        ],
        [
            [-1.1, -2.2, -3.3, -4.4, -5.5],
            [-6.6, -7.7, -8.8, -9.9, -10.1],
            [-11.11, -12.12, -13.13, -14.14, -15.15],
        ],
    ]
    assert to_list(
        ak._v2.operations.structure.sort(array, axis=0, ascending=True, stable=False)
    ) == to_list(np.sort(np.asarray(array), 0))
    assert to_list(
        ak._v2.operations.structure.argsort(array, axis=0, ascending=True, stable=False)
    ) == to_list(np.argsort(np.asarray(array), 0))
def test_bool_sort():
    """Booleans sort with False before True."""
    layout = ak._v2.operations.convert.from_numpy(
        np.array([True, False, True, False, False]), regulararray=True, highlevel=False
    )
    assert to_list(layout.sort()) == [False, False, False, True, True]
    assert layout.typetracer.sort().form == layout.sort().form
def test_emptyarray_sort():
    """Sorting an EmptyArray, or lists that are all empty, is a no-op."""
    empty = ak._v2.contents.emptyarray.EmptyArray()
    assert to_list(empty.sort()) == []
    nested = ak._v2.highlevel.Array([[], [], []]).layout
    assert to_list(nested.sort()) == [[], [], []]
    assert nested.typetracer.sort().form == nested.sort().form
def test_listarray_sort():
    """sort() on a ListArray (starts/stops pointing into shuffled content)
    sorts each list independently."""
    v2_array = ak._v2.contents.listarray.ListArray(  # noqa: F841
        ak._v2.index.Index(np.array([4, 100, 1])),
        ak._v2.index.Index(np.array([7, 100, 3, 200])),
        ak._v2.contents.numpyarray.NumpyArray(
            np.array([6.6, 4.4, 5.5, 7.7, 3.3, 2.2, 1.1, 8.8])
        ),
    )
    assert to_list(v2_array) == [
        [3.3, 2.2, 1.1],
        [],
        [4.4, 5.5],
    ]
    assert to_list(v2_array.sort()) == [
        [1.1, 2.2, 3.3],
        [],
        [4.4, 5.5],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_listoffsetarray_sort():
    """sort() on ListOffsetArrays: default/innermost axis sorts each list;
    axis=0 and axis=1 sort across lists at the corresponding depth."""
    v2_array = ak._v2.operations.convert.from_iter(
        [[3.3, 2.2, 1.1], [], [5.5, 4.4], [6.6], [9.9, 7.7, 8.8, 10.1]], highlevel=False
    )
    assert to_list(v2_array.sort()) == [
        [1.1, 2.2, 3.3],
        [],
        [4.4, 5.5],
        [6.6],
        [7.7, 8.8, 9.9, 10.10],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
    # axis=0 sorts across lists position-by-position; here that is a no-op.
    assert to_list(v2_array.sort(axis=0)) == [
        [3.3, 2.2, 1.1],
        [],
        [5.5, 4.4],
        [6.6],
        [9.9, 7.7, 8.8, 10.1],
    ]
    assert v2_array.typetracer.sort(axis=0).form == v2_array.sort(axis=0).form
    v2_array = ak._v2.operations.convert.from_iter(
        [
            [[11.1, 0.0, -2.2], [], [33.33, 4.4]],
            [],
            [[5.5]],
            [[6.6, -9.9, 8.8, 7.7]],
            [[], [12.2, 1.1, 10.0]],
        ],
        highlevel=False,
    )
    assert to_list(v2_array.sort(axis=0)) == [
        [[5.5, -9.9, -2.2], [], [33.33, 4.4]],
        [],
        [[6.6]],
        [[11.1, 0.0, 8.8, 7.7]],
        [[], [12.2, 1.1, 10.0]],
    ]
    assert v2_array.typetracer.sort(axis=0).form == v2_array.sort(axis=0).form
    # [
    #     [[5.5, -9.9, -2.2], [], [33.33, 4.4]],
    #     [],
    #     [[6.6]],
    #     [[11.1, 0.0, 8.8, 7.7]],
    #     [[], [12.2, 1.1, 10.0]]
    # ]
    assert to_list(v2_array.sort(axis=1)) == [
        [[11.1, 0.0, -2.2], [], [33.33, 4.4]],
        [],
        [[5.5]],
        [[6.6, -9.9, 8.8, 7.7]],
        [[], [12.2, 1.1, 10.0]],
    ]
    assert v2_array.typetracer.sort(axis=1).form == v2_array.sort(axis=1).form
    # [
    #     [[11.1, 0.0, -2.2], [], [33.33, 4.4]],
    #     [],
    #     [[5.5]],
    #     [[6.6, -9.9, 8.8, 7.7]],
    #     [[], [12.2, 1.1, 10.0]]
    # ]
    assert to_list(v2_array.sort(axis=2)) == [
        [[-2.2, 0.0, 11.1], [], [4.4, 33.33]],
        [],
        [[5.5]],
        [[-9.9, 6.6, 7.7, 8.8]],
        [[], [1.1, 10.0, 12.2]],
    ]
    assert v2_array.typetracer.sort(axis=2).form == v2_array.sort(axis=2).form
    # [
    #     [[-2.2, 0.0, 11.1], [], [4.4, 33.33]],
    #     [],
    #     [[5.5]],
    #     [[-9.9, 6.6, 7.7, 8.8]],
    #     [[], [1.1, 10.0, 12.2]]
    # ]
    assert to_list(v2_array.sort()) == [
        [[-2.2, 0.0, 11.1], [], [4.4, 33.33]],
        [],
        [[5.5]],
        [[-9.9, 6.6, 7.7, 8.8]],
        [[], [1.1, 10.0, 12.2]],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
    # [
    #     [[-2.2, 0.0, 11.1], [], [4.4, 33.33]],
    #     [],
    #     [[5.5]],
    #     [[-9.9, 6.6, 7.7, 8.8]],
    #     [[], [1.1, 10.0, 12.2]]
    # ]
def test_regulararray_sort():
    """sort() on a RegularArray sorts along the innermost axis by default."""
    v2_array = ak._v2.operations.convert.from_numpy(
        np.array(
            [
                [
                    [3.3, 1.1, 5.5, 2.2, 4.4],
                    [8.8, 6.6, 9.9, 7.7, 10.10],
                    [11.11, 14.14, 15.15, 12.12, 13.13],
                ],
                [
                    [-1.1, -2.2, -5.5, -3.3, -4.4],
                    [-7.7, -8.8, -9.9, -6.6, -10.1],
                    [-13.13, -11.11, -12.12, -14.14, -15.15],
                ],
            ]
        ),
        regulararray=True,
        highlevel=False,
    )
    assert to_list(v2_array) == [
        [
            [3.3, 1.1, 5.5, 2.2, 4.4],
            [8.8, 6.6, 9.9, 7.7, 10.1],
            [11.11, 14.14, 15.15, 12.12, 13.13],
        ],
        [
            [-1.1, -2.2, -5.5, -3.3, -4.4],
            [-7.7, -8.8, -9.9, -6.6, -10.1],
            [-13.13, -11.11, -12.12, -14.14, -15.15],
        ],
    ]
    assert to_list(v2_array.sort()) == [
        [
            [1.1, 2.2, 3.3, 4.4, 5.5],
            [6.6, 7.7, 8.8, 9.9, 10.1],
            [11.11, 12.12, 13.13, 14.14, 15.15],
        ],
        [
            [-5.5, -4.4, -3.3, -2.2, -1.1],
            [-10.1, -9.9, -8.8, -7.7, -6.6],
            [-15.15, -14.14, -13.13, -12.12, -11.11],
        ],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
def test_bytemaskedarray_sort():
    """sort() on a ByteMaskedArray of nested lists leaves masked (None)
    outer entries untouched and sorts the valid inner lists."""
    content = ak._v2.operations.convert.from_iter(
        [
            [[1.1, 0.0, 2.2], [], [3.3, 4.4]],
            [],
            [[5.5]],
            [[6.6, 9.9, 8.8, 7.7]],
            [[], [12.2, 11.1, 10.0]],
        ],
        highlevel=False,
    )
    mask = ak._v2.index.Index8(np.array([0, 0, 1, 1, 0], dtype=np.int8))
    v2_array = ak._v2.contents.ByteMaskedArray(mask, content, valid_when=False)
    assert to_list(v2_array) == [
        [[1.1, 0.0, 2.2], [], [3.3, 4.4]],
        [],
        None,
        None,
        [[], [12.2, 11.1, 10.0]],
    ]
    assert to_list(v2_array.sort()) == [
        [[0.0, 1.1, 2.2], [], [3.3, 4.4]],
        [],
        None,
        None,
        [[], [10.0, 11.1, 12.2]],
    ]
    assert v2_array.typetracer.sort().form == v2_array.sort().form
@pytest.mark.skip(reason="FIXME: ak._v2.operations.structure.(arg)sort not implemented")
def test_bytemaskedarray_sort_2():
array3 = ak._v2.highlevel.Array(
[[2.2, 1.1, 3.3], [], [4.4, 5.5], [5.5], [-4.4, -5.5, -6.6]]
).layout
assert to_list(
ak._v2.operations.structure.sort(array3, axis=1, ascending=False, | |
<reponame>ryjmacdonell/geomtools<filename>gimbal/substitute.py
"""
Substitution of molecular geometries with functional groups.
Given a cartesian geometry and element labels, a substituent can
be added knowing (a) the substituent identity (b) the desired
position to substitute and (c) the bond axis for substitution.
This requires some default information, such as the default structure
and orientation of the substituent (relative to an axis) and the
bond length of the substituent. For now, only single bonds are treated.
"""
import numpy as np
import gimbal.displace as displace
import gimbal.fileio as fileio
import gimbal.constants as con
class SubLib(object):
    """
    Object containing a library of substituent geometries.

    Attributes
    ----------
    syn : dict
        A dictionary of synonyms used to find the appropriate
        substituents.
    elem : dict
        A dictionary of atomic symbols for each substituent.
    xyz : dict
        A dictionary of atomic cartesian coordinates for each
        substituent.
    """
    def __init__(self):
        # Populate the synonym, element and geometry tables, then build
        # composite substituents (e.g. 'et' from two 'me' groups).
        self.syn = dict()
        self.elem = dict()
        self.xyz = dict()
        self._populate_syn()
        self._populate_elem()
        self._populate_xyz()
        self._add_comb()

    def _populate_syn(self):
        """Adds a dictionary of synonyms for labels."""
        # The first entry of each sublist is the canonical label; every
        # synonym in the sublist maps to it.
        synlist = [['h'],
                   ['d'],
                   ['me', 'ch3', 'h3c'],
                   ['et', 'ch2ch3', 'c2h5', 'ch3ch2'],
                   ['npr', 'ch2ch2ch3', 'c3h7', 'ch3ch2ch2'],
                   ['ipr', 'chch3ch3', 'ch(ch3)2', '(ch3)2ch', 'ch3chch3'],
                   ['nbu', 'ch2ch2ch2ch3', 'c4h9', 'ch3ch2ch2ch2'],
                   ['ibu', 'chch3ch2ch3', 'ch3chch2ch3', 'ch3ch3chch3'],
                   ['tbu', 'cch3ch3ch3', 'c(ch3)3', 'ch3ch3ch3c', '(ch3)3c'],
                   ['vi', 'chch2', 'c2h3', 'h2chc', 'h3c2'],
                   ['ey', 'cch', 'c2h', 'hcc', 'hc2'],
                   ['ph', 'c6h5', 'h5c6'],
                   ['am', 'nh2', 'h2n'],
                   ['im', 'chnh', 'cnh2', 'nhch'],
                   ['cn', 'nc'],
                   ['oh', 'ho'],
                   ['ome', 'meo', 'och3', 'ch3o'],
                   ['al', 'cho', 'coh', 'och', 'ohc'],
                   ['ac', 'coch3', 'cch3o'],
                   ['ca', 'cooh', 'co2h', 'hooc', 'ho2c'],
                   ['nt', 'no2', 'o2n'],
                   ['f'],
                   ['tfm', 'cf3', 'f3c'],
                   ['sh', 'hs'],
                   ['sf', 'so2h', 'sooh', 'sho2', 'ho2s', 'hso2'],
                   ['ms', 'sfme', 'mesf', 'sfch3', 'so2me', 'so2ch3'],
                   ['cl']]
        for subl in synlist:
            for item in subl:
                self.syn[item] = subl[0]

    def _populate_elem(self):
        """Adds element labels to self.elem."""
        self.elem['h'] = np.array(['H'])
        self.elem['d'] = np.array(['D'])
        self.elem['me'] = np.array(['C', 'H', 'H', 'H'])
        self.elem['vi'] = np.array(['C', 'C', 'H', 'H', 'H'])
        self.elem['ey'] = np.array(['C', 'C', 'H'])
        self.elem['ph'] = np.array(['C', 'C', 'C', 'C', 'C', 'C',
                                    'H', 'H', 'H', 'H', 'H'])
        self.elem['am'] = np.array(['N', 'H', 'H'])
        self.elem['im'] = np.array(['C', 'N', 'H', 'H'])
        self.elem['cn'] = np.array(['C', 'N'])
        self.elem['oh'] = np.array(['O', 'H'])
        self.elem['al'] = np.array(['C', 'O', 'H'])
        self.elem['nt'] = np.array(['N', 'O', 'O'])
        self.elem['f'] = np.array(['F'])
        self.elem['sh'] = np.array(['S', 'H'])
        self.elem['sf'] = np.array(['S', 'O', 'O', 'H'])
        self.elem['cl'] = np.array(['Cl'])

    def _populate_xyz(self):
        """Adds cartesian geometries to self.xyz.

        For all substituents, the bonding atom is at the origin,
        the bonding axis is the z-axis and the plane axis
        is the y-axis.
        """
        self.xyz['h'] = np.array([[ 0.000,  0.000,  0.000]])
        self.xyz['d'] = np.array([[ 0.000,  0.000,  0.000]])
        self.xyz['me'] = np.array([[ 0.000,  0.000,  0.000],
                                   [ 0.511, -0.886,  0.377],
                                   [ 0.511,  0.886,  0.377],
                                   [-1.023,  0.000,  0.377]])
        self.xyz['vi'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.124,  0.000,  0.730],
                                   [ 0.971,  0.000,  0.495],
                                   [-1.067,  0.000,  1.818],
                                   [-2.095,  0.000,  0.235]])
        self.xyz['ey'] = np.array([[ 0.000,  0.000,  0.000],
                                   [ 0.000,  0.000,  1.210],
                                   [ 0.000,  0.000,  2.280]])
        self.xyz['ph'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.212,  0.000,  0.700],
                                   [ 1.212,  0.000,  0.700],
                                   [-1.212,  0.000,  2.100],
                                   [ 1.212,  0.000,  2.100],
                                   [ 0.000,  0.000,  2.800],
                                   [-2.156,  0.000,  0.155],
                                   [ 2.156,  0.000,  0.155],
                                   [-2.156,  0.000,  2.645],
                                   [ 2.156,  0.000,  2.645],
                                   [ 0.000,  0.000,  3.890]])
        self.xyz['am'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-0.577, -0.771,  0.332],
                                   [-0.577,  0.771,  0.332]])
        self.xyz['im'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.082,  0.000,  0.703],
                                   [ 0.980,  0.000,  0.499],
                                   [-0.869,  0.000,  1.710]])
        self.xyz['cn'] = np.array([[ 0.000,  0.000,  0.000],
                                   [ 0.000,  0.000,  1.136]])
        self.xyz['oh'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-0.913,  0.000,  0.297]])
        self.xyz['al'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.011,  0.000,  0.700],
                                   [ 0.998,  0.000,  0.463]])
        self.xyz['nt'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.105,  0.000,  0.563],
                                   [ 1.105,  0.000,  0.563]])
        self.xyz['f'] = np.array([[ 0.000,  0.000,  0.000]])
        self.xyz['sh'] = np.array([[ 0.000,  0.000,  0.000],
                                   [-1.331,  0.000,  0.156]])
        self.xyz['sf'] = np.array([[ 0.000,  0.000,  0.000],
                                   [ 0.548, -1.266,  0.448],
                                   [ 0.548,  1.266,  0.448],
                                   [-1.311,  0.000,  0.279]])
        self.xyz['cl'] = np.array([[ 0.000,  0.000,  0.000]])

    def _add_comb(self):
        """Adds substituents made by combining multiple substituents."""
        self.elem['et'], self.xyz['et'] = self.add_subs('me', 'me')
        self.elem['npr'], self.xyz['npr'] = self.add_subs('me', 'me', 'me')
        self.elem['ipr'], self.xyz['ipr'] = self.add_subs('me', 'me', 'me',
                                                          inds=1)
        self.elem['nbu'], self.xyz['nbu'] = self.add_subs('me', 'me', 'me',
                                                          'me')
        self.elem['ibu'], self.xyz['ibu'] = self.add_subs('me', 'me', 'me',
                                                          'me', inds=[2, 1, -1])
        self.elem['tbu'], self.xyz['tbu'] = self.add_subs('me', 'me', 'me',
                                                          'me', inds=1)
        self.elem['ome'], self.xyz['ome'] = self.add_subs('oh', 'me')
        self.elem['ac'], self.xyz['ac'] = self.add_subs('al', 'me')
        self.elem['ca'], self.xyz['ca'] = self.add_subs('al', 'oh')
        self.elem['tfm'], self.xyz['tfm'] = self.add_subs('me', 'f', 'f', 'f',
                                                          inds=1)
        self.elem['ms'], self.xyz['ms'] = self.add_subs('sf', 'me')

    def get_sub(self, label):
        """Returns the element list and cartesian geometry of a
        substituent.

        Parameters
        ----------
        label : str
            The substituent label of the desired substituent.

        Returns
        -------
        elem : (N,) ndarray
            The atomic symbols of the substituent.
        xyz : (N, 3) ndarray
            The atomic cartesian coordinates of the substituent.
        """
        lbl = self.syn[label.lower()]
        return self.elem[lbl], self.xyz[lbl]

    def add_subs(self, *lbls, inds=-1):
        """Returns the element list and cartesian geometry from a
        combination of substituents.

        Parameters
        ----------
        lbls : list
            A list of substituent labels to be combined.
        inds : int or array_like, optional
            The indices for substitution between substituents. Setting
            inds=-1 (default) makes the last atom the substituted atom.
            Otherwise a list of indices can be given for the first of
            each pair of substituents.

        Returns
        -------
        elem : (N,) ndarray
            The atomic symbols of the combined substituent.
        xyz : (N, 3) ndarray
            The atomic cartesian coordinates of the combined substituent.
        """
        if isinstance(inds, int):
            inds = (len(lbls) - 1) * [inds]
        elif len(inds) != len(lbls) - 1:
            raise ValueError('Number of inds != number of labels - 1')
        rot = 0
        lbl0 = self.syn[lbls[0].lower()]
        elem = self.elem[lbl0]
        xyz = self.xyz[lbl0]
        for i, label in zip(inds, lbls[1:]):
            # The nearest atom to atom i is its bonding partner; inflate
            # dist[i] so atom i cannot be chosen as its own partner.
            dist = np.linalg.norm(xyz - xyz[i], axis=1)
            dist[i] += np.max(dist)
            ibond = np.argmin(dist)
            # Alternate a 180-degree rotation on successive attachments.
            rot = (rot + 1) % 2
            ax = con.unit_vec(xyz[i] - xyz[ibond])
            lbl = self.syn[label.lower()]
            new_elem = self.elem[lbl]
            new_xyz = displace.rotate(self.xyz[lbl], rot*np.pi, 'Z')
            new_xyz = displace.align_axis(new_xyz, 'Z', ax)
            # Bond length from the sum of covalent radii of the two atoms.
            blen = con.get_covrad(elem[ibond]) + con.get_covrad(new_elem[0])
            new_xyz += xyz[ibond] + blen * ax
            # Atom i is replaced by the newly attached substituent.
            elem = np.hstack((np.delete(elem, i), new_elem))
            xyz = np.vstack((np.delete(xyz, i, axis=0), new_xyz))
        return elem, xyz
def import_sub(label):
    """Return the element list and cartesian geometry of a substituent
    given its label.

    Parameters
    ----------
    label : str
        The substituent label.

    Returns
    -------
    elem : (N,) ndarray
        The atomic symbols of the substituent.
    xyz : (N, 3) ndarray
        The atomic cartesian coordinates of the substituent.
    """
    return SubLib().get_sub(label)
def subst(elem, xyz, sublbl, isub, ibond=None, pl=None, vec=None):
"""Returns a molecular geometry with an specified atom replaced by
substituent.
Labels are case-insensitive. The index isub gives the position to be
substituted. If specified, ibond gives the atom bonded to the
substituent. Otherwise, the nearest atom to isub is used. The
orientation of the substituent can be given as a vector (the plane
normal) or an index (the plane containing isub, ibond and pl).
If isub is given as a list, the entire list of atoms is removed
and the first index is treated as the position of the substituent.
Parameters
----------
elem : (N,) array_like
The atomic symbols of the unsubstituted molecule.
xyz : (N, 3) array_like
The atomic cartesian coordinates of the unsubstituted molecule.
sublbl : str
The substituent label.
isub : int or list
The atomic index (or indices) to be replaced by the substituent.
ibond : int, optional
The atomic index of the atom bonded to position isub. If None
(default), the nearest atom is chosen.
pl : int or array_like, optional
The atomic index or vector defining the xz-plane of the
substituent. If an index is given, the plane normal to the
isub-ibond-pl plane is used. If None (default), the plane
is arbitrarily set to [1, 1, 1] and the bond axis is projected
out.
vec : (N, 3) array_like, optional
The atomic cartesian vectors of the unsubstitued molecule. Default
is None.
Returns
-------
new_elem : (N,) ndarray
The atomic symbols of the substituted molecule.
new_xyz : (N, 3) ndarray
The atomic cartesian coordinates of the substituted molecule.
new_vec : (N, 3) ndarray
The atomic cartesian vectors of the substituted molecule.
Substituent atoms are all set of zero. If vec is None, new_vec
is all zeros.
"""
elem = np.array(elem)
xyz = np.atleast_2d(xyz)
if not | |
<reponame>andrewmkiss/PyXRF<filename>pyxrf/core/utils.py<gh_stars>10-100
import numpy as np
import scipy
import time as ttime
import logging
logger = logging.getLogger(__name__)
# =================================================================================
# The following set of functions are separated from the rest of the program
# and prepared to be moved to scikit-beam (skbeam.core.fitting.xrf_model)
def grid_interpolate(data, xx, yy, xx_uniform=None, yy_uniform=None):
    """
    Interpolate unevenly sampled data to even grid. The new even grid has the same
    dimensions as the original data and covers full range of original X and Y axes.

    Parameters
    ----------
    data : ndarray
        2D array with data values (`xx`, `yy` and `data` must have the same shape)
        ``data`` may be None. In this case interpolation will not be performed, but uniform
        grid will be generated. Use this feature to generate uniform grid.
    xx : ndarray
        2D array with measured values of X coordinates of data points (the values may be unevenly spaced)
    yy : ndarray
        2D array with measured values of Y coordinates of data points (the values may be unevenly spaced)
    xx_uniform : ndarray
        2D array with evenly spaced X axis values (same shape as `data`). If not provided, then
        generated automatically and returned by the function.
    yy_uniform : ndarray
        2D array with evenly spaced Y axis values (same shape as `data`). If not provided, then
        generated automatically and returned by the function.

    Returns
    -------
    data_uniform : ndarray
        2D array with data fitted to even grid (same shape as `data`)
    xx_uniform : ndarray
        2D array with evenly spaced X axis values (same shape as `data`)
    yy_uniform : ndarray
        2D array with evenly spaced Y axis values (same shape as `data`)

    Raises
    ------
    ValueError
        if the shapes of ``data``, ``xx``, ``yy``, ``xx_uniform`` or
        ``yy_uniform`` are inconsistent.
    """
    # Import the submodule explicitly: 'import scipy' alone does not
    # guarantee that 'scipy.interpolate' is available as an attribute.
    from scipy.interpolate import griddata

    # Check if data shape and shape of coordinate arrays match
    if data is not None:
        if data.shape != xx.shape:
            msg = "Shapes of data and coordinate arrays do not match. (function 'grid_interpolate')"
            raise ValueError(msg)
    if xx.shape != yy.shape:
        msg = "Shapes of coordinate arrays 'xx' and 'yy' do not match. (function 'grid_interpolate')"
        raise ValueError(msg)
    if (xx_uniform is not None) and (xx_uniform.shape != xx.shape):
        msg = (
            "Shapes of data and array of uniform coordinates 'xx_uniform' do not match. "
            "(function 'grid_interpolate')"
        )
        raise ValueError(msg)
    # Bug fix: this check previously tested 'xx_uniform.shape != xx.shape',
    # which validated the wrong array and raised AttributeError when only
    # 'yy_uniform' was supplied (xx_uniform is None).
    if (yy_uniform is not None) and (yy_uniform.shape != yy.shape):
        msg = (
            "Shapes of data and array of uniform coordinates 'yy_uniform' do not match. "
            "(function 'grid_interpolate')"
        )
        raise ValueError(msg)
    ny, nx = xx.shape
    # Data must be 2-dimensional to use the following interpolation procedure.
    if (nx <= 1) or (ny <= 1):
        logger.debug("Function utils.grid_interpolate: single row or column scan. Grid interpolation is skipped")
        return data, xx, yy

    def _get_range(vv):
        """
        Returns the range of the data coordinates along X or Y axis. Coordinate
        data for a single axis is represented as a 2D array ``vv``. The array
        will have all rows or all columns identical or almost identical.
        The range is returned as ``vv_min`` (leftmost or topmost value)
        and ``vv_max`` (rightmost or bottommost value). Note, that ``vv_min`` may
        be greater than ``vv_max``

        Parameters
        ----------
        vv : ndarray
            2-d array of coordinates

        Returns
        -------
        vv_min : float
            starting point of the range
        vv_max : float
            end of the range
        """
        # The assumption is that X values are mostly changing along the dimension 1 and
        # Y values change along the dimension 0 of the 2D array and only slightly change
        # along the alternative dimension. Determine, if the range is for X or Y
        # axis based on the dimension in which value change is the largest.
        if abs(vv[0, 0] - vv[0, -1]) > abs(vv[0, 0] - vv[-1, 0]):
            vv_min = np.median(vv[:, 0])
            vv_max = np.median(vv[:, -1])
        else:
            vv_min = np.median(vv[0, :])
            vv_max = np.median(vv[-1, :])
        return vv_min, vv_max

    if xx_uniform is None or yy_uniform is None:
        # Find the range of axes
        x_min, x_max = _get_range(xx)
        y_min, y_max = _get_range(yy)
        _yy_uniform, _xx_uniform = np.mgrid[y_min : y_max : ny * 1j, x_min : x_max : nx * 1j]
        if xx_uniform is None:
            xx_uniform = _xx_uniform
        if yy_uniform is None:
            yy_uniform = _yy_uniform
    xx = xx.flatten()
    yy = yy.flatten()
    xxyy = np.stack((xx, yy)).T
    if data is not None:
        # Do the interpolation only if data is provided
        data = data.flatten()
        data_uniform = griddata(
            xxyy, data, (xx_uniform, yy_uniform), method="linear", fill_value=0
        )
    else:
        data_uniform = None
    return data_uniform, xx_uniform, yy_uniform
def normalize_data_by_scaler(data_in, scaler, *, data_name=None, name_not_scalable=None):
    """
    Normalize data based on the availability of scaler

    Parameters
    ----------
    data_in : ndarray
        numpy array of input data
    scaler : ndarray
        numpy array of scaling data, the same size as data_in
    data_name : str
        name of the data set ('time' or 'i0' etc.)
    name_not_scalable : list
        names of not scalable datasets (['time', 'i0_time'])

    Returns
    -------
    ndarray with normalized data, the same shape as data_in.
    The returned array is the REFERENCE to 'data_in' when no normalization
    is performed (missing scaler, shape mismatch, all-zero scaler, or the
    dataset name is listed as non-scalable); otherwise a new array.
    """
    if data_in is None or scaler is None:  # Nothing to scale
        logger.debug(
            "Function utils.normalize_data_by_scaler: data and/or scaler arrays are None. "
            "Data scaling is skipped."
        )
        return data_in

    if data_in.shape != scaler.shape:
        logger.debug(
            "Function utils.normalize_data_by_scaler: data and scaler arrays have different shape. "
            "Data scaling is skipped."
        )
        return data_in

    # Datasets listed as non-scalable (e.g. 'time') are returned untouched.
    scalable = not (
        name_not_scalable is not None
        and data_name is not None
        and data_name in name_not_scalable
    )

    # If scaler is all zeros, then don't scale the data:
    # check if there is at least one nonzero element
    n_nonzero = np.count_nonzero(scaler)
    if not n_nonzero:
        logger.debug(
            "Function utils.normalize_data_by_scaler: scaler is all-zeros array. Data scaling is skipped."
        )
        scalable = False

    if not scalable:
        return data_in

    if data_in.size != n_nonzero:
        # Replace zeros in the scaler by the mean of its nonzero elements.
        s_mean = np.mean(scaler[scaler != 0])
        # Avoid division by very small number (or zero)
        if np.abs(s_mean) < 1e-10:
            s_mean = 1e-10 if np.sign(s_mean) >= 0 else -1e-10
        scaler = scaler.copy()
        scaler[scaler == 0.0] = s_mean
    return data_in / scaler
# ===============================================================================
# The following functions are prepared to be moved to scikit-beam
def _get_2_sqrt_2_log2():
return 2 * np.sqrt(2 * np.log(2))
def gaussian_sigma_to_fwhm(sigma):
"""
Converts parameters of Gaussian curve: 'sigma' to 'fwhm'
Parameters
----------
sigma : float
sigma of the Gaussian curve
Returns
-------
FWHM of the Gaussian curve
"""
return sigma * _get_2_sqrt_2_log2()
def gaussian_fwhm_to_sigma(fwhm):
"""
Converts parameters of Gaussian curve: 'fwhm' to 'sigma'
Parameters
----------
fwhm : float
Full Width at Half Maximum of the Gaussian curve
Returns
-------
sigma of the Gaussian curve
"""
return fwhm / _get_2_sqrt_2_log2()
def _get_sqrt_2_pi():
    """Return the constant sqrt(2*pi)."""
    return np.sqrt(2 * np.pi)


def gaussian_max_to_area(peak_max, peak_sigma):
    """
    Computes the area under Gaussian curve based on maximum and sigma

    Parameters
    ----------
    peak_max : float
        maximum of the Gaussian curve
    peak_sigma : float
        sigma of the Gaussian curve

    Returns
    -------
    area under the Gaussian curve
    """
    return peak_max * peak_sigma * _get_sqrt_2_pi()


def gaussian_area_to_max(peak_area, peak_sigma):
    """
    Computes the maximum of the Gaussian curve based on area
    under the curve and sigma

    Parameters
    ----------
    peak_area : float
        area under the Gaussian curve
    peak_sigma : float
        sigma of the Gaussian curve

    Returns
    -------
    maximum of the Gaussian curve (0 if ``peak_sigma`` is 0)
    """
    if peak_sigma == 0:
        return 0
    return peak_area / peak_sigma / _get_sqrt_2_pi()
# ==================================================================================
def convert_time_to_nexus_string(t):
    """
    Convert time to a string according to NEXUS format

    Parameters
    ----------
    t : time.struct_time
        Time in the format returned by ``time.localtime`` or ``time.gmtime``

    Returns
    -------
    t : str
        A string representation of time according to NEXUS standard
    """
    # Convert to string format recommended for NEXUS files.
    # NOTE(review): the '+00:00' offset is hardcoded, so the string claims UTC
    # regardless of the zone of 't' — pass time.gmtime() output to be correct.
    t = ttime.strftime("%Y-%m-%dT%H:%M:%S+00:00", t)
    # Bug fix: was a bare 'return', which discarded the formatted string
    # and returned None.
    return t
= g.toUnicodeFileEncoding(tail)
return head, tail
#@+node:ekr.20031218072017.2159: *3* g.os_path_splitext
def os_path_splitext(path):
    """Split path at its extension, returning unicode (head, tail)."""
    head, tail = os.path.splitext(g.toUnicodeFileEncoding(path))
    return g.toUnicodeFileEncoding(head), g.toUnicodeFileEncoding(tail)
#@+node:ekr.20090829140232.6036: *3* g.os_startfile
def os_startfile(fname):
    """Open fname in the platform's default application (Windows/macOS/Linux)."""
    #@+others
    #@+node:bob.20170516112250.1: *4* stderr2log()
    def stderr2log(g, ree, fname):
        """ Display stderr output in the Leo-Editor log pane

        Arguments:
            g: Leo-Editor globals
            ree: Read file descriptor for stderr
            fname: file pathname

        Returns:
            None
        """
        # Drain the stderr file until a read returns nothing.
        while True:
            emsg = ree.read().decode('utf-8')
            if emsg:
                g.es_print_error('xdg-open {fn} caused output to stderr:\n{em}'.format(fn=fname, em=emsg))
            else:
                break
    #@+node:bob.20170516112304.1: *4* itPoll()
    def itPoll(fname, ree, subPopen, g, ito):
        """ Poll for subprocess done

        Arguments:
            fname: File name
            ree: stderr read file descriptor
            subPopen: URL open subprocess object
            g: Leo-Editor globals
            ito: Idle time object for itPoll()

        Returns:
            None
        """
        stderr2log(g, ree, fname)
        rc = subPopen.poll()
        # A non-None return code means the subprocess has finished.
        if not rc is None:
            ito.stop()
            ito.destroy_self()
            if rc != 0:
                g.es_print('xdg-open {fn} failed with exit code {ec}'.format(fn=fname, ec=rc))
            stderr2log(g, ree, fname)
            ree.close()
    #@-others
    # Quote with whichever quote character does not appear in fname.
    if fname.find('"') > -1:
        quoted_fname = "'%s'" % fname
    else:
        quoted_fname = '"%s"' % fname
    if sys.platform.startswith('win'):
        # pylint: disable=no-member
        os.startfile(quoted_fname)
            # Exists only on Windows.
    elif sys.platform == 'darwin':
        # From Marc-Antoine Parent.
        try:
            # Fix bug 1226358: File URL's are broken on MacOS:
            # use fname, not quoted_fname, as the argument to subprocess.call.
            subprocess.call(['open', fname])
        except OSError:
            pass # There may be a spurious "Interrupted system call"
        except ImportError:
            # NOTE(review): subprocess.call does not raise ImportError; this
            # branch looks like dead legacy code — confirm before removing.
            os.system('open %s' % (quoted_fname))
    else:
        # Linux
        # The buffering argument to NamedTempFile does not exist on Python 2.
        try:
            ree = None
            wre = tempfile.NamedTemporaryFile()
            # Separate read descriptor on the same temp file captures the
            # subprocess's stderr output for the log pane.
            ree = io.open(wre.name, 'rb', buffering=0)
        except IOError:
            g.trace('error opening temp file for %r' % fname)
            if ree: ree.close()
            return
        try:
            subPopen = subprocess.Popen(['xdg-open', fname], stderr=wre, shell=False)
        except Exception:
            g.es_print('error opening %r' % fname)
            g.es_exception()
        try:
            itoPoll = g.IdleTime((lambda ito: itPoll(fname, ree, subPopen, g, ito)), delay=1000)
            itoPoll.start()
                # Let the Leo-Editor process run
                # so that Leo-Editor is usable while the file is open.
        except Exception:
            g.es_exception('exception executing g.startfile for %r' % fname)
#@+node:ekr.20031218072017.2160: *3* g.toUnicodeFileEncoding
def toUnicodeFileEncoding(path):
    """Normalize a file path to a Unicode string.

    Fix bug 735938: file association crash.
    Returns '' for a falsy or non-string path.
    """
    if not path or not g.isString(path):
        return ''
    # All os_path_x functions return Unicode strings.
    normalized = path.replace('\\', os.sep)
    return g.toUnicode(normalized)
#@+node:ekr.20111115155710.9859: ** g.Parsing & Tokenizing
#@+node:ekr.20031218072017.822: *3* g.createTopologyList
def createTopologyList(c, root=None, useHeadlines=False):
    """Creates a list describing a node and all its descendents"""
    node = root or c.rootPosition()
    if useHeadlines:
        # (child count, headline) pair for the node itself.
        topology = [(node.numberOfChildren(), node.headString()),]
    else:
        topology = [node.numberOfChildren()]
    # Append one nested sub-list per direct child, recursively.
    child = node.firstChild()
    while child:
        topology.append(g.createTopologyList(c, child, useHeadlines))
        child = child.next()
    return topology
#@+node:ekr.20111017204736.15898: *3* g.getDocString
def getDocString(s):
    '''Return the text of the first docstring found in s.'''
    delims = ('"""', "'''")
    # Collect the position of the earliest occurrence of each delimiter.
    found = [(s.find(d), d) for d in delims]
    found = [(pos, d) for pos, d in found if pos > -1]
    if not found:
        return ''
    # The docstring starts at whichever delimiter appears first.
    start, delim = min(found)
    close = s.find(delim, start + 3)
    if close == -1:
        # Unterminated docstring: treat as no docstring at all.
        return ''
    return s[start + 3: close]
#@+node:ekr.20111017211256.15905: *3* g.getDocStringForFunction
def getDocStringForFunction(func):
    '''Return the docstring for a function that creates a Leo command.'''

    def name(func):
        return func.__name__ if hasattr(func, '__name__') else '<no __name__>'

    def get_defaults(func, i):
        # getargspec was removed in Python 3.11; getfullargspec is the
        # drop-in replacement for positional default lookup.
        return inspect.getfullargspec(func).defaults[i]
    # Fix bug 1251252: https://bugs.launchpad.net/leo-editor/+bug/1251252
    # Minibuffer commands created by mod_scripting.py have no docstrings.
    # Do special cases first.
    s = ''
    if name(func) == 'minibufferCallback':
        func = get_defaults(func, 0)
        # Bug fix: was hasattr(func, 'func.__doc__'), which is always False,
        # so the wrapped function's docstring was never found.
        if getattr(func, '__doc__', None) and func.__doc__.strip():
            s = func.__doc__
    if not s and name(func) == 'commonCommandCallback':
        script = get_defaults(func, 1)
        s = g.getDocString(script)
        # Do a text scan for the function.
    # Now the general cases. Prefer __doc__ to docstring()
    if not s and hasattr(func, '__doc__'):
        s = func.__doc__
    if not s and hasattr(func, 'docstring'):
        s = func.docstring
    return s
#@+node:ekr.20111115155710.9814: *3* g.python_tokenize
def python_tokenize(s, line_numbers=True):
    '''Tokenize string s and return a list of tokens (kind,value,line_number)
    where kind is in ('comment,'id','nl','other','string','ws').
    '''
    result = []
    i = 0
    line_number = 0
    n = len(s)
    while i < n:
        start = i
        ch = s[i]
        if ch == '\n':
            kind = 'nl'
            i += 1
        elif ch in ' \t':
            kind = 'ws'
            while i < n and s[i] in ' \t':
                i += 1
        elif ch == '#':
            kind = 'comment'
            i = g.skip_to_end_of_line(s, i)
        elif ch in '"\'':
            kind = 'string'
            i = g.skip_python_string(s, i, verbose=False)
        elif ch == '_' or ch.isalpha():
            kind = 'id'
            i = g.skip_id(s, i)
        else:
            kind = 'other'
            i += 1
        # Every branch must consume at least one character.
        assert i > start
        val = s[start:i]
        assert val
        if line_numbers:
            # Comments and strings may span newlines; count them all.
            line_number += val.count('\n')
            result.append((kind, val, line_number))
        else:
            result.append((kind, val))
    return result
#@+node:ekr.20040327103735.2: ** g.Scripting
#@+node:ekr.20161223090721.1: *3* g.exec_file
def exec_file(path, d, script=None):
    '''Simulate python's execfile statement for python 3.

    If script is None, the source is read from path; either way, path is
    used as the filename reported in tracebacks.
    '''
    if script is None:
        with open(path) as fh:
            source = fh.read()
    else:
        source = script
    code = compile(source, path, 'exec')
    exec(code, d)
#@+node:ekr.20131016032805.16721: *3* g.execute_shell_commands
def execute_shell_commands(commands, trace=False):
    '''
    Execute each shell command in a separate process.
    Wait for each command to complete, except those starting with '&'
    '''
    if g.isString(commands):
        commands = [commands]
    for command in commands:
        # A leading '&' means "fire and forget": strip it and don't wait.
        if command.startswith('&'):
            command = command[1:].strip()
            wait = False
        else:
            wait = True
        proc = subprocess.Popen(command, shell=True)
        if wait:
            proc.communicate()
#@+node:ekr.20180217113719.1: *3* g.execute_shell_commands_with_options & helpers
def execute_shell_commands_with_options(
    base_dir = None,
    c = None,
    command_setting = None,
    commands = None,
    path_setting = None,
    trace = False,
    warning = None,
):
    '''
    A helper for prototype commands or any other code that
    runs programs in a separate process.

    base_dir: Base directory to use if no config path given.
    commands: A list of commands, for g.execute_shell_commands.
    commands_setting: Name of @data setting for commands.
    path_setting: Name of @string setting for the base directory.
    warning: A warning to be printed before executing the commands.
    '''
    directory = g.computeBaseDir(c, base_dir, path_setting, trace)
    if not directory:
        return
    command_list = g.computeCommands(c, commands, command_setting, trace)
    if not command_list:
        return
    if warning:
        g.es_print(warning)
    # The working directory must be changed here; it can't be part of the commands.
    os.chdir(directory)
    g.execute_shell_commands(command_list)
#@+node:ekr.20180217152624.1: *4* g.computeBaseDir
def computeBaseDir(c, base_dir, path_setting, trace=False):
    '''
    Compute a base_directory.
    If given, @string path_setting takes precedence.
    '''
    # The path setting wins over the base_dir argument.
    if path_setting:
        if not c:
            return g.es_print('@string path_setting requires valid c arg')
        # It's not an error for the setting to be empty: fall through to base_dir.
        setting_dir = c.config.getString(path_setting)
        if setting_dir:
            setting_dir = setting_dir.replace('\\', '/')
            if g.os_path_exists(setting_dir):
                return setting_dir
            return g.es_print('@string %s not found: %r' % (path_setting, setting_dir))
    # Fall back to the given base_dir.
    if base_dir:
        base_dir = base_dir.replace('\\', '/')
        if g.os_path_exists(base_dir):
            return base_dir
        return g.es_print('base_dir not found: %r' % base_dir)
    return g.es_print('Please use @string %s' % path_setting)
#@+node:ekr.20180217153459.1: *4* g.computeCommands
def computeCommands(c, commands, command_setting, trace=False):
    '''
    Get the list of commands.
    If given, @data command_setting takes precedence.
    '''
    if not commands and not command_setting:
        g.es_print('Please use commands, command_setting or both')
        return []
    # The setting wins over the static commands list.
    if command_setting:
        if not c:
            g.es_print('@data command_setting requires valid c arg')
            return []
        # An empty setting is not an error: fall back to the commands.
        data = c.config.getData(command_setting)
        return data or commands
    return commands
#@+node:ekr.20050503112513.7: *3* g.executeFile
def executeFile(filename, options=''):
    """Run a python file in a subprocess and print its output.

    Args:
        filename: path of the python file; silently ignored if unreadable.
        options: extra command-line arguments as a whitespace-separated string.
    """
    if not os.access(filename, os.R_OK):
        return
    fdir, fname = g.os_path_split(filename)
    # New in Leo 4.10: always use subprocess.

    def subprocess_wrapper(cmdlst):
        # why: a list of args with shell=False works on all platforms; the old
        # single-string form only worked on Windows (POSIX Popen treats a bare
        # string as the program name, so it failed to launch).
        p = subprocess.Popen(cmdlst, cwd=fdir,
            universal_newlines=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdo, stde = p.communicate()
        return p.wait(), stdo, stde

    cmd = [sys.executable, fname]
    if options:
        cmd.extend(options.split())
    rc, so, se = subprocess_wrapper(cmd)
    if rc:
        g.pr('return code', rc)
    g.pr(so, se)
#@+node:ekr.20031218072017.3138: *3* g.executeScript
def executeScript(name):
    """Execute a script whose short python file name is given.

    This is called only from the scripts_menu plugin."""
    module_name, unused_ext = g.os_path_splitext(name)
    opened_file = None
    try:
        # Effectively an import or a reload, so the user can modify
        # scripts without leaving Leo.
        opened_file, path, description = imp.find_module(module_name)
        imp.load_module(module_name, opened_file, path, description)
    except Exception:
        g.error("exception executing", name)
        g.es_exception()
    if opened_file:
        opened_file.close()
#@+node:ekr.20040321065415: *3* g.findNode... &,findTopLevelNode
def findNodeInChildren(c, p, headline, exact=True):
"""Search for a node in v's tree matching the given headline."""
p1 = p.copy()
h = headline.strip()
for p in p1.children():
if p.h.strip() == h:
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
import numpy as np
import time
import ptb_reader
import ptb_config
import wiki2_config
import os
import sys
import ast
import re
import rnn_cell_additions as dr
import argparse
import logging
from dynamic_eval import DynamicEval
class PTBModel(object):
"""class for handling the ptb model"""
    def __init__(self,
                 config,
                 is_training,
                 inputs):
        """The constructor builds the tensorflow_impl graph.

        Args:
            config: hyper-parameter object (vocab_size, embedding_size,
                batch_size, time_steps, dropout settings, optimizer name, ...).
            is_training: if True, dropout masks, the optimizer and the
                learning-rate update ops are added to the graph.
            inputs: input producer exposing `input_data` and `targets`.
        """
        self._input = inputs
        vocab_size = config.vocab_size  # num of possible words
        # NOTE(review): the comprehension is immediately indexed with [0], so this
        # always yields device index 0 regardless of args.gpu_devices — confirm intended.
        self._gpu_devices = [i for i in range(len(get_gpu_devices(args.gpu_devices)))][0]
        self._cpu_device = args.cpu_device
        self._config = config
        self._debug_ops = list()
        self._stat_ops = list()
        if config.mos:
            # mixture-of-softmaxes dropout masks; created later in complete_model.
            self._mos_mask = None
            self._gen_mos_mask = None
        with tf.name_scope("model_variables"):
            with tf.name_scope("global_step"):
                self._global_step = tf.Variable(0, name='global_step', trainable=False)
            with tf.name_scope("epoch_counter"):
                self._epoch_count = tf.Variable(0, name='epoch', trainable=False)
                self._epoch_inc = tf.assign(self._epoch_count, tf.add(self._epoch_count, tf.constant(1)))
                self._epoch_reset = tf.assign(self._epoch_count, tf.constant(0))
        # construct the embedding layer on cpu device
        with tf.variable_scope("embedding"), tf.device(self._cpu_device):
            # the embedding matrix is allocated in the cpu to save valuable gpu memory for the model.
            if is_training:
                logger.info("adding embedding matrix with dims [{:d}, {:d}]".format(vocab_size, config.embedding_size))
            embedding_map = tf.get_variable(
                name="embedding", dtype=tf.float32,
                initializer=tf.random_uniform(shape=[vocab_size, config.embedding_size],
                                              minval=-0.1, maxval=0.1,seed=seed, dtype=tf.float32))
            if is_training:
                logger.info("adding embedding bias with dims [{:d}]".format(config.embedding_size))
            # b_embed_in = tf.get_variable(name="b_embed_in",
            #                              initializer=tf.zeros([config.embedding_size], dtype=tf.float32),
            #                              dtype=tf.float32)
            embedding = tf.nn.embedding_lookup(embedding_map, self._input.input_data) # + b_embed_in
            if is_training and (config.keep_prob_embed < 1 or config.drop_i < 1):
                # non variational wrapper for the embedding
                logger.info("adding embedding mask with dims [{:d}, {:d}, {:d}]".format(config.batch_size, config.time_steps, config.embedding_size))
                # The mask is fed via feed_dict; one of the generator ops below produces it.
                self._emb_mask = tf.placeholder(dtype=tf.float32, shape=[config.batch_size, config.time_steps, config.embedding_size],
                                                name="embedding_mask")
                if config.keep_prob_embed < 1:
                    if config.drop_embed_var:
                        logger.info("using variational embedding dropout")
                        # variational: one mask per sequence, tiled over all time steps.
                        with tf.name_scope("out_mask_gen"):
                            random_tensor = ops.convert_to_tensor(config.keep_prob_embed)
                            random_tensor += random_ops.random_uniform([config.batch_size, 1, config.embedding_size], seed=seed)
                            random_tensor = tf.tile(random_tensor, [1, config.time_steps, 1])
                            self._gen_emb_mask = math_ops.floor(random_tensor)
                    else:
                        logger.info("using naive embedding dropout")
                        # naive: an independent mask for every time step.
                        with tf.name_scope("out_mask_gen"):
                            random_tensor = ops.convert_to_tensor(config.keep_prob_embed)
                            random_tensor += random_ops.random_uniform([config.batch_size, config.time_steps, config.embedding_size], seed=seed)
                            self._gen_emb_mask = math_ops.floor(random_tensor)
                else:
                    # only drop_i is active; use an all-ones embedding mask.
                    with tf.name_scope("out_mask_gen"):
                        self._gen_emb_mask = tf.ones([config.batch_size, config.time_steps, config.embedding_size])
                # inverted dropout: divide by the keep probabilities so the
                # expected activation magnitude is unchanged.
                embedding_out = math_ops.div(embedding, config.drop_i*config.keep_prob_embed) * self._emb_mask
            else:
                embedding_out = embedding
        with tf.name_scope("inner_model"): # tf.device("/gpu:%d" % self._gpu_devices),
            loss, grads, cell, initial_state, final_state, softmax = self.complete_model(embedding_out,
                                                                                         embedding_map,
                                                                                         is_training)
            self._softmax = softmax
            self._cell = cell
            self._initial_state = initial_state
            self._final_state = final_state
            self._loss = loss
            self._grads = grads
        if is_training:
            # set learning rate as variable in order to anneal it throughout training
            with tf.name_scope("learning_rate"):
                self._lr = tf.Variable(config.lr, trainable=False, dtype=tf.float32)
                # a placeholder to assign a new learning rate
                self._new_lr = tf.placeholder(
                    tf.float32, shape=[], name="new_learning_rate")
                # function to update learning rate
                self._lr_update = tf.assign(self._lr, self._new_lr)
            # get trainable vars
            tvars = tf.trainable_variables()
            # define an optimizer with the averaged gradients
            with tf.name_scope("optimizer"):
                self._optimizer = []
                # NOTE(review): the 'a*'/'m*' variants wrap the base update ops with
                # ASGDOptimizer / MASGDOptimizer (presumably weight averaging) —
                # see rnn_cell_additions / optimizer definitions to confirm.
                if config.opt == "sgd":
                    logger.info("using SGD optimizer")
                    self._optimizer = SGDOptimizer(self, grads, tvars)
                    self._train_op = self._optimizer.train_op
                elif config.opt == "asgd":
                    logger.info("using ASGD optimizer")
                    opt = SGDOptimizer(self, grads, tvars, use_opt=False)
                    self._optimizer = ASGDOptimizer(self, opt.updates, tvars)
                    self._train_op = self._optimizer.train_op
                elif config.opt == "masgd":
                    logger.info("using MASGD optimizer")
                    opt = SGDOptimizer(self, grads, tvars, use_opt=False)
                    self._optimizer = MASGDOptimizer(self, opt.updates, tvars)
                    self._train_op = self._optimizer.train_op
                elif config.opt == "rms":
                    logger.info("using RMS optimizer")
                    self._optimizer = RMSpropOptimizer(self, grads, tvars)
                    self._train_op = self._optimizer.train_op
                elif config.opt == "arms":
                    logger.info("using ARMS optimizer")
                    opt = RMSpropOptimizer(self, grads, tvars, use_opt=False)
                    self._optimizer = ASGDOptimizer(self, opt.updates, tvars)
                    self._train_op = self._optimizer.train_op
                elif config.opt == "marms":
                    logger.info("using MARMS optimizer")
                    opt = RMSpropOptimizer(self, grads, tvars, use_opt=False)
                    self._optimizer = MASGDOptimizer(self, opt.updates, tvars)
                    self._train_op = self._optimizer.train_op
                else:
                    raise ValueError( config.opt + " is not a valid optimizer")
        if config.dynamic_eval is not None:
            # average grads ; sync point
            # get trainable vars
            tvars = tf.trainable_variables()
            # dynamic evaluation adapts weights at test time; it replaces train_op.
            self._dynamic_eval = DynamicEval(config, tvars, grads)
            self._train_op = self._dynamic_eval.update_op()
    def complete_model(self, embedding_out, embedding_map, is_training):
        """ Build rest of model for a single gpu

        Args:
            embedding_out: the embedding representation to be processed
            embedding_map: the embedding matrix; reused (transposed) as the
                output projection when weight tying is enabled.
            is_training: if True, dropout masks and extra logging are added.

        Returns:
            loss: mean cross-entropy over the batch (before regularization terms).
            grads: gradients of the regularized loss w.r.t. all trainable variables.
            cell: the RNN cell(s) built by _build_rnn_graph.
            initial_state: the initial RNN state tensors.
            final_state: the RNN state tensors after time_steps steps.
            softmax: softmax output (the constant 1 in the non-MoS path).
        """
        targets = self._input.targets
        config = self._config
        batch_size = config.batch_size
        time_steps = config.time_steps  # num of time steps used in BPTT
        vocab_size = config.vocab_size  # num of possible words
        # (A long, commented-out manual LSTM unrolling implementation was removed
        # here; _build_rnn_graph now constructs the recurrent part of the graph.)
        lstm_output, cell, state, initial_state = self._build_rnn_graph(embedding_out, is_training)
        if config.embedding_size == config.units_num[-1] or config.mos:
            # outer softmax matrix is tied with embedding matrix
            if is_training:
                logger.info("tied embedding")
            w_out = tf.transpose(embedding_map)
        else:
            if is_training:
                logger.info("untied embedding")
            w_out = tf.get_variable(name="w_embed_out", shape=[config.units_num[-1],vocab_size], dtype=tf.float32)
        b_out = tf.get_variable(name="b_out",
                                dtype=tf.float32,initializer=tf.zeros([config.vocab_size], dtype=tf.float32))
        with tf.name_scope("loss"):
            with tf.name_scope("data_loss"):
                if config.mos:
                    # mixture-of-softmaxes (MoS) output layer.
                    if is_training:
                        logger.info("adding mos with %d contexts" % config.mos_context_num)
                    with tf.name_scope("mos"):
                        # pi: mixture weights over the contexts.
                        prior = tf.get_variable(name="mos_pi",
                                                shape=[config.units_num[-1], config.mos_context_num],
                                                dtype=tf.float32)
                        prior = tf.matmul(lstm_output, prior)
                        pi = tf.nn.softmax(prior, name="mos_prior")
                        # context vectors
                        w_h = tf.get_variable(name="mos_w_h",
                                              shape=[config.units_num[-1], config.mos_context_num*config.embedding_size],
                                              dtype=tf.float32)
                        b_h = tf.get_variable(name="mos_b_h",
                                              shape=[config.mos_context_num * config.embedding_size],
                                              dtype=tf.float32)
                        h = tf.reshape(tf.tanh(tf.matmul(lstm_output, w_h) + b_h), [-1, config.embedding_size])
                        if is_training:
                            # dropout mask for the context vectors, fed via feed_dict.
                            self._mos_mask = tf.placeholder(dtype=tf.float32,
                                                            shape=[config.batch_size*config.time_steps*config.mos_context_num, config.embedding_size],
                                                            name="mos_mask")
                            if config.variational is not None:
                                # variational: one mask per sequence, tiled over time steps.
                                with tf.name_scope("mos_mask_gen"):
                                    random_tensor = ops.convert_to_tensor(config.mos_drop)
                                    random_tensor += random_ops.random_uniform([config.batch_size, 1, config.mos_context_num*config.embedding_size], seed=seed)
                                    random_tensor = tf.tile(random_tensor, [1, config.time_steps, 1])
                                    self._gen_mos_mask = tf.reshape(math_ops.floor(random_tensor),
                                                                    [config.batch_size*config.time_steps*config.mos_context_num, config.embedding_size])
                            else:
                                # naive: an independent mask for every position.
                                with tf.name_scope("mos_mask_gen"):
                                    random_tensor = ops.convert_to_tensor(config.mos_drop)
                                    random_tensor += random_ops.random_uniform(
                                        [config.batch_size*config.mos_context_num*config.time_steps, config.embedding_size], seed=seed)
                                    self._gen_mos_mask = math_ops.floor(random_tensor)
                            # inverted dropout: rescale by the keep probability.
                            h = math_ops.div(h, config.mos_drop) * self._mos_mask
                        a = tf.matmul(h, w_out) + b_out
                        # mos
                        a_mos = tf.reshape(tf.nn.softmax(a), [-1, config.mos_context_num, config.vocab_size])
                        pi = tf.reshape(pi, [-1, config.mos_context_num, 1])
                        weighted_softmax = tf.multiply(a_mos, pi)
                        softmax = tf.reduce_sum(weighted_softmax, axis=1)
                        # 1e-8 guards tf.log against log(0).
                        losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([tf.log(softmax+1e-8)],
                                                                                    [tf.reshape(targets, [-1])],
                                                                                    [tf.ones([batch_size * time_steps],
                                                                                             dtype=tf.float32)])
                        loss = tf.reduce_mean(losses)
                else:
                    if is_training:
                        logger.info("adding softmax layer")
                    logits = tf.matmul(lstm_output, w_out) + b_out
                    softmax = 1#tf.nn.softmax(logits)
                    losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits],
                                                                                [tf.reshape(targets, [-1])],
                                                                                [tf.ones([batch_size * time_steps],
                                                                                         dtype=tf.float32)])
                    loss = tf.reduce_mean(losses)
            # raw_loss is what callers see; the regularizers below only
            # influence the gradients computed from `loss`.
            raw_loss = loss
            if config.AR and is_training:
                logger.info("using activation regularization")
                with tf.name_scope("AR"):
                    loss += config.AR * tf.reduce_mean(tf.square(tf.reshape(lstm_output, [-1, 1])))
            if config.TAR and is_training:
                logger.info("using temporal activation regularization")
                with tf.name_scope("TAR"):
                    # penalize large changes of the output between adjacent time steps.
                    outputs_reshaped = tf.reshape(lstm_output, [config.batch_size, config.time_steps, -1])
                    diff = outputs_reshaped[:, :-1, :] - outputs_reshaped[:, 1:, :]
                    loss += config.TAR * tf.reduce_mean(tf.square(tf.reshape(diff, [-1, 1])))
            if config.wdecay and is_training:
                logger.info("using L2 regularization")
                for tvar in tf.trainable_variables():
                    loss += config.wdecay * tf.reduce_sum(tf.square(tf.reshape(tvar, [-1, 1])))
        with tf.name_scope("compute_grads"):
            grads = tf.gradients(loss, tf.trainable_variables())
        final_state = state
        return raw_loss, grads, cell, initial_state, final_state, softmax
def _build_rnn_graph(self, inputs, is_training):
config = self.config
batch_size = config.batch_size
# define basic lstm cell
| |
{'NotifyWatcherId': {'type': 'string'},
'error': {'$ref': '#/definitions/Error'}},
'required': ['NotifyWatcherId'],
'type': 'object'},
'Number': {'additionalProperties': False,
'properties': {'Build': {'type': 'integer'},
'Major': {'type': 'integer'},
'Minor': {'type': 'integer'},
'Patch': {'type': 'integer'},
'Tag': {'type': 'string'}},
'required': ['Major',
'Minor',
'Tag',
'Patch',
'Build'],
'type': 'object'},
'ProcessRelations': {'additionalProperties': False,
'properties': {'controller-alias': {'type': 'string'}},
'required': ['controller-alias'],
'type': 'object'},
'SerializedModel': {'additionalProperties': False,
'properties': {'bytes': {'items': {'type': 'integer'},
'type': 'array'},
'charms': {'items': {'type': 'string'},
'type': 'array'},
'resources': {'items': {'$ref': '#/definitions/SerializedModelResource'},
'type': 'array'},
'tools': {'items': {'$ref': '#/definitions/SerializedModelTools'},
'type': 'array'}},
'required': ['bytes',
'charms',
'tools',
'resources'],
'type': 'object'},
'SerializedModelResource': {'additionalProperties': False,
'properties': {'application': {'type': 'string'},
'application-revision': {'$ref': '#/definitions/SerializedModelResourceRevision'},
'charmstore-revision': {'$ref': '#/definitions/SerializedModelResourceRevision'},
'name': {'type': 'string'},
'unit-revisions': {'patternProperties': {'.*': {'$ref': '#/definitions/SerializedModelResourceRevision'}},
'type': 'object'}},
'required': ['application',
'name',
'application-revision',
'charmstore-revision',
'unit-revisions'],
'type': 'object'},
'SerializedModelResourceRevision': {'additionalProperties': False,
'properties': {'description': {'type': 'string'},
'fingerprint': {'type': 'string'},
'origin': {'type': 'string'},
'path': {'type': 'string'},
'revision': {'type': 'integer'},
'size': {'type': 'integer'},
'timestamp': {'format': 'date-time',
'type': 'string'},
'type': {'type': 'string'},
'username': {'type': 'string'}},
'required': ['revision',
'type',
'path',
'description',
'origin',
'fingerprint',
'size',
'timestamp'],
'type': 'object'},
'SerializedModelTools': {'additionalProperties': False,
'properties': {'uri': {'type': 'string'},
'version': {'type': 'string'}},
'required': ['version', 'uri'],
'type': 'object'},
'SetMigrationPhaseArgs': {'additionalProperties': False,
'properties': {'phase': {'type': 'string'}},
'required': ['phase'],
'type': 'object'},
'SetMigrationStatusMessageArgs': {'additionalProperties': False,
'properties': {'message': {'type': 'string'}},
'required': ['message'],
'type': 'object'},
'StringResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'string'}},
'required': ['result'],
'type': 'object'}},
'properties': {'Export': {'description': 'Export serializes the model '
'associated with the API connection.',
'properties': {'Result': {'$ref': '#/definitions/SerializedModel'}},
'type': 'object'},
'MigrationStatus': {'description': 'MigrationStatus returns '
'the details and progress '
'of the latest\n'
'model migration.',
'properties': {'Result': {'$ref': '#/definitions/MasterMigrationStatus'}},
'type': 'object'},
'MinionReportTimeout': {'description': 'MinionReportTimeout '
'returns the '
'configuration value '
'for this controller '
'that\n'
'indicates how long the '
'migration master '
'worker should wait for '
'minions to\n'
'reported on phases of '
'a migration.',
'properties': {'Result': {'$ref': '#/definitions/StringResult'}},
'type': 'object'},
'MinionReports': {'description': 'MinionReports returns '
'details of the reports made '
'by migration\n'
'minions to the controller '
'for the current migration '
'phase.',
'properties': {'Result': {'$ref': '#/definitions/MinionReports'}},
'type': 'object'},
'ModelInfo': {'description': 'ModelInfo returns essential '
'information about the model to '
'be\n'
'migrated.',
'properties': {'Result': {'$ref': '#/definitions/MigrationModelInfo'}},
'type': 'object'},
'Prechecks': {'description': 'Prechecks performs pre-migration '
'checks on the model and\n'
'(source) controller.',
'type': 'object'},
'ProcessRelations': {'description': 'ProcessRelations '
'processes any relations '
'that need updating after '
'an export.\n'
'This should help fix any '
'remoteApplications that '
'have been migrated.',
'properties': {'Params': {'$ref': '#/definitions/ProcessRelations'}},
'type': 'object'},
'Reap': {'description': 'Reap removes all documents for the '
'model associated with the API\n'
'connection.',
'type': 'object'},
'SetPhase': {'description': 'SetPhase sets the phase of the '
'active model migration. The '
'provided\n'
'phase must be a valid phase '
'value, for example QUIESCE" or\n'
'"ABORT". See the core/migration '
'package for the complete list.',
'properties': {'Params': {'$ref': '#/definitions/SetMigrationPhaseArgs'}},
'type': 'object'},
'SetStatusMessage': {'description': 'SetStatusMessage sets a '
'human readable status '
'message containing\n'
'information about the '
"migration's progress. "
'This will be shown in\n'
'status output shown to '
'the end user.',
'properties': {'Params': {'$ref': '#/definitions/SetMigrationStatusMessageArgs'}},
'type': 'object'},
'Watch': {'description': 'Watch starts watching for an active '
'migration for the model\n'
'associated with the API connection. '
'The returned id should be used\n'
'with the NotifyWatcher facade to '
'receive events.',
'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'},
'WatchMinionReports': {'description': 'WatchMinionReports sets '
'up a watcher which '
'reports when a report\n'
'for a migration minion '
'has arrived.',
'properties': {'Result': {'$ref': '#/definitions/NotifyWatchResult'}},
'type': 'object'}},
'type': 'object'}
@ReturnMapping(SerializedModel)
async def Export(self):
'''
Export serializes the model associated with the API connection.
Returns -> SerializedModel
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='Export',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(MasterMigrationStatus)
async def MigrationStatus(self):
'''
MigrationStatus returns the details and progress of the latest
model migration.
Returns -> MasterMigrationStatus
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='MigrationStatus',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(StringResult)
async def MinionReportTimeout(self):
'''
MinionReportTimeout returns the configuration value for this controller that
indicates how long the migration master worker should wait for minions to
reported on phases of a migration.
Returns -> StringResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='MinionReportTimeout',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(MinionReports)
async def MinionReports(self):
'''
MinionReports returns details of the reports made by migration
minions to the controller for the current migration phase.
Returns -> MinionReports
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='MinionReports',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(MigrationModelInfo)
async def ModelInfo(self):
'''
ModelInfo returns essential information about the model to be
migrated.
Returns -> MigrationModelInfo
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='ModelInfo',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Prechecks(self):
'''
Prechecks performs pre-migration checks on the model and
(source) controller.
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='Prechecks',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def ProcessRelations(self, controller_alias=None):
'''
ProcessRelations processes any relations that need updating after an export.
This should help fix any remoteApplications that have been migrated.
controller_alias : str
Returns -> None
'''
if controller_alias is not None and not isinstance(controller_alias, (bytes, str)):
raise Exception("Expected controller_alias to be a str, received: {}".format(type(controller_alias)))
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='ProcessRelations',
version=3,
params=_params)
_params['controller-alias'] = controller_alias
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def Reap(self):
'''
Reap removes all documents for the model associated with the API
connection.
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='Reap',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetPhase(self, phase=None):
'''
SetPhase sets the phase of the active model migration. The provided
phase must be a valid phase value, for example QUIESCE" or
"ABORT". See the core/migration package for the complete list.
phase : str
Returns -> None
'''
if phase is not None and not isinstance(phase, (bytes, str)):
raise Exception("Expected phase to be a str, received: {}".format(type(phase)))
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='SetPhase',
version=3,
params=_params)
_params['phase'] = phase
reply = await self.rpc(msg)
return reply
@ReturnMapping(None)
async def SetStatusMessage(self, message=None):
'''
SetStatusMessage sets a human readable status message containing
information about the migration's progress. This will be shown in
status output shown to the end user.
message : str
Returns -> None
'''
if message is not None and not isinstance(message, (bytes, str)):
raise Exception("Expected message to be a str, received: {}".format(type(message)))
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='SetStatusMessage',
version=3,
params=_params)
_params['message'] = message
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResult)
async def Watch(self):
'''
Watch starts watching for an active migration for the model
associated with the API connection. The returned id should be used
with the NotifyWatcher facade to receive events.
Returns -> NotifyWatchResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='Watch',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
@ReturnMapping(NotifyWatchResult)
async def WatchMinionReports(self):
'''
WatchMinionReports sets up a watcher which reports when a report
for a migration minion has arrived.
Returns -> NotifyWatchResult
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='MigrationMaster',
request='WatchMinionReports',
version=3,
params=_params)
reply = await self.rpc(msg)
return reply
class ModelGenerationFacade(Type):
name = 'ModelGeneration'
version = 3
schema = {'definitions': {'BoolResult': {'additionalProperties': False,
'properties': {'error': {'$ref': '#/definitions/Error'},
'result': {'type': 'boolean'}},
'required': ['result'],
'type': 'object'},
'BranchArg': {'additionalProperties': False,
'properties': {'branch': {'type': 'string'}},
'required': ['branch'],
'type': 'object'},
'BranchInfoArgs': {'additionalProperties': False,
'properties': {'branches': {'items': {'type': 'string'},
'type': 'array'},
'detailed': {'type': 'boolean'}},
'required': ['branches', 'detailed'],
'type': 'object'},
'BranchTrackArg': {'additionalProperties': False,
| |
<gh_stars>1-10
import sys
import logging
import traceback
import time
import cv2
import asyncio
import numpy as np
from FPS import FPS
from enum import IntEnum
import json
from OpenVINO_Engine import OpenVINO_Util, OpenVINO_Engine
from OpenVINO_Config import Engine_State, Model_Flag
from concurrent.futures import ThreadPoolExecutor, CancelledError
from WebServer import ImageStreamHandler
from pathlib import Path
from Video_Data import Video_Data, Video_Device_Type, Video_Data_State, Video_Playback_Mode
import youtube_dl
class VideoProcessorState(IntEnum):
    # Lifecycle states of VideoProcessor; IntEnum so values compare and
    # serialize as plain ints.
    Unknown = 0   # initial state, before setup completes
    Running = 1   # actively reading and processing frames
    Stop = 2      # processing terminated
    Pause = 3     # processing temporarily suspended
    Error = 4     # an unrecoverable error occurred
class VideoProcessor(object):
#
# Initialization of Video Processor Class
# Reads video frame from Video Stream class and process (AI Inference etc)
# Set frame data to displayFrame for visualization
#
    def __init__(self,
                 videoPath = '/dev/video0',
                 videoW = 1024,
                 videoH = 768,
                 fontScale = 1.0,
                 verbose = True):
        """Initialize the video processor.

        videoPath : device path, file path, or URL of the video source
        videoW, videoH : requested frame size in pixels
        fontScale : scale factor for annotation text drawn on frames
        verbose : when True, log entry of most methods
        """
        self.verbose = verbose
        self._debug = False
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
            logging.info('===============================================================')
            logging.info('Initializing Video Processor with the following parameters:')
            logging.info(' - OpenCV Version : {}'.format(cv2.__version__))
            logging.info(' - Device Path : {}'.format(videoPath))
            logging.info(' - Frame Size : {}x{}'.format(videoW, videoH))
            logging.info('===============================================================')
        # To send message to clients (Browser)
        self.imageStreamHandler = None
        self.threadExecutor = None
        # Video source
        self.videoData = Video_Data(self, videoPath)
        self.displayFrame = np.array([])   # latest encoded/display frame
        self.frame_org = np.array([])      # latest unmodified source frame
        # for Frame Rate measurement
        self.fps = FPS()
        playback_mode = self.videoData.get_playback_mode()
        self._playback_sync = (playback_mode == Video_Playback_Mode.Sync)
        self._fps_target = 30
        self._fps_wait = 1000.0/30         # per-frame budget in milliseconds
        self.currentFps = 30.0
        # For display
        self._fontScale = float(fontScale)
        self._annotate = False
        # Track states of this object
        self.set_video_processor_state(VideoProcessorState.Unknown)
        # OpenVINO
        self.inference_engine = None       # set in __enter__
        self.runInference = 0              # 1 while inference is enabled
        self.ioLoop = None                 # event loop used for executor tasks
        self.current_model_data = None     # model currently loaded, if any
#
# Sets up Video Processor Class
# Creates Video Stream Class for video capture
#
    def __enter__(self):
    # async def __aenter__(self):
        """Context-manager entry: announce the configured video path and
        create/initialize the OpenVINO inference engine.  Returns self."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        self.set_video_path('{{\"set_video_path\":\"{}\"}}'.format(self.videoData.videoPath))
        self.inference_engine = OpenVINO_Engine(self)
        # with OpenVINO_Util() as openVino:
        #     devices = openVino.get_supported_devices()
        #     for device in devices:
        #         logging.info('>> Device : {0}'.format(device))
        #         fullName = openVino.get_device_name(device)
        #         logging.info('>> Name : {0}'.format(fullName))
        #         self.inference_engine.hwList.append(device)
        self.inference_engine.initialize_engine()
        return self
#
# Clean up Video Processor Class
#
    def __exit__(self, exception_type, exception_value, traceback):
    # async def __aexit__(self, exception_type, exception_value, traceback):
        """Context-manager exit: drain the worker thread pool (if any) and
        mark the processor stopped.  Exceptions are not suppressed."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self.threadExecutor:
            self.threadExecutor.shutdown(wait=True)
        self.set_video_processor_state(VideoProcessorState.Stop)
#
# Send message to browser
#
def send_message(self, msg):
if self.imageStreamHandler:
ImageStreamHandler.broadcast_message(msg)
#
# Set Video Processor State flag
#
    def set_video_processor_state(self, flag):
        # Record the current lifecycle state (a VideoProcessorState value).
        self._state = flag
#
# Initializes Video Source
#
    def _init_video_source(self):
        """Initialize the video source.  Not implemented; Video_Data is
        constructed directly in __init__ instead."""
        if self._debug:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        raise NotImplementedError
#
# Sets current video frame for display
#
    def set_display_frame(self, frame):
        """Store *frame* (a numpy array) as the frame to display next."""
        # NOTE(review): assert is stripped under -O, in which case an empty
        # frame would be stored silently.
        assert frame.size > 0, "Frame Empty"
        self.displayFrame = frame
#
    # Returns current video frame for display
# Converts to byte data
#
    def get_display_frame(self):
        """Return (jpeg_bytes, fps) for the current display frame.

        If no frame is buffered but a still photo is pending, a black
        wallpaper frame is encoded instead; if nothing is available at all,
        (None, 0) is returned.
        """
        if self.displayFrame.size == 0:
            if self.videoData.get_video_data_state() == Video_Data_State.PhotoReady:
                # Synthesize a black canvas at the source resolution,
                # falling back to 720p when the size is unknown.
                if self.videoData.videoH == 0 or self.videoData.videoW == 0:
                    wallpaper = np.zeros((720, 1280, 3), np.uint8)
                else:
                    wallpaper = np.zeros((self.videoData.videoH, self.videoData.videoW, 3), np.uint8)
                ret, buffer = cv2.imencode( '.jpg', wallpaper )
            else:
                return None, 0
        else:
            ret, buffer = cv2.imencode( '.jpg', self.displayFrame )
        if ret and buffer.size > 0:
            return buffer.tobytes(), self.currentFps
        else:
            # JPEG encoding failed.  NOTE(review): under -O the assert is
            # stripped and the method implicitly returns None.
            assert(False), '>> Display Frame Empty *************** '
#
    # Returns Inference Engine Info
#
    def get_inference_engine_info(self):
        """Return a JSON string with the engine signature, the available
        devices, and whether inference is currently "On" or "Off"."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self.inference_engine:
            devices = json.dumps(self.inference_engine.get_devices())
            if self.runInference == 1:
                state = "On"
            else:
                state = "Off"
            return '{{\"{0}\":\"{1}\",\"devices\":{2},\"get_inference_state\":\"{3}\"}}'.format(sys._getframe().f_code.co_name, self.inference_engine.signature, devices, state)
        else:
            assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
            # Unreachable unless Python runs with -O (asserts stripped).
            return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Retrieve a list of models
#
    def get_model_list(self):
        """Return a dict {"get_model_list": [<model json>, ...]} listing the
        models known to the inference engine."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self.inference_engine:
            json_data = json.loads('{\"get_model_list\":[]}')
            model_list = self.inference_engine.get_model_list()
            for model in model_list:
                json_data["get_model_list"].append(json.loads(model.to_json()))
        else:
            assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
            # Unreachable unless Python runs with -O (asserts stripped).
            return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
        return json_data
#
# Set to keep FPS for video or not
#
    def playback_mode(self, msg):
        """Set the playback mode of the video source.

        msg : JSON string with a "playback_mode" field.
        Returns the resulting mode as a JSON string.
        """
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        jsonData = json.loads(msg)
        playback_mode = jsonData["playback_mode"]
        # NOTE(review): "0" is treated as the FPS-synchronized mode here;
        # confirm this agrees with Video_Playback_Mode.Sync used in __init__.
        self._playback_sync = playback_mode == "0"
        self.videoData.set_playback_mode(playback_mode)
        return '{{\"playback_mode\":\"{0}\"}}'.format(self.videoData.get_playback_mode())
#
    # Set video playback state (run / pause)
#
def set_video_playback(self, msg):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
jsonData = json.loads(msg)
if jsonData['set_video_playback'] == "1":
self.set_video_processor_state(VideoProcessorState.Running)
else:
self.set_video_processor_state(VideoProcessorState.Pause)
return self.get_video_playback()
#
# Return current video playback state
#
    def get_video_playback(self):
        """Return the playback state as JSON: "1" while running, "0" while
        paused.  Any other state trips the assertion below."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self._state == VideoProcessorState.Pause:
            state = "0"
        elif self._state == VideoProcessorState.Running:
            state = "1"
        else:
            assert False, "Unexpected Video Processor State"
        return '{{\"get_video_playback\":\"{}\"}}'.format(state)
#
# Stop video process
#
    def set_video_stop(self):
        """Pause frame acquisition and processing."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        self.videoData.set_video_playback(isPause = True)
        self.set_video_processor_state(VideoProcessorState.Pause)
#
# Start video process
#
    def set_video_start(self):
        """Resume acquisition/processing: reset the FPS counter, un-pause the
        source, mark the processor Running, and notify browser clients."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        self.fps.reset(self.videoData.get_video_fps())
        self.videoData.set_video_playback(isPause = False)
        self.set_video_processor_state(VideoProcessorState.Running)
        self.send_message('{\"frame_ready\":1}')
#
    # Set Video Path
#
    def set_video_path(self, msg, loop = None):
        """Switch the video source.

        msg : JSON string with "set_video_path" (or legacy "videoPath").
        loop : optional event loop forwarded to Video_Data.
        Returns the resulting path JSON (see get_video_path()).
        """
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        jsonData = json.loads(msg)
        if jsonData.get("set_video_path"):
            videoPath = jsonData["set_video_path"]
        else:
            videoPath = jsonData["videoPath"]
        # Pause while the source is being swapped.
        self.set_video_processor_state(VideoProcessorState.Pause)
        video_data_state = self.videoData.set_video_path(videoPath, loop)
        if video_data_state == Video_Data_State.Running or video_data_state == Video_Data_State.PhotoReady:
            self.fps.reset(self.videoData.get_video_fps())
            self.set_video_start()
        else:
            # Source not usable; stay paused.
            self.set_video_processor_state(VideoProcessorState.Pause)
        return self.get_video_path()
#
# Return current video path
#
    def get_video_path(self):
        """Return the current video source path (delegated to Video_Data)."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        return self.videoData.get_video_path()
#
# Set Video Resolution
#
    def set_video_resolution(self, msg):
        """Set the capture resolution (delegated to Video_Data)."""
        if self._debug:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        return self.videoData.set_video_resolution(msg)
#
# Get Video Resolution
#
    def get_video_resolution(self):
        """Return the current capture resolution (delegated to Video_Data)."""
        if self._debug:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        return self.videoData.get_video_resolution()
#
# Set AI model to use
#
    async def set_ai_model(self, loop, msg):
        """Select the AI model (and target device/precision) to run.

        loop : event loop used to schedule download/convert/load tasks.
        msg : JSON string with model selection plus "set_target_device"
            and "set_precision" fields.
        If the requested model is already loaded on the same device and
        precision, inference is simply (re)enabled; otherwise the current
        model is invalidated and a download task is scheduled whose
        callbacks drive the convert/load pipeline.
        NOTE(review): indentation was lost in extraction; the nesting of the
        two outer ``if`` blocks below should be confirmed against the
        original file.
        """
        if self.verbose:
            logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
        try:
            self.ioLoop = loop
            #1 Get Model Data
            model_data = self.inference_engine.get_ai_model_data(msg)
            current_hw = json.loads(self.inference_engine.get_target_device())
            current_precision = json.loads(self.inference_engine.get_precision())
            if model_data.isFlagSet(Model_Flag.Loaded):
                json_data = json.loads(msg)
                device = json_data["set_target_device"]
                precision = json_data["set_precision"]
                if current_hw['get_target_device'] == device and current_precision['get_precision'] == precision:
                    # Fast path: model already loaded with the same settings.
                    logging.info(">> Model {} is loaded to {}".format(model_data.modelName, current_hw))
                    self.runInference = 1
                    self.send_message('{{\"set_ai_model\":\"Running {}\",\"isComplete\":1}}'.format(model_data.modelName))
                else:
                    # Settings changed: invalidate the currently loaded model.
                    if self.current_model_data:
                        self.current_model_data.clearFlag(Model_Flag.Loaded)
                    self.current_model_data = None
            if not model_data is None:
                self.set_device_params(msg)
                # self.set_precision(msg)
                # self.set_target_device(msg)
                # create a task to download model from model zoo
                self.set_video_processor_state(VideoProcessorState.Pause)
                self.send_message('{{\"set_ai_model\":\"Downloading {}\"}}'.format(model_data.modelName))
                task = self.ioLoop.run_in_executor(None, self.inference_engine.download_model, model_data)
                task.add_done_callback(self.model_download_callback)
            else:
                json_data = json.loads(msg)
                self.send_message('{{\"set_ai_model\":\"Failed to get model data for {}\",\"isFailure\":1}}'.format(json_data["SetAiModel"]))
        except CancelledError:
            logging.info('-- {0}() - Cancelled'.format(sys._getframe().f_code.co_name))
        except Exception as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_obj, exc_tb)
            logging.error('!! {0}:{1}() : Exception {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, ex))
#
# Callback function for model download
#
def model_download_callback(self, future):
if self.verbose:
logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
model_data = future.result()
assert(model_data is not None, "Model Data is None")
if model_data.isFlagSet(Model_Flag.Downloaded):
self.send_message('{{\"set_ai_model\":\"{} downloaded. Converting to IR\"}}'.format(model_data.modelName))
if model_data.framework == 'dldt':
task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, model_data)
task.add_done_callback(self.model_load_callback)
else:
task = self.ioLoop.run_in_executor(None, self.inference_engine.convert_model, model_data)
task.add_done_callback(self.model_convert_callback)
else:
self.set_video_start()
self.send_message('{{\"set_ai_model\":\"Download failed {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Callback function for model conversion
#
    def model_convert_callback(self, future):
        """Completion callback for the model-conversion (IR) task: on success
        schedules the load task, on failure restarts playback and reports."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        model_data = future.result()
        if model_data.isFlagSet(Model_Flag.Converted):
            logging.info(' FP16 {}'.format(str(model_data.ir_dir['FP16'])))
            logging.info(' FP32 {}'.format(str(model_data.ir_dir['FP32'])))
            self.send_message('{{\"set_ai_model\":\"{} converted to IR.\\nLoading....\", \"isSuccess\":1}}'.format(model_data.modelName))
            # Downloaded sources are no longer needed once IR exists.
            self.inference_engine.remove_model_dir(model_data)
            task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, model_data)
            task.add_done_callback(self.model_load_callback)
        else:
            self.set_video_start()
            self.send_message('{{\"set_ai_model\":\"Convert Failed : {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Callback function for model load
#
    def model_load_callback(self, future):
        """Completion callback for the model-load task: restarts playback and
        reports success (recording the active model) or failure."""
        if self.verbose:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        model_data = future.result()
        # Resume playback regardless of the load outcome.
        self.set_video_start()
        if model_data.isFlagSet(Model_Flag.Loaded):
            target_device = json.loads(self.inference_engine.get_target_device())
            self.send_message('{{\"set_ai_model\":\"Successfully loaded {}\", \"isComplete\":1}}'.format(model_data.modelName))
            self.send_message('{{\"get_inference_engine_info\":\"{} running on {}\"}}'.format(self.inference_engine.signature, target_device['get_target_device']))
            self.current_model_data = model_data
        else:
            self.send_message('{{\"set_ai_model\":\"Load failed : {}\",\"isFailure\":1}}'.format(model_data.errorMsg))
#
# Set hardware to run inference on
#
    def set_device_params(self, msg, reload = False):
        """Set target device and precision on the inference engine.

        msg : JSON string with "set_target_device" / "set_precision" fields.
        reload : when True and a model is active, schedule a reload of that
            model on the newly selected device.
        """
        if self.verbose:
            logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
        if self.inference_engine:
            self.inference_engine.set_target_device(msg)
            self.inference_engine.set_precision(msg)
            if reload == True and self.current_model_data:
                # create a task to download model from model zoo
                self.set_video_processor_state(VideoProcessorState.Pause)
                self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
                # NOTE(review): this schedules load_model but attaches
                # model_download_callback (not model_load_callback) — confirm
                # this is intentional.
                task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
                task.add_done_callback(self.model_download_callback)
            # NOTE(review): set_target_device is invoked a second time here;
            # harmless only if the engine call is idempotent — verify.
            return self.inference_engine.set_target_device(msg)
        else:
            assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
            # Unreachable unless Python runs with -O (asserts stripped).
            return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set hardware to run inference on
#
# def set_target_device(self, msg):
# if self.verbose:
# logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
# if self.inference_engine:
# self.inference_engine.set_target_device(msg)
# self.inference_engine.set_precision(msg)
# if self.current_model_data:
# # create a task to download model from model zoo
# self.set_video_processor_state(VideoProcessorState.Pause
# self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
# task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
# task.add_done_callback(self.model_download_callback)
# return self.inference_engine.set_target_device(msg)
# else:
# assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
# return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Return hardware to run inference on
#
    def get_target_device(self):
        """Return the inference target device (JSON from the engine)."""
        if self._debug:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self.inference_engine:
            return self.inference_engine.get_target_device()
        else:
            assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
            # Unreachable unless Python runs with -O (asserts stripped).
            return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set Inference Precision
#
# def set_precision(self, msg):
# if self.verbose:
# logging.info('>> {0}:{1}() {2}'.format(self.__class__.__name__, sys._getframe().f_code.co_name, msg))
# if self.inference_engine:
# self.inference_engine.set_precision(msg)
# if self.current_model_data:
# # create a task to download model from model zoo
# self.set_video_processor_state(VideoProcessorState.Pause
# self.send_message('{{\"set_ai_model\":\"Loading {}\"}}'.format(self.current_model_data.modelName))
# task = self.ioLoop.run_in_executor(None, self.inference_engine.load_model, self.current_model_data)
# task.add_done_callback(self.model_download_callback)
# return self.get_precision()
# else:
# assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
# return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Get Inference Precision
#
    def get_precision(self):
        """Return the inference precision setting (JSON from the engine)."""
        if self._debug:
            logging.info('>> {0}:{1}()'.format(self.__class__.__name__, sys._getframe().f_code.co_name))
        if self.inference_engine:
            return self.inference_engine.get_precision()
        else:
            assert False, '>> {} : Inference Engine Not Set'.format(sys._getframe().f_code.co_name)
            # Unreachable unless Python runs with -O (asserts stripped).
            return '{{\"{}\":\"Inference Engine Not Set\", \"isFailure\":1}}'.format(sys._getframe().f_code.co_name)
#
# Set | |
self._exp.fetch_trials_data(trial_indices=[max(self._exp.trials)]),
]
)
    @abstractmethod
    def generate_evaluate_new_parameter_values(
        self,
        evaluation_function: Callable,
        arm_count: int = -1  # -1 means
        # create all arms (i.e. all combinations of parameter values)
    ) -> None:
        """A place holder method for users that are still using it.

        It previously ran evaluation for trials. That part was moved to
        generator_run_for_search_methods(). Now this method does nothing.
        Concrete subclasses (e.g. GridSearch, RandomSearch) override it.
        """
        pass
@staticmethod
def _repivot_dataframe(armscore_df: pd.DataFrame):
"""Reformats the score data frame.
Args:
armscore_df: Pandas DataFrame object that has the arm scores
in raw format.
Returns:
Pandas DataFrame object of arm score in the new format
"""
transform = (
armscore_df.set_index(["trial_index", "arm_name", "metric_name"])
.unstack("metric_name")
.reset_index()
)
new_cols = transform.columns.to_flat_index()
parameters_holder = transform[
list(filter(lambda x: "parameters" in x, new_cols))[0]
]
transform.drop(columns="parameters", level=0, inplace=True)
new_cols = new_cols.drop(labels=filter(lambda x: "parameters" in x, new_cols))
transform.columns = ["trial_index", "arm_name"] + [
"_".join(tpl) for tpl in new_cols[2:]
]
transform["parameters"] = parameters_holder
return transform
    def list_parameter_value_scores(
        self, legit_arms_only: bool = False
    ) -> pd.DataFrame:
        """Creates a Pandas DataFrame from evaluated arms then returns it.

        The method should be called to fetch evaluation results of arms that
        are populated and evaluated so far.

        Args:
            legit_arms_only: A flag to filter arms that violate output_constraints
                if given any.

        Returns:
            A Pandas DataFrame that holds arms populated and evaluated so far.
        """
        # For experiments which have not ran generate_evaluate_new_parameter_values,
        # we cannot provide trial data without metrics, so we return empty dataframe
        if not self._exp.metrics:
            return pd.DataFrame(
                [],
                columns=[
                    "arm_name",
                    "metric_name",
                    "mean",
                    "sem",
                    "parameters",
                    "trial_index",
                ],
            )
        armscore_df = self._trial_data.df.copy()
        # Attach each arm's parameter dict, keyed by arm name.
        armscore_df["parameters"] = armscore_df["arm_name"].map(
            {k: v.parameters for k, v in self._exp.arms_by_name.items()}
        )
        if self.outcome_constraints:
            # Deduplicate entries for which there are outcome constraints
            # pyre-ignore[16]: `None` has no attribute `index`.
            armscore_df = armscore_df.loc[armscore_df.astype(str).drop_duplicates().index]
        if legit_arms_only:

            def filter_violating_arms(
                arms: List[Arm], data: Data, optimization_config: OptimizationConfig
            ) -> List[Arm]:
                # Build one boolean mask per outcome constraint, then AND
                # them all: an arm is eligible only if it satisfies every
                # constraint.
                boolean_indices = []
                for oc in optimization_config.outcome_constraints:
                    if oc.op is ComparisonOp.LEQ:
                        boolean_indices.append(
                            data.df[data.df.metric_name == oc.metric.name]["mean"]
                            <= oc.bound
                        )
                    else:
                        boolean_indices.append(
                            data.df[data.df.metric_name == oc.metric.name]["mean"]
                            >= oc.bound
                        )
                eligible_arm_indices = reduce(lambda x, y: x & y, boolean_indices)
                eligible_arm_names = data.df.loc[eligible_arm_indices.index][
                    eligible_arm_indices
                ].arm_name
                return list(
                    filter(lambda x: x.name in eligible_arm_names.values, arms)
                )

            filtered_arms = filter_violating_arms(
                list(self._exp.arms_by_name.values()),
                # pyre-fixme[6]: Expected `Data` for 2nd param but got
                #  `AbstractDataFrameData`.
                self._exp.fetch_data(),
                self._exp.optimization_config,
            )
            armscore_df = armscore_df[
                armscore_df["arm_name"].isin([arm.name for arm in filtered_arms])
            ]
        # Pivot to one row per trial/arm with per-metric score columns.
        armscore_df = self._repivot_dataframe(armscore_df)
        return armscore_df
class SearchMethodFactory(metaclass=Final):
    """Generates and returns search strategy object."""

    def __init__(self):
        # The factory is intentionally non-instantiable; use the static method.
        raise TypeError(
            "SearchMethodFactory is not allowed to be instantiated. Use "
            "it as a static class."
        )

    @staticmethod
    def create_search_method(
        parameters: List[Dict],
        selected_search_method: SearchMethodEnum = SearchMethodEnum.GRID_SEARCH,
        experiment_name: Optional[str] = None,
        objective_name: Optional[str] = None,
        outcome_constraints: Optional[List[str]] = None,
        seed: Optional[int] = None,
        bootstrap_size: int = 5,
        evaluation_function: Optional[Callable] = None,
        bootstrap_arms_for_bayes_opt: Optional[List[dict]] = None,
        multiprocessing: bool = False,
    ) -> TimeSeriesParameterTuning:
        """Create the search method object selected by the caller.

        Static factory method; it does not require the class to be
        instantiated.

        Args:
            parameters: Defines parameters by their names, their types and
                their optional values for a custom parameter search space.
            selected_search_method: Search method to be used during parameter
                tuning. It has to be an option from the enum SearchMethodEnum.
            experiment_name: Name of the experiment to be used in Ax's
                experiment object.
            objective_name: Name of the objective to be used in Ax's
                experiment evaluation.
            outcome_constraints: List of constraints defined as strings.
                Example: ['metric1 >= 0', 'metric2 < 5']
            seed: Seed for the random-search strategies.
            bootstrap_size: Number of bootstrap arms for Bayesian optimization.
            evaluation_function: Required for Bayesian optimization; evaluates
                the bootstrap arms.
            bootstrap_arms_for_bayes_opt: List of self-defined initial
                parameter values for Bayesian optimal search. Example: for
                Holt Winter's model, [{'m': 7}, {'m': 14}]
            multiprocessing: Whether trial evaluation may run in parallel.

        Returns:
            A search object, GridSearch, RandomSearch, or BayesianOptSearch,
            depending on the selection.

        Raises:
            NotImplementedError: Raised if the selection is not among
                strategies that are implemented.
        """
        if selected_search_method == SearchMethodEnum.GRID_SEARCH:
            return GridSearch(
                parameters=parameters,
                experiment_name=experiment_name,
                objective_name=objective_name,
                outcome_constraints=outcome_constraints,
                multiprocessing=multiprocessing,
            )
        if selected_search_method in (
            SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
            SearchMethodEnum.RANDOM_SEARCH_SOBOL,
        ):
            return RandomSearch(
                parameters=parameters,
                experiment_name=experiment_name,
                objective_name=objective_name,
                random_strategy=selected_search_method,
                outcome_constraints=outcome_constraints,
                seed=seed,
                multiprocessing=multiprocessing,
            )
        if selected_search_method == SearchMethodEnum.BAYES_OPT:
            assert (
                evaluation_function is not None
            ), "evaluation_function cannot be None. It is needed at initialization of BayesianOptSearch object."
            return BayesianOptSearch(
                parameters=parameters,
                evaluation_function=evaluation_function,
                experiment_name=experiment_name,
                objective_name=objective_name,
                bootstrap_size=bootstrap_size,
                seed=seed,
                bootstrap_arms_for_bayes_opt=bootstrap_arms_for_bayes_opt,
                outcome_constraints=outcome_constraints,
                multiprocessing=multiprocessing,
            )
        raise NotImplementedError(
            "A search method yet to implement is selected. Only grid"
            " search and random search are implemented."
        )
class GridSearch(TimeSeriesParameterTuning):
    """Exhaustive (full factorial) search over the parameter grid.

    Do not instantiate this class using its constructor.
    Rather use the factory, SearchMethodFactory.

    Attributes:
        parameters: Defines parameters by their names, their types and their
            optional values for a custom parameter search space.
        experiment_name: Name of the experiment to be used in Ax's experiment
            object.
        objective_name: Name of the objective to be used in Ax's experiment
            evaluation.
        outcome_constraints: List of constraints defined as strings.
            Example: ['metric1 >= 0', 'metric2 < 5']
    """

    def __init__(
        self,
        parameters: List[Dict],
        experiment_name: Optional[str] = None,
        objective_name: Optional[str] = None,
        outcome_constraints: Optional[List[str]] = None,
        multiprocessing: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(
            parameters,
            experiment_name,
            objective_name,
            outcome_constraints,
            multiprocessing,
        )
        # The factorial model enumerates every combination of parameter values.
        self._factorial = Models.FACTORIAL(
            search_space=self.get_search_space(), check_cardinality=False
        )
        self.logger.info("A factorial model for arm generation is created.")
        self.logger.info("A GridSearch object is successfully created.")

    def generate_evaluate_new_parameter_values(
        self, evaluation_function: Callable, arm_count: int = -1
    ) -> None:
        """This method can only be called once. arm_count other than -1 will be ignored
        as this search strategy exhaustively explores all arms.
        """
        if arm_count != -1:
            # FullFactorialGenerator ignores specified arm_count as it automatically determines how many arms
            self.logger.info(
                "GridSearch arm_count input is ignored and automatically determined by generator."
            )
        factorial_run = self._factorial.gen(n=-1)
        self.generator_run_for_search_method(
            evaluation_function=evaluation_function, generator_run=factorial_run
        )
class RandomSearch(TimeSeriesParameterTuning):
"""Random search for hyperparameter tuning.
Do not instantiate this class using its constructor.
Rather use the factory, SearchMethodFactory.
Attributes:
parameters: List[Dict],
Defines parameters by their names, their types their optional
values for custom parameter search space.
experiment_name: str = None,
Name of the experiment to be used in Ax's experiment object.
objective_name: str = None,
Name of the objective to be used in Ax's experiment evaluation.
seed: int = None,
Seed for Ax quasi-random model. If None, then time.time() is set.
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
By now, we already know that the search method is random search.
However, there are optional random strategies: UNIFORM, or SOBOL.
This parameter allows to select it.
outcome_constraints: List[str] = None
List of constraints defined as strings. Example: ['metric1 >= 0',
            'metric2 < 5']
"""
def __init__(
self,
parameters: List[Dict],
experiment_name: Optional[str] = None,
objective_name: Optional[str] = None,
seed: Optional[int] = None,
random_strategy: SearchMethodEnum = SearchMethodEnum.RANDOM_SEARCH_UNIFORM,
outcome_constraints: Optional[List[str]] = None,
multiprocessing: bool = False,
**kwargs,
) -> None:
super().__init__(
parameters,
experiment_name,
objective_name,
outcome_constraints,
multiprocessing,
)
if seed is None:
seed = int(time.time())
self.logger.info(
"No seed is given by the user, it will be set by the current time"
)
self.logger.info("Seed that is used in random search: {seed}".format(seed=seed))
if random_strategy == SearchMethodEnum.RANDOM_SEARCH_UNIFORM:
self._random_strategy_model = Models.UNIFORM(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
elif random_strategy == SearchMethodEnum.RANDOM_SEARCH_SOBOL:
self._random_strategy_model = Models.SOBOL(
search_space=self.get_search_space(), deduplicate=True, seed=seed
)
else:
raise NotImplementedError(
"Invalid random strategy selection. It should be either "
"uniform or sobol."
)
self.logger.info(
"A {random_strategy} model for candidate parameter value generation"
" is created.".format(random_strategy=random_strategy)
)
self.logger.info("A RandomSearch object is successfully created.")
def generate_evaluate_new_parameter_values(
self, evaluation_function: Callable, arm_count: int = 1
) -> None:
"""This method can be called as many times as desired with arm_count in
desired number. The total number of generated candidates will be equal
to the their multiplication. Suppose we would like to sample k
candidates where k = m x n such that k, m, n are integers. We can call
this function once with `arm_count=k`, or call it k time with
`arm_count=1` (or without that parameter at all), or call it | |
sage: FW(y) == x
True
sage: FW(z) == x
True
.. WARNING::
Must be implemented in style "PW0" and "W0P".
"""
PW0 = self.realization_of().PW0()
return self(PW0.from_classical_weyl(w))
    def from_dual_classical_weyl(self, w):
        r"""
        Return the image of `w` from the finite Weyl group of dual form into ``self``.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',3,1]); PvW0 = E.PvW0()
            sage: W0v = E.dual_classical_weyl()
            sage: w = W0v.from_reduced_word([2,1,3])
            sage: y = PvW0.from_dual_classical_weyl(w); y
            s2*s3*s1
            sage: y.parent() == PvW0
            True
            sage: y.to_dual_classical_weyl() == w
            True
            sage: x = E.FW().from_dual_classical_weyl(w); x
            S2*S3*S1
            sage: PvW0(x) == y
            True

        .. WARNING::

            Must be implemented in style "PvW0" and "W0Pv".
        """
        # Delegate to the "PvW0" realization, which implements this map
        # directly, then coerce the result into ``self``.
        return self(self.realization_of().PvW0().from_dual_classical_weyl(w))
def from_affine_weyl(self, w):
r"""
Return the image of `w` under the homomorphism from the affine Weyl group
into ``self``.
EXAMPLES::
sage: E = ExtendedAffineWeylGroup(['A',3,1]); PW0=E.PW0()
sage: W = E.affine_weyl()
sage: w = W.from_reduced_word([2,1,3,0])
sage: x = PW0.from_affine_weyl(w); x
t[Lambdacheck[1] - 2*Lambdacheck[2] + Lambdacheck[3]] * s3*s1
sage: FW = E.FW()
sage: y = FW.from_affine_weyl(w); y
S2*S3*S1*S0
sage: FW(x) == y
True
.. WARNING::
Must be implemented in style "WF" and "FW".
"""
WF = self.realization_of().WF()
return self(WF.from_affine_weyl(w))
def from_reduced_word(self, word):
r"""
Converts an affine or finite reduced word into a group element.
EXAMPLES::
sage: ExtendedAffineWeylGroup(['A',2,1]).PW0().from_reduced_word([1,0,1,2])
t[-Lambdacheck[1] + 2*Lambdacheck[2]]
"""
return self.from_affine_weyl(self.realization_of().affine_weyl().from_reduced_word(word))
class ElementMethods:
    @abstract_method
    def has_descent(self, i, side='right', positive=False):
        r"""
        Return whether ``self`` * `s_i` < ``self`` where `s_i` is the `i`-th simple
        reflection in the realized group.

        INPUT:

        - ``i`` -- an affine Dynkin index

        OPTIONAL:

        - ``side`` -- 'right' or 'left' (default: 'right')
        - ``positive`` -- True or False (default: False)

        If ``side``='left' then the reflection acts
        on the left. If ``positive`` = True then the inequality is reversed.

        EXAMPLES::

            sage: E = ExtendedAffineWeylGroup(['A',3,1]); WF=E.WF()
            sage: F = E.fundamental_group()
            sage: x = WF.an_element(); x
            S0*S1*S2*S3 * pi[3]
            sage: I = E.cartan_type().index_set()
            sage: [(i, x.has_descent(i)) for i in I]
            [(0, True), (1, False), (2, False), (3, False)]
            sage: [(i, x.has_descent(i,side='left')) for i in I]
            [(0, True), (1, False), (2, False), (3, False)]
            sage: [(i, x.has_descent(i,positive=True)) for i in I]
            [(0, False), (1, True), (2, True), (3, True)]

        .. WARNING::

            This method is abstract because it is used in the recursive coercions
            between "PW0" and "WF" and other methods use this coercion.
        """
        # Abstract: each realization's element class supplies the implementation.
def first_descent(self, side='right', positive=False, index_set=None):
    r"""
    Return the first descent of ``self``.

    INPUT:

    - ``side`` -- 'left' or 'right' (default: 'right')
    - ``positive`` -- True or False (default: False)
    - ``index_set`` -- an optional subset of Dynkin nodes

    If ``index_set`` is not None, then the descent must be in the ``index_set``.

    EXAMPLES::

        sage: x = ExtendedAffineWeylGroup(['A',3,1]).WF().an_element(); x
        S0*S1*S2*S3 * pi[3]
        sage: x.first_descent()
        0
        sage: x.first_descent(side='left')
        0
        sage: x.first_descent(positive=True)
        1
        sage: x.first_descent(side='left',positive=True)
        1
    """
    if index_set is None:
        index_set = self.parent().realization_of().cartan_type().index_set()
    # Scan the nodes in order; ``None`` signals that no descent exists.
    return next(
        (i for i in index_set
         if self.has_descent(i, side=side, positive=positive)),
        None)
def apply_simple_reflection(self, i, side='right'):
    r"""
    Multiply ``self`` by the `i`-th simple reflection on the given side.

    EXAMPLES::

        sage: x = ExtendedAffineWeylGroup(['A',3,1]).WF().an_element(); x
        S0*S1*S2*S3 * pi[3]
        sage: x.apply_simple_reflection(1)
        S0*S1*S2*S3*S0 * pi[3]
        sage: x.apply_simple_reflection(0, side='left')
        S1*S2*S3 * pi[3]
    """
    reflection = self.parent().simple_reflection(i)
    if side == 'right':
        return self * reflection
    return reflection * self
def apply_simple_projection(self, i, side='right', length_increasing=True):
    r"""
    Return the product of ``self`` by the simple reflection `s_i` if that product is
    of greater length than ``self``, and otherwise return ``self`` unchanged.

    INPUT:

    - ``self`` -- an element of the extended affine Weyl group
    - `i` -- a Dynkin node (index of a simple reflection `s_i`)
    - ``side`` -- 'right' or 'left' (default: 'right'); the side of ``self``
      on which `s_i` is multiplied
    - ``length_increasing`` -- True or False (default True). If False,
      keep the product only when it is *shorter* than ``self``.

    EXAMPLES::

        sage: x = ExtendedAffineWeylGroup(['A',3,1]).WF().an_element(); x
        S0*S1*S2*S3 * pi[3]
        sage: x.apply_simple_projection(1)
        S0*S1*S2*S3*S0 * pi[3]
        sage: x.apply_simple_projection(1, length_increasing=False)
        S0*S1*S2*S3 * pi[3]
    """
    # Guard clause: when there is no descent in the requested direction
    # the element is already the projection.
    if not self.has_descent(i, side=side, positive=length_increasing):
        return self
    return self.apply_simple_reflection(i, side=side)
def to_fundamental_group(self):
    r"""
    Return the image of ``self`` under the homomorphism to the fundamental group.

    EXAMPLES::

        sage: PW0 = ExtendedAffineWeylGroup(['A',3,1]).PW0()
        sage: b = PW0.realization_of().lattice_basis()
        sage: [(x, PW0.from_translation(x).to_fundamental_group()) for x in b]
        [(Lambdacheck[1], pi[1]), (Lambdacheck[2], pi[2]), (Lambdacheck[3], pi[3])]

    .. WARNING::

        Must be implemented in style "WF".
    """
    # Delegate to the "WF" realization, which knows this projection.
    wf_realization = self.parent().realization_of().WF()
    return wf_realization(self).to_fundamental_group()
def to_classical_weyl(self):
    r"""
    Return the image of ``self`` under the homomorphism to the classical Weyl group.

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',3,1]).WF().simple_reflection(0).to_classical_weyl()
        s1*s2*s3*s2*s1

    .. WARNING::

        Must be implemented in style "PW0".
    """
    # Delegate to the "PW0" realization, which knows this projection.
    pw0_realization = self.parent().realization_of().PW0()
    return pw0_realization(self).to_classical_weyl()
def to_dual_classical_weyl(self):
    r"""
    Return the image of ``self`` under the homomorphism to the dual form of
    the classical Weyl group.

    EXAMPLES::

        sage: x = ExtendedAffineWeylGroup(['A',3,1]).WF().simple_reflection(0).to_dual_classical_weyl(); x
        s1*s2*s3*s2*s1
        sage: x.parent()
        Weyl Group of type ['A', 3] (as a matrix group acting on the weight lattice)

    .. WARNING::

        Must be implemented in style "PvW0".
    """
    # Delegate to the "PvW0" realization, which knows this projection.
    pvw0_realization = self.parent().realization_of().PvW0()
    return pvw0_realization(self).to_dual_classical_weyl()
def to_affine_weyl_left(self):
    r"""
    Return the projection of ``self`` to the affine Weyl group on the left,
    after factorizing using the style "WF".

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',3,1]); PW0 = E.PW0()
        sage: b = E.lattice_basis()
        sage: [(x,PW0.from_translation(x).to_affine_weyl_left()) for x in b]
        [(Lambdacheck[1], S0*S3*S2), (Lambdacheck[2], S0*S3*S1*S0), (Lambdacheck[3], S0*S1*S2)]

    .. WARNING::

        Must be implemented in style "WF".
    """
    # Delegate to the "WF" realization, which knows this factorization.
    wf_realization = self.parent().realization_of().WF()
    return wf_realization(self).to_affine_weyl_left()
def to_affine_weyl_right(self):
    r"""
    Return the projection of ``self`` to the affine Weyl group on the right,
    after factorizing using the style "FW".

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',3,1]); PW0=E.PW0()
        sage: b = E.lattice_basis()
        sage: [(x,PW0.from_translation(x).to_affine_weyl_right()) for x in b]
        [(Lambdacheck[1], S3*S2*S1), (Lambdacheck[2], S2*S3*S1*S2), (Lambdacheck[3], S1*S2*S3)]

    .. WARNING::

        Must be implemented in style "FW".
    """
    # Delegate to the "FW" realization, which knows this factorization.
    fw_realization = self.parent().realization_of().FW()
    return fw_realization(self).to_affine_weyl_right()
def to_translation_left(self):
    r"""
    Return the projection of ``self`` to the translation lattice after factorizing
    it to the left using the style "PW0".

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',3,1]).PW0().simple_reflection(0).to_translation_left()
        Lambdacheck[1] + Lambdacheck[3]

    .. WARNING::

        Must be implemented in style "PW0".
    """
    # Delegate to the "PW0" realization, which knows this factorization.
    pw0_realization = self.parent().realization_of().PW0()
    return pw0_realization(self).to_translation_left()
def to_translation_right(self):
    r"""
    Return the projection of ``self`` to the translation lattice after factorizing
    it to the right using the style "W0P".

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',3,1]).PW0().simple_reflection(0).to_translation_right()
        -Lambdacheck[1] - Lambdacheck[3]

    .. WARNING::

        Must be implemented in style "W0P".
    """
    # Delegate to the "W0P" realization, which knows this factorization.
    w0p_realization = self.parent().realization_of().W0P()
    return w0p_realization(self).to_translation_right()
def to_dual_translation_left(self):
    r"""
    Return the projection of ``self`` to the dual translation lattice after
    factorizing it to the left using the style "PvW0".

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',3,1]).PvW0().simple_reflection(0).to_dual_translation_left()
        Lambda[1] + Lambda[3]

    .. WARNING::

        Must be implemented in style "PvW0".
    """
    # Delegate to the "PvW0" realization, which knows this factorization.
    pvw0_realization = self.parent().realization_of().PvW0()
    return pvw0_realization(self).to_dual_translation_left()
def to_dual_translation_right(self):
    r"""
    Return the projection of ``self`` to the dual translation lattice after
    factorizing it to the right using the style "W0Pv".

    EXAMPLES::

        sage: ExtendedAffineWeylGroup(['A',3,1]).PW0().simple_reflection(0).to_dual_translation_right()
        -Lambda[1] - Lambda[3]

    .. WARNING::

        Must be implemented in style "W0Pv".
    """
    # Delegate to the "W0Pv" realization, which knows this factorization.
    w0pv_realization = self.parent().realization_of().W0Pv()
    return w0pv_realization(self).to_dual_translation_right()
def length(self):
    r"""
    Return the length of ``self`` in the Coxeter group sense.

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',3,1]); PW0=E.PW0()
        sage: I0 = E.cartan_type().classical().index_set()
        sage: [PW0.from_translation(E.lattice_basis()[i]).length() for i in I0]
        [3, 4, 3]
    """
    # Length is read off the affine Weyl factor of the "WF" factorization.
    affine_part = self.to_affine_weyl_left()
    return affine_part.length()
def coset_representative(self, index_set, side='right'):
    r"""
    Return the minimum length representative in the coset of ``self`` with
    respect to the subgroup generated by the reflections given by ``index_set``.

    INPUT:

    - ``self`` -- an element of the extended affine Weyl group
    - ``index_set`` -- a subset of the set of Dynkin nodes
    - ``side`` -- 'right' or 'left' (default: 'right') the side on which the subgroup acts

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',3,1]); WF = E.WF()
        sage: b = E.lattice_basis()
        sage: I0 = E.cartan_type().classical().index_set()
        sage: [WF.from_translation(x).coset_representative(index_set=I0) for x in b]
        [pi[1], pi[2], pi[3]]
    """
    # Repeatedly strip descents in ``index_set`` until none remain.
    representative = self
    descent = representative.first_descent(index_set=index_set, side=side)
    while descent is not None:
        representative = representative.apply_simple_reflection(descent, side=side)
        descent = representative.first_descent(index_set=index_set, side=side)
    return representative
def is_grassmannian(self, index_set, side='right'):
    r"""
    Return whether ``self`` is of minimum length in its coset with respect to the
    subgroup generated by the reflections of ``index_set``.

    EXAMPLES::

        sage: E = ExtendedAffineWeylGroup(['A',3,1]); PW0=E.PW0()
        sage: x = PW0.from_translation(E.lattice_basis()[1]); x
        t[Lambdacheck[1]]
        sage: I = E.cartan_type().index_set()
        sage: [(i, x.is_grassmannian(index_set=[i])) for i in I]
        [(0, True), (1, False), (2, True), (3, True)]
        sage: [(i, x.is_grassmannian(index_set=[i], side='left')) for i in I]
        [(0, False), (1, True), (2, True), (3, True)]
    """
    # Minimal-length elements are exactly the fixed points of
    # ``coset_representative``.
    minimal = self.coset_representative(index_set=index_set, side=side)
    return self == minimal
def to_affine_grassmannian(self):
r"""
Return the unique affine | |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Tests for the `verdi group` command."""
import pytest
from aiida import orm
from aiida.cmdline.commands import cmd_group
from aiida.cmdline.utils.echo import ExitCode
from aiida.common import exceptions
class TestVerdiGroup:
"""Tests for the `verdi group` command."""
@pytest.fixture(autouse=True)
def init_profile(self, aiida_profile_clean, run_cli_command):  # pylint: disable=unused-argument
    """Create the dummy groups used by the tests and bind the CLI runner."""
    # pylint: disable=attribute-defined-outside-init
    for label in ('dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4'):
        orm.Group(label=label).store()
    self.cli_runner = run_cli_command
def test_help(self):
    """Tests help text for all group sub commands."""
    options = ['--help']
    # Every sub command should print a usage message when asked for help.
    sub_commands = (
        cmd_group.group_list,
        cmd_group.group_create,
        cmd_group.group_delete,
        cmd_group.group_relabel,
        cmd_group.group_description,
        cmd_group.group_add_nodes,
        cmd_group.group_remove_nodes,
        cmd_group.group_show,
        cmd_group.group_copy,
    )
    for sub_command in sub_commands:
        result = self.cli_runner(sub_command, options)
        assert 'Usage' in result.output
def test_create(self):
    """Test `verdi group create` command."""
    self.cli_runner(cmd_group.group_create, ['dummygroup5'])
    # The newly created group should show up in `verdi group list`.
    listing = self.cli_runner(cmd_group.group_list)
    assert 'dummygroup5' in listing.output
def test_list(self):
    """Test `verdi group list` command."""
    listing = self.cli_runner(cmd_group.group_list)
    assert 'dummygroup1' in listing.output
    assert 'dummygroup2' in listing.output
def test_list_order(self):
    """Test `verdi group list` command with ordering options."""
    orm.Group(label='agroup').store()

    def listed_labels(options):
        """Run `verdi group list` and return the group labels in table order."""
        result = self.cli_runner(cmd_group.group_list, options)
        # Skip the three table header lines; column 1 holds the label.
        return [row.split()[1] for row in result.output.split('\n')[3:] if row]

    # Default ordering is by label.
    assert listed_labels([]) == ['agroup', 'dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4']
    # Ordering by id returns creation order.
    assert listed_labels(['--order-by', 'id']) == [
        'dummygroup1', 'dummygroup2', 'dummygroup3', 'dummygroup4', 'agroup'
    ]
    # Descending direction reverses the id ordering.
    assert listed_labels(['--order-by', 'id', '--order-direction', 'desc']) == [
        'agroup', 'dummygroup4', 'dummygroup3', 'dummygroup2', 'dummygroup1'
    ]
def test_copy(self):
    """Test `verdi group copy` command."""
    copy_result = self.cli_runner(cmd_group.group_copy, ['dummygroup1', 'dummygroup2'])
    assert 'Success' in copy_result.output
def test_delete(self):
    """Test `verdi group delete` command.

    Covers the dry run, plain deletion, deletion of a non-empty group
    (contained nodes must survive), and deletion together with the nodes.
    """
    orm.Group(label='group_test_delete_01').store()
    orm.Group(label='group_test_delete_02').store()
    orm.Group(label='group_test_delete_03').store()
    # dry run: the group must still exist afterwards, so loading it should not raise
    result = self.cli_runner(cmd_group.group_delete, ['--dry-run', 'group_test_delete_01'])
    orm.load_group(label='group_test_delete_01')
    result = self.cli_runner(cmd_group.group_delete, ['--force', 'group_test_delete_01'])
    # Verify that removed group is not present in list
    result = self.cli_runner(cmd_group.group_list)
    assert 'group_test_delete_01' not in result.output
    node_01 = orm.CalculationNode().store()
    node_02 = orm.CalculationNode().store()
    node_pks = {node_01.pk, node_02.pk}
    # Add some nodes and then use `verdi group delete` to delete a group that contains nodes
    group = orm.load_group(label='group_test_delete_02')
    group.add_nodes([node_01, node_02])
    assert group.count() == 2
    result = self.cli_runner(cmd_group.group_delete, ['--force', 'group_test_delete_02'])
    with pytest.raises(exceptions.NotExistent):
        orm.load_group(label='group_test_delete_02')
    # check nodes still exist: without `--delete-nodes` only the group is removed
    for pk in node_pks:
        orm.load_node(pk)
    # delete the group and the nodes it contains
    group = orm.load_group(label='group_test_delete_03')
    group.add_nodes([node_01, node_02])
    result = self.cli_runner(cmd_group.group_delete, ['--force', '--delete-nodes', 'group_test_delete_03'])
    # check group and nodes no longer exist
    with pytest.raises(exceptions.NotExistent):
        orm.load_group(label='group_test_delete_03')
    for pk in node_pks:
        with pytest.raises(exceptions.NotExistent):
            orm.load_node(pk)
def test_show(self):
    """Test `verdi group show` command."""
    result = self.cli_runner(cmd_group.group_show, ['dummygroup1'])
    # The report should list the label, type string and (empty) description.
    expected_fragments = (
        'Group label', 'dummygroup1', 'Group type_string', 'core', 'Group description', '<no description>'
    )
    for fragment in expected_fragments:
        assert fragment in result.output
def test_show_limit(self):
    """Test `--limit` option of the `verdi group show` command.

    With and without `--raw`, at most `--limit` nodes should be listed.
    """
    label = 'test_group_limit'
    nodes = [orm.Data().store(), orm.Data().store()]
    group = orm.Group(label=label).store()
    group.add_nodes(nodes)
    # Default should include all nodes in the output
    result = self.cli_runner(cmd_group.group_show, [label])
    for node in nodes:
        assert str(node.pk) in result.output
    # Repeat test with `limit=1`, use also the `--raw` option to only display nodes
    result = self.cli_runner(cmd_group.group_show, [label, '--limit', '1', '--raw'])
    # The current `verdi group show` does not support ordering so we cannot rely on that for now to test if only
    # one of the nodes is shown
    assert len(result.output.strip().split('\n')) == 1
    assert str(nodes[0].pk) in result.output or str(nodes[1].pk) in result.output
    # Repeat test with `limit=1` but without the `--raw` flag as it has a different code path that is affected
    result = self.cli_runner(cmd_group.group_show, [label, '--limit', '1'])
    # Check that one, and only one pk appears in the output
    assert str(nodes[0].pk) in result.output or str(nodes[1].pk) in result.output
    assert not (str(nodes[0].pk) in result.output and str(nodes[1].pk) in result.output)
def test_description(self):
    """Test `verdi group description` command."""
    new_description = 'It is a new description'
    group = orm.load_group(label='dummygroup2')
    assert group.description != new_description
    # Setting a description should update the group in place.
    self.cli_runner(cmd_group.group_description, [group.label, new_description])
    assert group.description == new_description
    # Without a description argument the command echoes the current one.
    result = self.cli_runner(cmd_group.group_description, [group.label])
    assert new_description in result.output
def test_relabel(self):
    """Test `verdi group relabel` command."""
    self.cli_runner(cmd_group.group_relabel, ['dummygroup4', 'relabeled_group'])
    # `verdi group list` should now show only the new label.
    listing = self.cli_runner(cmd_group.group_list)
    assert 'dummygroup4' not in listing.output
    assert 'relabeled_group' in listing.output
def test_add_remove_nodes(self):
    """Test `verdi group add-nodes` and `verdi group remove-nodes` commands.

    Covers forced add/remove, `--clear`, invalid option combinations and
    the interactive confirmation prompts.
    """
    node_01 = orm.CalculationNode().store()
    node_02 = orm.CalculationNode().store()
    node_03 = orm.CalculationNode().store()
    result = self.cli_runner(cmd_group.group_add_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
    # Check if node is added in group using group show command
    result = self.cli_runner(cmd_group.group_show, ['dummygroup1'])
    assert 'CalculationNode' in result.output
    assert str(node_01.pk) in result.output
    # Remove same node
    result = self.cli_runner(cmd_group.group_remove_nodes, ['--force', '--group=dummygroup1', node_01.uuid])
    # Check that the node is no longer in the group
    result = self.cli_runner(cmd_group.group_show, ['-r', 'dummygroup1'])
    assert 'CalculationNode' not in result.output
    assert str(node_01.pk) not in result.output
    # Add all three nodes and then use `verdi group remove-nodes --clear` to remove them all
    group = orm.load_group(label='dummygroup1')
    group.add_nodes([node_01, node_02, node_03])
    assert group.count() == 3
    result = self.cli_runner(cmd_group.group_remove_nodes, ['--force', '--clear', '--group=dummygroup1'])
    assert group.count() == 0
    # Try to remove node that isn't in the group: should exit with a critical error
    result = self.cli_runner(cmd_group.group_remove_nodes, ['--group=dummygroup1', node_01.uuid], raises=True)
    assert result.exit_code == ExitCode.CRITICAL
    # Try to remove no nodes nor clear the group: also a critical error
    result = self.cli_runner(cmd_group.group_remove_nodes, ['--group=dummygroup1'], raises=True)
    assert result.exit_code == ExitCode.CRITICAL
    # Try to remove both nodes and clear the group: the options are mutually exclusive
    result = self.cli_runner(
        cmd_group.group_remove_nodes, ['--group=dummygroup1', '--clear', node_01.uuid], raises=True
    )
    assert result.exit_code == ExitCode.CRITICAL
    # Add a node with confirmation
    result = self.cli_runner(cmd_group.group_add_nodes, ['--group=dummygroup1', node_01.uuid], user_input='y')
    assert group.count() == 1
    # Try to remove two nodes, one that isn't in the group, but abort
    result = self.cli_runner(
        cmd_group.group_remove_nodes, ['--group=dummygroup1', node_01.uuid, node_02.uuid],
        user_input='N',
        raises=True
    )
    assert 'Warning' in result.output
    assert group.count() == 1
    # Try to clear all nodes from the group, but abort
    result = self.cli_runner(
        cmd_group.group_remove_nodes, ['--group=dummygroup1', '--clear'], user_input='N', raises=True
    )
    assert 'Are you sure you want to remove ALL' in result.output
    assert 'Aborted' in result.output
    assert group.count() == 1
def test_move_nodes(self):
"""Test `verdi group move-nodes` command."""
node_01 = orm.CalculationNode().store()
node_02 = orm.Int(1).store()
node_03 = orm.Bool(True).store()
group1 = orm.load_group('dummygroup1')
group2 = orm.load_group('dummygroup2')
group1.add_nodes([node_01, node_02])
# Moving the nodes to the same group
result = self.cli_runner(
cmd_group.group_move_nodes, ['-s', 'dummygroup1', '-t', 'dummygroup1', node_01.uuid, node_02.uuid],
raises=True
)
assert 'Source and target group are the same:' in result.output
# Not specifying NODES or `--all`
result = self.cli_runner(cmd_group.group_move_nodes, ['-s', 'dummygroup1', '-t', 'dummygroup2'], raises=True)
assert 'Neither NODES or the `-a, --all` option was specified.' in result.output
# Moving the nodes from the empty group
result = self.cli_runner(
cmd_group.group_move_nodes, ['-s', 'dummygroup2', '-t', 'dummygroup1', node_01.uuid, node_02.uuid],
raises=True
)
assert 'None of the specified nodes are in' in result.output
# Move two nodes to the second dummy group, but specify a missing uuid
result = self.cli_runner(
cmd_group.group_move_nodes, ['-s', 'dummygroup1', '-t', 'dummygroup2', node_01.uuid, node_03.uuid],
raises=True
)
assert f'1 nodes with PK {{{node_03.pk}}} are not in' in result.output
# Check that the node that is present is actually moved
result = self.cli_runner(
cmd_group.group_move_nodes,
['-f', '-s', 'dummygroup1', '-t', 'dummygroup2', node_01.uuid, node_03.uuid],
)
assert node_01 not in group1.nodes
assert node_01 in group2.nodes
# Add the first node back to the first group, and try to move it from the second one
group1.add_nodes(node_01)
result = self.cli_runner(
cmd_group.group_move_nodes, ['-s', 'dummygroup2', '-t', 'dummygroup1', node_01.uuid], raises=True
)
assert f'1 nodes with PK {{{node_01.pk}}} are already' in result.output
# Check that | |
# container-service-extension
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
from urllib.parse import urlparse
import click
import pika
from pyvcloud.vcd.api_extension import APIExtension
from pyvcloud.vcd.client import BasicLoginCredentials
from pyvcloud.vcd.client import Client
from pyvcloud.vcd.client import FenceMode
from pyvcloud.vcd.exceptions import EntityNotFoundException
from pyvcloud.vcd.exceptions import MissingRecordException
from pyvcloud.vcd.exceptions import OperationNotSupportedException
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.platform import Platform
from pyvcloud.vcd.vapp import VApp
import requests
from requests.exceptions import HTTPError
from vcd_cli.utils import stdout
from vsphere_guest_run.vsphere import VSphere
import yaml
from container_service_extension.exceptions import AmqpConnectionError
from container_service_extension.exceptions import AmqpError
from container_service_extension.logger import configure_install_logger
from container_service_extension.logger import INSTALL_LOG_FILEPATH
from container_service_extension.logger import INSTALL_LOGGER as LOGGER
from container_service_extension.logger import SERVER_DEBUG_WIRELOG_FILEPATH
from container_service_extension.logger import setup_log_file_directory
from container_service_extension.nsxt.cse_nsxt_setup_utils import \
setup_nsxt_constructs
from container_service_extension.nsxt.dfw_manager import DFWManager
from container_service_extension.nsxt.ipset_manager import IPSetManager
from container_service_extension.nsxt.nsxt_client import NSXTClient
from container_service_extension.server_constants import \
CSE_NATIVE_DEPLOY_RIGHT_BUNDLE_KEY, CSE_NATIVE_DEPLOY_RIGHT_CATEGORY, \
CSE_NATIVE_DEPLOY_RIGHT_DESCRIPTION, CSE_NATIVE_DEPLOY_RIGHT_NAME, \
CSE_PKS_DEPLOY_RIGHT_BUNDLE_KEY, CSE_PKS_DEPLOY_RIGHT_CATEGORY, \
CSE_PKS_DEPLOY_RIGHT_DESCRIPTION, CSE_PKS_DEPLOY_RIGHT_NAME, \
CSE_SERVICE_NAME, CSE_SERVICE_NAMESPACE # noqa
from container_service_extension.utils import catalog_exists
from container_service_extension.utils import catalog_item_exists
from container_service_extension.utils import check_file_permissions
from container_service_extension.utils import check_keys_and_value_types
from container_service_extension.utils import create_and_share_catalog
from container_service_extension.utils import download_file
from container_service_extension.utils import EXCHANGE_TYPE
from container_service_extension.utils import get_data_file
from container_service_extension.utils import get_duplicate_items_in_list
from container_service_extension.utils import get_org
from container_service_extension.utils import get_vdc
from container_service_extension.utils import get_vsphere
from container_service_extension.utils import SYSTEM_ORG_NAME
from container_service_extension.utils import upload_ova_to_catalog
from container_service_extension.utils import vgr_callback
from container_service_extension.utils import wait_for_catalog_item_to_resolve
from container_service_extension.utils import wait_until_tools_ready
# Settings for the temporary vApp that is spun up while building templates.
TEMP_VAPP_NETWORK_ADAPTER_TYPE = "vmxnet3"
TEMP_VAPP_FENCE_MODE = FenceMode.BRIDGED.value
INSTRUCTIONS_FOR_PKS_CONFIG_FILE = "\
# Config file for PKS enabled CSE Server to be filled by administrators.\n\
# This config file has the following four sections:\n\
# 1. pks_api_servers:\n\
# a. Each entry in the list represents a PKS api server that is part \n\
# of the deployment.\n\
# b. The field 'name' in each entry should be unique. The value of \n\
# the field has no bearing on the real world PKS api server, it's \n\
# used to tie in various segments of the config file together.\n\
# c. The field 'vc' represents the name with which the PKS vCenter \n\
# is registered in vCD.\n\
# 2. pks_accounts:\n\
# a. Each entry in the list represents a PKS account that can be used \n\
# talk to a certain PKS api server.\n\
# b. The field 'name' in each entry should be unique. The value of \n\
# the field has no bearing on the real world PKS accounts, it's \n\
# used to tie in various segments of the config file together.\n\
# c. The field 'pks_api_server' is a reference to the PKS api server \n\
# which owns this account. It's value should be equal to value of \n\
# the field 'name' of the corresponding PKS api server.\n\
# 3. pvdcs:\n\
# a. Each entry in the list represents a Provider VDC in vCD that is \n\
# backed by a cluster of the PKS managed vCenter server.\n\
# b. The field 'name' in each entry should be the name of the \n\
# Provider VDC as it appears in vCD.\n\
# c. The field 'pks_api_server' is a reference to the PKS api server \n\
# which owns this account. It's value should be equal to value of \n\
# the field 'name' of the corresponding PKS api server.\n\
# 4. nsxt_servers:\n\
# a. Each entry in the list represents a NSX-T server that has been \n\
# alongside a PKS server to manage its networking. CSE needs these \n\
# details to enforce network isolation of clusters.\n\
# b. The field 'name' in each entry should be unique. The value of \n\
# the field has no bearing on the real world NSX-T server, it's \n\
# used to tie in various segments of the config file together.\n\
# c. The field 'pks_api_server' is a reference to the PKS api server \n\
# which owns this account. It's value should be equal to value of \n\
# the field 'name' of the corresponding PKS api server.\n\
# d. The field 'distributed_firewall_section_anchor_id' should be \n\
# populated with id of a Distributed Firewall Section e.g. it can \n\
# be the id of the section called 'Default Layer3 Section' which \n\
# PKS creates on installation.\n\
# For more information, please refer to CSE documentation page:\n\
# https://vmware.github.io/container-service-extension/INSTALLATION.html\n"
NOTE_FOR_PKS_KEY_IN_CONFIG_FILE = "\
# Filling out this key for regular CSE set up is optional and should be left\n\
# as is. Only for CSE set up enabled for PKS container provider, this value\n\
# needs to point to a valid PKS config file name.\n"
PKS_CONFIG_NOTE = "\
# [OPTIONAL] PKS CONFIGS\n\
# These configs are required only for customers with PKS enabled CSE.\n\
# Regular CSE users, with no PKS container provider in their system, do not \n\
# need these configs to be filled out in a separate yaml file."
# Sample `amqp` section of the CSE config file.
SAMPLE_AMQP_CONFIG = {
    'amqp': {
        'host': 'amqp.vmware.com',
        'port': 5672,
        'prefix': 'vcd',
        'username': 'guest',
        'password': '<PASSWORD>',
        'exchange': 'cse-ext',
        'routing_key': 'cse',
        'ssl': False,
        'ssl_accept_all': False,
        'vhost': '/'
    }
}

# Sample `vcd` section: connection details of the vCloud Director instance.
SAMPLE_VCD_CONFIG = {
    'vcd': {
        'host': 'vcd.vmware.com',
        'port': 443,
        'username': 'administrator',
        'password': '<PASSWORD>',
        'api_version': '31.0',
        'verify': True,
        'log': True
    }
}

# Sample `vcs` section: one entry per vCenter registered in vCD.
SAMPLE_VCS_CONFIG = {
    'vcs': [
        {
            'name': 'vc1',
            'username': '<EMAIL>',
            'password': '<PASSWORD>',
            'verify': True
        },
        {
            'name': 'vc2',
            'username': '<EMAIL>',
            'password': '<PASSWORD>',
            'verify': True
        }
    ]
}

# Sample `service` section of the CSE config file.
SAMPLE_SERVICE_CONFIG = {
    'service': {
        'listeners': 5,
        'enforce_authorization': False
    }
}
SAMPLE_TEMPLATE_PHOTON_V2 = {
'name': 'photon-v2',
'catalog_item': 'photon-custom-hw11-2.0-304b817-k8s',
'source_ova_name': 'photon-custom-hw11-2.0-304b817.ova',
'source_ova': 'http://dl.bintray.com/vmware/photon/2.0/GA/ova/photon-custo\
m-hw11-2.0-304b817.ova',
'sha256_ova': 'cb51e4b6d899c3588f961e73282709a0d054bb421787e140a1d80c24d4f\
d89e1',
'temp_vapp': 'photon2-temp',
'cleanup': True,
'cpu': 2,
'mem': 2048,
'admin_password': '<PASSWORD>',
'description': 'PhotonOS v2\nDocker 17.06.0-9\nKubernetes 1.10.11\nweave \
2.3.0'
}
SAMPLE_TEMPLATE_UBUNTU_16_04 = {
'name': 'ubuntu-16.04',
'catalog_item': 'ubuntu-16.04-server-cloudimg-amd64-k8s',
'source_ova_name': 'ubuntu-16.04-server-cloudimg-amd64.ova',
'source_ova': 'https://cloud-images.ubuntu.com/releases/xenial/release-201\
80418/ubuntu-16.04-server-cloudimg-amd64.ova',
'sha256_ova': '3c1bec8e2770af5b9b0462e20b7b24633666feedff43c099a6fb1330fcc\
869a9',
'temp_vapp': 'ubuntu1604-temp',
'cleanup': True,
'cpu': 2,
'mem': 2048,
'admin_password': '<PASSWORD>',
'description': 'Ubuntu 16.04\nDocker 18.06.2~ce\nKubernetes 1.10.11\nweave\
2.3.0'
}
SAMPLE_BROKER_CONFIG = {
'broker': {
'type': 'default',
'org': 'myorg',
'vdc': 'myorgvdc',
'catalog': 'cse',
'network': 'mynetwork',
'ip_allocation_mode': 'pool',
'storage_profile': '*',
'default_template': SAMPLE_TEMPLATE_PHOTON_V2['name'],
'templates': [SAMPLE_TEMPLATE_PHOTON_V2, SAMPLE_TEMPLATE_UBUNTU_16_04],
'cse_msg_dir': '/tmp/cse'
}
}
# Key in the CSE config that points at the (optional) PKS config file.
PKS_CONFIG_FILE_LOCATION_SECTION_KEY = 'pks_config'
SAMPLE_PKS_CONFIG_FILE_LOCATION = {
    PKS_CONFIG_FILE_LOCATION_SECTION_KEY: None
}
# Section of the PKS config listing the PKS api servers of the deployment.
PKS_SERVERS_SECTION_KEY = 'pks_api_servers'
SAMPLE_PKS_SERVERS_SECTION = {
    PKS_SERVERS_SECTION_KEY: [
        {
            'name': 'pks-api-server-1',
            'host': 'pks-api-server-1.pks.local',
            'port': '9021',
            'uaac_port': '8443',
            # 'proxy': 'proxy1.pks.local:80',
            'datacenter': 'pks-s1-dc',
            'clusters': ['pks-s1-az-1', 'pks-s1-az-2', 'pks-s1-az-3'],
            'cpi': 'cpi1',
            'vc': 'vc1',
            'verify': True
        }, {
            'name': 'pks-api-server-2',
            'host': 'pks-api-server-2.pks.local',
            'port': '9021',
            'uaac_port': '8443',
            # 'proxy': 'proxy2.pks.local:80',
            'datacenter': 'pks-s2-dc',
            'clusters': ['pks-s2-az-1', 'pks-s2-az-2', 'pks-s2-az-3'],
            'cpi': 'cpi2',
            'vc': 'vc2',
            'verify': True
        }
    ]
}
# Section listing the accounts usable against each PKS api server;
# 'pks_api_server' entries must match names in the servers section above.
PKS_ACCOUNTS_SECTION_KEY = 'pks_accounts'
SAMPLE_PKS_ACCOUNTS_SECTION = {
    PKS_ACCOUNTS_SECTION_KEY: [
        {
            'name': 'Org1ServiceAccount1',
            'pks_api_server': 'pks-api-server-1',
            'secret': 'secret',
            'username': 'org1Admin'
        }, {
            'name': 'Org1ServiceAccount2',
            'pks_api_server': 'pks-api-server-2',
            'secret': 'secret',
            'username': 'org1Admin'
        }, {
            'name': 'Org2ServiceAccount',
            'pks_api_server': 'pks-api-server-2',
            'secret': 'secret',
            'username': 'org2Admin'
        }
    ]
}
# Org to PKS-account mapping section (currently not emitted in the sample).
PKS_ORGS_SECTION_KEY = 'orgs'
SAMPLE_PKS_ORGS_SECTION = {
    PKS_ORGS_SECTION_KEY: [
        {
            'name': 'Org1',
            'pks_accounts': ['Org1ServiceAccount1', 'Org1ServiceAccount2']
        }, {
            'name': 'Org2',
            'pks_accounts': ['Org2ServiceAccount']
        }
    ]
}
# Section mapping Provider VDCs to PKS api servers and their clusters.
PKS_PVDCS_SECTION_KEY = 'pvdcs'
SAMPLE_PKS_PVDCS_SECTION = {
    PKS_PVDCS_SECTION_KEY: [
        {
            'name': 'pvdc1',
            'pks_api_server': 'pks-api-server-1',
            'cluster': 'pks-s1-az-1',
        }, {
            'name': 'pvdc2',
            'pks_api_server': 'pks-api-server-2',
            'cluster': 'pks-s2-az-1'
        }, {
            'name': 'pvdc3',
            'pks_api_server': 'pks-api-server-1',
            'cluster': 'pks-s1-az-2'
        }
    ]
}
# Section listing the NSX-T servers paired with each PKS api server.
PKS_NSXT_SERVERS_SECTION_KEY = 'nsxt_servers'
SAMPLE_PKS_NSXT_SERVERS_SECTION = {
    PKS_NSXT_SERVERS_SECTION_KEY: [
        {
            'name': 'nsxt-server-1',
            'host': 'nsxt1.domain.local',
            'username': 'admin',
            'password': '<PASSWORD>',
            'pks_api_server': 'pks-api-server-1',
            # 'proxy': 'proxy1.pks.local:80',
            'nodes_ip_block_ids': ['id1', 'id2'],
            'pods_ip_block_ids': ['id1', 'id2'],
            'distributed_firewall_section_anchor_id': 'id',
            'verify': True
        }, {
            'name': 'nsxt-server-2',
            'host': 'nsxt2.domain.local',
            'username': 'admin',
            'password': '<PASSWORD>',
            'pks_api_server': 'pks-api-server-2',
            # 'proxy': 'proxy2.pks.local:80',
            'nodes_ip_block_ids': ['id1', 'id2'],
            'pods_ip_block_ids': ['id1', 'id2'],
            'distributed_firewall_section_anchor_id': 'id',
            'verify': True
        }
    ]
}
def generate_sample_config(output=None, pks_output=None):
    """Generate sample configs for cse.

    If config file names are provided, the generated configs are also
    dumped into the respective files.

    :param str output: name of the config file to dump the CSE configs.
    :param str pks_output: name of the PKS config file to dump the PKS
        configs.

    :return: sample CSE config text, followed by the PKS config note and
        the sample PKS config text.

    :rtype: str
    """
    # Assemble the CSE sample config section by section.
    sample_config = yaml.safe_dump(SAMPLE_AMQP_CONFIG,
                                   default_flow_style=False) + '\n'
    sample_config += yaml.safe_dump(SAMPLE_VCD_CONFIG,
                                    default_flow_style=False) + '\n'
    sample_config += yaml.safe_dump(SAMPLE_VCS_CONFIG,
                                    default_flow_style=False) + '\n'
    sample_config += yaml.safe_dump(SAMPLE_SERVICE_CONFIG,
                                    default_flow_style=False) + '\n'
    sample_config += yaml.safe_dump(SAMPLE_BROKER_CONFIG,
                                    default_flow_style=False) + '\n'
    sample_config += NOTE_FOR_PKS_KEY_IN_CONFIG_FILE + '\n'
    # Point the CSE config at the PKS config file: the real location when
    # one was requested, else the sample placeholder.
    if pks_output:
        pks_config_location_dict = {
            PKS_CONFIG_FILE_LOCATION_SECTION_KEY: f"{pks_output}"
        }
        sample_config += yaml.safe_dump(pks_config_location_dict,
                                        default_flow_style=False)
    else:
        sample_config += yaml.safe_dump(SAMPLE_PKS_CONFIG_FILE_LOCATION,
                                        default_flow_style=False)
    # Assemble the PKS sample config.
    sample_pks_config = yaml.safe_dump(
        SAMPLE_PKS_SERVERS_SECTION, default_flow_style=False) + '\n'
    sample_pks_config += yaml.safe_dump(
        SAMPLE_PKS_ACCOUNTS_SECTION, default_flow_style=False) + '\n'
    # Org - PKS account mapping section will be supressed for CSE 2.0 alpha
    # sample_pks_config += yaml.safe_dump(
    #    SAMPLE_PKS_ORGS_SECTION, default_flow_style=False) + '\n'
    sample_pks_config += yaml.safe_dump(
        SAMPLE_PKS_PVDCS_SECTION, default_flow_style=False) + '\n'
    sample_pks_config += yaml.safe_dump(
        SAMPLE_PKS_NSXT_SERVERS_SECTION, default_flow_style=False)
    # Write explicitly as UTF-8 so output does not depend on the platform's
    # default locale encoding.
    if output:
        with open(output, 'w', encoding='utf-8') as f:
            f.write(sample_config)
    if pks_output:
        with open(pks_output, 'w', encoding='utf-8') as f:
            f.write(f"{INSTRUCTIONS_FOR_PKS_CONFIG_FILE}\n{sample_pks_config}")
    return sample_config.strip() + '\n\n' + PKS_CONFIG_NOTE + '\n\n' + \
        sample_pks_config.strip()
def get_validated_config(config_file_name):
"""Get the config file as a dictionary and check for validity.
Ensures that all properties exist and all values are the expected type.
Checks that AMQP connection is available, and vCD/VCs are valid.
Does not guarantee that CSE has been | |
# Repository: pyviz/nbsite
import os
import glob
import logging
import requests
import sphinx.util
try:
    import bs4
# Catch only ImportError: a bare `except:` would also hide real errors
# raised while importing bs4 (and even KeyboardInterrupt).
except ImportError:
    bs4 = None  # bs4 is optional; deployment scraping is skipped without it
from .thumbnailer import notebook_thumbnail, execute
# Module-level logger used by the gallery builder.
logger = sphinx.util.logging.getLogger('nbsite-gallery')
# Silence per-request connection noise from urllib3 while probing URLs.
logging.getLogger(requests.packages.urllib3.__package__).setLevel(logging.ERROR)
BUTTON_GROUP_TEMPLATE = """
.. raw:: html
<script>
function gallery_toggle(input) {{
backends = {backends};
for (i in backends) {{
entries = $('.'+backends[i]+'_example');
if (backends[i] == input) {{
entries.show();
}} else {{
entries.hide()
}}
}}
}}
</script>
<ul class="tab">
{buttons}
</ul>
"""
BUTTON_TEMPLATE = """
<li>
<input id="tab{N}" {checked} type="radio" name="tab" onclick="gallery_toggle('{label}'.toLowerCase())" />
<label for="tab{N}">{label}</label>
</li>
"""
HIDE_JS = """
.. raw:: html
<script>
$(document).ready(function () {{
backends = {backends};
for (var i=0; i<backends.length; i++){{
$('.'+backends[i]+'_example').hide();
}}
}});
</script>
"""
CLEAR_DIV = """
.. raw:: html
<div style='clear:both'></div>
"""
THUMBNAIL_URL = 'https://assets.holoviews.org/thumbnails'
PREFIX = """
# -*- coding: utf-8 -*-
import holoviews as hv
from pyviz_comms import Comm
try:
import holoviews.plotting.mpl
hv.Store.renderers['matplotlib'].comms['default'] = (Comm, '')
except:
pass
try:
import holoviews.plotting.bokeh
hv.Store.renderers['bokeh'].comms['default'] = (Comm, '')
except:
pass
try:
import holoviews.plotting.widgets as hw
hw.NdWidget.export_json=True
hw.NdWidget.json_load_path = './'
hw.NdWidget.json_save_path = './'
del hw
except:
pass
hv.plotting.mpl.MPLPlot.fig_alpha = 0
hv.plotting.bokeh.callbacks.Callback._comm_type = Comm
"""
# Default intro paragraph for the reference gallery index page.
REFERENCE_INTRO="""
The gallery presents the various components made available by
HoloViews from which you can build new visualizations. If you wish to
see a collection of more involved examples, see the `Gallery
<../gallery/index.html>`_. To get started with HoloViews, see our
`Getting Started <../getting_started/index.html>`_ guide and for more
detailed documentation our `User Guide <../user_guide/index.html>`_.
"""
# Default intro paragraph for the main gallery index page.
GALLERY_INTRO="""
The gallery shows the breadth of what HoloViews is capable of with a
varied collection of examples. If you are looking for a specific
component (or wish to view the available range of primitives), see our
`Reference Gallery <../reference/index.html>`_. To get started with
HoloViews, see our `Getting Started <../getting_started/index.html>`_
guide and for more detailed documentation our `User Guide
<../user_guide/index.html>`_.
"""
THUMBNAIL_TEMPLATE = """
.. raw:: html
<div class="sphx-glr-thumbcontainer {backend}example" tooltip="{label}">
.. figure:: /{thumbnail}
:ref:`{label} <{prefix}gallery_{ref_name}>`
.. raw:: html
</div>
"""
IFRAME_TEMPLATE = """
.. raw:: html
<style>
.iframe-container {{
overflow: hidden;
padding-top: 56.25%;
position: relative;
background: url({background}) center center no-repeat;
}}
.iframe-container iframe {{
border: 0;
height: 100%;
left: 0;
position: absolute;
top: 0;
width: 100%;
}}
</style>
<div class="iframe-container">
<iframe src="{url}" width="100%" frameborder="0" onload="this.parentNode.style.background = 'none'"></iframe>
</div>
"""
INLINE_GALLERY_STYLE = """
.. raw:: html
<style>
.sphx-glr-section {
display: inline-block;
vertical-align: top;
padding-right: 20px;
}
</style>
"""
# Defaults for the `nbsite_gallery_conf` Sphinx config value; user-supplied
# keys are looked up against this structure throughout this module.
DEFAULT_GALLERY_CONF = {
    'backends': None,  # plotting backends offered by the toggle buttons
    'default_extensions': ['*.ipynb', '*.py'],  # globs for example files
    'enable_download': True,
    'only_use_existing': False,  # never regenerate thumbnails when True
    'examples_dir': os.path.join('..', 'examples'),  # relative to doc dir
    'labels_dir': 'labels',
    'galleries': {
        'gallery': {
            'backends': [],
            'extensions': ['*.ipynb', '*.py'],
            'intro': 'Sample intro',
            'title': 'A sample gallery title',
            'sections': [],
        }
    },
    'host': 'GitHub', # set this to assets to have download happen from assets
    'download_as': None, # set this to 'project' to use project archives as download
    'github_org': None,
    'github_project': None,
    'deployment_url': None,  # base URL of live deployed examples, if any
    'iframe_spinner': "https://assets.holoviews.org/static/spinner.gif",
    'inline': False,
    'script_prefix': PREFIX,  # prepended to scripts before thumbnailing
    'skip_execute': [],
    'thumbnail_url': THUMBNAIL_URL,
    'thumbnail_size': (400, 280),
    'within_subsection_order': lambda key: key,  # sort key within a section
    'nblink': 'both', # use this to control the position of the nblink
    'github_ref': 'master', # branch or tag
}
def get_deployed_url(deployment_urls, basename):
    """Return the first deployment URL that serves *basename*, if any.

    Each base URL is probed with an HTTP GET, first for the example itself
    and then under a ``notebooks/`` prefix; finally the bare base URLs are
    probed.  Returns the first URL answering 200, or None.

    :param deployment_urls: iterable of base URLs to probe
    :param basename: example file name (with or without the .ipynb suffix)
    """
    for deployment_url in deployment_urls:
        # Test the deployment_url/basename, then deployment_url/notebooks/basename.ipynb
        candidates = [os.path.join(deployment_url,
                                   basename[:-6] if basename.endswith('.ipynb') else basename),
                      os.path.join(deployment_url, 'notebooks',
                                   basename if basename.endswith('ipynb')
                                   else '%s.ipynb' % basename)]
        for candidate in candidates:
            # A dead or hanging server must not abort the docs build, hence
            # the bounded timeout and the broad requests-level catch.
            try:
                r = requests.get(candidate, verify=False, timeout=30)
            except requests.RequestException:
                continue
            if r.status_code == 200:
                return candidate
    # Check deployment_urls directly
    for deployment_url in deployment_urls:
        try:
            r = requests.get(deployment_url, verify=False, timeout=30)
        except requests.RequestException:
            continue
        if r.status_code == 200:
            return deployment_url
    return None
def generate_file_rst(app, src_dir, dest_dir, page, section, backend,
                      img_extension, skip, deployment_urls):
    """Write one .rst page per example file found in *src_dir*.

    Each page gets a cross-reference label, a title, optional download/view
    links (see ``nblink``), the notebook/script content directive, and a
    thumbnail or live-deployment iframe.  Existing .rst files are left
    untouched unless they were generated by nbsite.

    :param app: the Sphinx application (supplies nbsite_gallery_conf)
    :param src_dir: directory containing the example files
    :param dest_dir: directory the .rst files are written into
    :param page: gallery name (key into gallery_conf['galleries'])
    :param section: section sub-directory name, or '' for none
    :param backend: plotting-backend sub-directory name, or None
    :param img_extension: thumbnail image extension (e.g. 'png')
    :param skip: bool to skip execution of all files, or list of basenames
    :param deployment_urls: base URLs probed for deployed versions
    """
    gallery_conf = app.config.nbsite_gallery_conf
    content = gallery_conf['galleries'][page]
    host = gallery_conf['host']
    download_as = gallery_conf['download_as']
    ref = gallery_conf['github_ref']
    nblink = gallery_conf['nblink']
    org = gallery_conf['github_org']
    proj = gallery_conf['github_project']
    examples_dir = gallery_conf['examples_dir']
    skip_execute = gallery_conf['skip_execute']
    endpoint = gallery_conf['deployment_url']
    iframe_spinner = gallery_conf['iframe_spinner']
    extensions = content.get('extensions', gallery_conf['default_extensions'])
    # Path components used to build download links (examples/<page>[/section][/backend])
    components = [examples_dir.split(os.path.sep)[-1], page]
    if section:
        components.append(section)
    if backend:
        components.append(backend)
    files = []
    for extension in extensions:
        files += glob.glob(os.path.join(src_dir, extension))
    # Try to fetch all deployed examples
    deployed_examples = []
    if bs4 and endpoint is not None:
        r = requests.get(endpoint, verify=False)
        if r.status_code == 200:
            soup = bs4.BeautifulSoup(r.content, features='lxml')
            # The deployment server's index markup varies by server version;
            # try each known layout in turn.
            try:
                deployed_examples = [l.text for l in soup.find('div', {"class": "list-group"}).find_all('h4')]
            except:
                deployed_examples = [l.get('id')[1:] for l in soup.find('ul', {"class": "cards-grid"}).find_all('a', {"class": "card-link"})]
            if not deployed_examples:
                deployed_examples = [l.text for l in soup.find('ul').find_all('a')]
    for f in files:
        if isinstance(skip, list) and os.path.basename(f) in skip:
            continue
        extension = f.split('.')[-1]
        basename = os.path.basename(f)
        rel_path = os.path.relpath(os.path.join(src_dir, basename), dest_dir)
        rst_path = os.path.join(dest_dir, basename[:-len(extension)].replace(' ', '_') + 'rst')
        name = basename[:-(len(extension)+1)]
        title = ' '.join([n[0].capitalize()+n[1:] for n in name.replace('_', ' ').split(' ')])
        deployed = name in deployed_examples
        ftype = 'notebook' if extension == 'ipynb' else 'script'
        # Never clobber a hand-written .rst page for this example.
        if os.path.isfile(rst_path):
            with open(rst_path) as existing:
                if not 'Originally generated by nbsite' in existing.read():
                    continue
        with open(rst_path, 'w') as rst_file:
            # Cross-reference label, e.g. "section_backend_gallery_name"
            prefix = '_'.join([p for p in (section, backend, 'gallery') if p])
            rst_file.write('.. _%s_%s:\n\n' % (prefix, name))
            rst_file.write(title+'\n')
            rst_file.write('_'*len(title)+'\n\n')
            deployed_file = get_deployed_url(deployment_urls, basename)
            if nblink in ['top', 'both']:
                add_nblink(rst_file, host, deployed_file, download_as,
                           org, proj, ref, components, basename, ftype, section)
                rst_file.write('\n\n-------\n\n')
            if ftype == 'notebook':
                rst_file.write(".. notebook:: %s %s" % (proj, rel_path))
                # Skip execution for deployed examples, a blanket skip=True,
                # or basenames listed in skip_execute.
                if deployed or (isinstance(skip, bool) and skip) or any(basename.strip().endswith(skipped) for skipped in skip_execute):
                    rst_file.write('\n    :skip_execute: True\n')
                if deployed:
                    rst_file.write(IFRAME_TEMPLATE.format(
                        background=iframe_spinner, url=endpoint+name))
            else:
                rst_file.write('.. literalinclude:: %s\n\n' % rel_path)
                url = os.path.join('thumbnails', '%s.%s' % (name, img_extension))
                rst_file.write('.. figure:: %s\n\n' % url)
            if nblink in ['bottom', 'both']:
                rst_file.write('\n\n-------\n\n')
                add_nblink(rst_file, host, deployed_file, download_as,
                           org, proj, ref, components, basename, ftype, section)
def add_nblink(rst_file, host, deployed_file, download_as,
               org, proj, ref, components, basename, ftype, section):
    """Write the view/download links for one example to an open rst file."""
    if deployed_file:
        rst_file.write(f'`View a running version of this notebook. <{deployed_file}>`_ | ')
    path = '/'.join(components)
    if host == 'GitHub' and org and proj:
        rst_file.write(f'`Download this {ftype} from GitHub (right-click to download).'
                       f' <https://raw.githubusercontent.com/{org}/{proj}/{ref}/{path}/{basename}>`_')
    elif host == 'assets':
        if download_as == 'project':
            rst_file.write(f'`Download this project. </assets/{section}.zip>`_')
        else:
            rst_file.write(f'`Download this {ftype}. </assets/{path}/{basename}>`_')
def _thumbnail_div(path_components, section, backend, fname, extension, normalize=True, title=None):
    """Generates RST to place a thumbnail in a gallery"""
    # Pick the display label: explicit title wins, then a title-cased form
    # of the file name, then the raw file name.
    if title is not None:
        label = title
    else:
        label = fname.replace('_', ' ').title() if normalize else fname
    thumb_path = os.path.join(*path_components + ['thumbnails', '%s.%s' % (fname, extension)])
    # Inside rst files forward slash defines paths
    thumb_path = thumb_path.replace(os.sep, "/")
    prefix = '_'.join(pre for pre in (section, backend) if pre)
    if prefix:
        prefix = prefix + '_'
    css_backend = backend + '_' if backend else ''
    return THUMBNAIL_TEMPLATE.format(
        backend=css_backend, prefix=prefix, thumbnail=thumb_path,
        ref_name=fname, label=label)
def generate_gallery(app, page):
"""
Generates a gallery for all example directories specified in
the gallery_conf. Generates rst files for all found notebooks
and copies the notebooks to doc/gallery/ relative to the supplied
basepath. Also generates thumbnails and an overall index.
"""
# Get config
gallery_conf = app.config.nbsite_gallery_conf
content = gallery_conf['galleries'][page]
backends = content.get('backends', gallery_conf.get('backends', []))
titles = content.get('titles', {})
normalize = content.get('normalize_titles', True)
# Get directories
doc_dir = app.builder.srcdir
examples_dir = os.path.join(doc_dir, gallery_conf['examples_dir'])
gallery_dir = os.path.join(examples_dir, page)
static_dir = app.config.html_static_path[-1]
static_path = os.path.join(
os.path.split(gallery_conf['examples_dir'])[-2], static_dir)
labels_dir = gallery_conf['labels_dir']
labels_path = os.path.join(static_path, labels_dir)
logo = app.config.html_theme_options.get('logo', 'images/logo.png')
logo_path = os.path.join(static_path, logo)
if 'sections' in content:
sections = content['sections']
else:
sections = [s for s in glob.glob(os.path.join(gallery_dir, '*'))
if os.path.isdir(os.path.join(gallery_dir, s)) and
not any(s.endswith(b) for b in backends)]
if not sections:
sections = ['']
extensions = content.get('extensions', gallery_conf['default_extensions'])
sort_fn = gallery_conf['within_subsection_order']
thumbnail_url = gallery_conf['thumbnail_url']
download = gallery_conf['enable_download']
script_prefix = gallery_conf['script_prefix']
only_use_existing = gallery_conf['only_use_existing']
inline = gallery_conf['inline']
# Write gallery index
title = content['title']
gallery_rst = title + '\n' + '_'*len(title) + '\n'
if 'intro' in content:
gallery_rst += '\n' + content['intro'] + '\n'
if backends:
buttons = []
for n, backend in enumerate(backends):
buttons.append(BUTTON_TEMPLATE.format(N=n+1, checked='' if n else 'checked="checked"',
label=backend.capitalize()))
gallery_rst += BUTTON_GROUP_TEMPLATE.format(buttons=''.join(buttons), backends=backends)
if inline:
gallery_rst += INLINE_GALLERY_STYLE
for section in sections:
if isinstance(section, dict):
section_backends = section.get('backends', backends)
skip = section.get('skip', content.get('skip', False))
orphans = section.get('orphans', content.get('orphans', []))
heading = section.get('title', section['path'])
description = section.get('description', None)
labels = section.get('labels', [])
subsection_order = section.get('within_subsection_order', sort_fn)
deployment_urls = section.get('deployment_urls', [])
section = section['path']
else:
heading = section.title()
skip = content.get('skip', False)
orphans = content.get('orphans', [])
section_backends = backends
subsection_order = sort_fn
description = None
labels = []
deployment_urls = []
if not heading:
gallery_rst += '\n\n.. raw:: html\n\n <div class="section sphx-glr-section" id="section"></div><br>\n\n'
elif inline:
gallery_rst += f'\n\n.. toctree::\n :glob:\n :hidden:\n :maxdepth: 2\n\n {section}/*'
else:
underline = '-'*len(heading)
gallery_rst += f'\n\n{heading}\n{underline}\n\n'
if section:
gallery_rst += f'\n\n.. toctree::\n :glob:\n :hidden:\n\n {heading}\n {section}/*\n\n'
else:
gallery_rst += f'\n\n.. toctree::\n :glob:\n :hidden:\n\n {heading}\n *\n\n'
if labels:
gallery_rst += '\n\n.. raw:: html\n\n'
| |
import sqlite3
import pandas
import itertools
import networkx as nx
from gtfspy.gtfs import GTFS
from gtfspy.util import timeit
from scripts.all_to_all_settings import *
def attach_database(conn, other_db_path, name="other"):
    """ATTACH the sqlite database at *other_db_path* to *conn* as schema *name*.

    :param conn: open sqlite3.Connection
    :param other_db_path: path of the database file to attach
    :param name: schema name the attached database becomes visible under
    :return: the same connection, with the extra database attached
    """
    cur = conn.cursor()
    # Bind the file path as a parameter: the previous '%s' interpolation
    # broke on any path containing a single quote.  The schema name is an
    # identifier and cannot be bound, so it is still interpolated (callers
    # pass trusted literals such as "before"/"after").
    cur.execute("ATTACH ? AS '%s'" % name, (str(other_db_path),))
    cur.execute("PRAGMA database_list")
    print("other database attached:", cur.fetchall())
    return conn
"""
AllToAllDifferenceAnalyzer calculates the difference between various summary statistics of temporal distance and number
of boardings, stores the values in a database and handles calls to this database.
"""
def stops_to_exclude(return_sqlite_list=False):
    """Return the stops outside the study area.

    :param return_sqlite_list: when True, return an SQL-ready "(id,id,...)"
        string of stop_I values instead of a DataFrame
    """
    lm_gtfs = GTFS(LM_DICT["gtfs_dir"])
    excluded = lm_gtfs.execute_custom_query_pandas(
        "SELECT * FROM stops WHERE CASE WHEN substr(stop_id,1, 5) = '__b__' THEN CAST(substr(stop_id,6, 1) AS integer) ELSE CAST(substr(stop_id,1, 1) AS integer) END >4")
    if not return_sqlite_list:
        return excluded
    id_strings = (str(stop) for stop in excluded["stop_I"].tolist())
    return "(" + ",".join(id_strings) + ")"
class AllToAllDifferenceAnalyzer:
    def __init__(self, gtfs_path, before_db_path, after_db_path, output_db):
        """Open the output database and attach the before/after result databases.

        :param gtfs_path: path to the GTFS sqlite database
        :param before_db_path: results database for the "before" scenario
        :param after_db_path: results database for the "after" scenario
        :param output_db: path of the database diff tables are written to
        """
        self.gtfs = GTFS(gtfs_path)
        print(output_db)
        self._create_indecies(before_db_path)
        self._create_indecies(after_db_path)
        self.conn = sqlite3.connect(output_db)
        # Queries in this class address these as "before." / "after." schemas.
        self.conn = attach_database(self.conn, before_db_path, name="before")
        self.conn = attach_database(self.conn, after_db_path, name="after")
def _create_indecies(self, db_path):
conn = sqlite3.connect(db_path)
cur = conn.cursor()
for table in ["journey_duration", "n_boardings", "temporal_distance"]:
query = """CREATE INDEX IF NOT EXISTS %s_from_stop_I_idx ON %s (from_stop_I);
CREATE INDEX IF NOT EXISTS %s_to_stop_I_idx ON %s (to_stop_I);""" % (table, table, table, table)
conn.commit()
def diff_table(self, groupby="to_stop_I", measure="temporal_distance", ignore_stops=None):
"""
Creates a table with the before-after difference of mean, min and max temporal distance or number of boardings
on a stop to stop basis
:return:
"""
cur = self.conn.cursor()
query = """DROP TABLE IF EXISTS diff_{groupby}_{measure}""".format(measure=measure, groupby=groupby)
cur.execute(query)
multiplier = 1
first = 0.5
second = 1
third = 1.5
threshold = 10800 # threshold for change in mean temporal distance
if measure == "temporal_distance" or "journey_duration":
multiplier = 60
first = 5
second = 10
third = 20
first_str = str(first).replace(".", "_")
second_str = str(second).replace(".", "_")
third_str = str(third).replace(".", "_")
if ignore_stops:
ignore_stops = " AND t1.to_stop_I NOT IN " + ignore_stops + " AND t1.from_stop_I NOT IN " + ignore_stops
else:
ignore_stops = ""
query = """CREATE TABLE IF NOT EXISTS diff_{groupby}_{measure} ({groupby} INT, min_diff_mean REAL, mean_diff_mean REAL,
max_diff_mean REAL, incr_count_over_{0} INT, incr_count_over_{1} INT, incr_count_over_{2} INT,
decr_count_over_{0} INT, decr_count_over_{1} INT, decr_count_over_{2} INT )
""".format(first_str, second_str, third_str,
measure=measure, groupby=groupby)
cur.execute(query)
query = """INSERT OR REPLACE INTO diff_{groupby}_{measure} ({groupby}, min_diff_mean, mean_diff_mean, max_diff_mean,
incr_count_over_{first_str}, incr_count_over_{second_str}, incr_count_over_{third_str},
decr_count_over_{first_str}, decr_count_over_{second_str}, decr_count_over_{third_str})
SELECT {groupby}, min(diff_mean) AS min_diff_mean, avg(diff_mean) AS mean_diff_mean,
max(diff_mean) AS max_diff_mean,
sum(CASE WHEN diff_mean >= {0}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{first_str},
sum(CASE WHEN diff_mean >= {1}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{second_str},
sum(CASE WHEN diff_mean >= {2}*{multiplier} THEN 1 ELSE 0 END) AS incr_count_over_{third_str},
sum(CASE WHEN diff_mean <= -{0}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{first_str},
sum(CASE WHEN diff_mean <= -{1}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{second_str},
sum(CASE WHEN diff_mean <= -{2}*{multiplier} THEN 1 ELSE 0 END) AS decr_count_over_{third_str}
FROM
(SELECT t1.from_stop_I AS from_stop_I, t1.to_stop_I AS to_stop_I, t2.mean-t1.mean AS diff_mean
FROM before.{measure} AS t1, after.{measure} AS t2
WHERE t1.from_stop_I = t2.from_stop_I AND t1.to_stop_I = t2.to_stop_I {ignore_stops}
AND abs(t2.mean-t1.mean) < {threshold}) q1
GROUP BY {groupby}""".format(first, second, third,
first_str=first_str, second_str=second_str, third_str=third_str,
measure=measure,
groupby=groupby, multiplier=multiplier, threshold=threshold,
ignore_stops=ignore_stops)
cur.execute(query)
self.conn.commit()
    def get_mean_change_for_all_targets(self, groupby="to_stop_I", measure="temporal_distance", ignore_stops=None):
        """Return a pre-generated diff table (see diff_table) as a DataFrame.

        Stop coordinates are joined in as lat/lon columns; for temporal
        distance the mean change is converted from seconds to minutes.

        :param groupby: "to_stop_I" or "from_stop_I" designating if calculating the measure to the target or from the target
        :param measure: "temporal_distance", "n_boardings"
        :param ignore_stops: accepted but currently UNUSED -- the filtering
            implementation is disabled; kept here for reference:

            if ignore_stops:
                ignore_stops = " WHERE " + groupby + " IN " + ignore_stops
            else:
                ignore_stops = ""

        :return: pandas DataFrame
        """
        query = """SELECT * FROM diff_{groupby}_{measure}""".format(measure=measure, groupby=groupby)
        print("running query")
        df = pandas.read_sql_query(query, self.conn)
        df = self.gtfs.add_coordinates_to_df(df, stop_id_column=groupby, lat_name="lat", lon_name="lon")
        if measure == "temporal_distance":
            # seconds -> minutes
            df["mean_diff_mean"] = df["mean_diff_mean"].apply(lambda x: x / 60)
        return df
    def extreme_change_od_pairs(self, threshold):
        """
        Returns O-D pairs where the absolute change is larger than the threshold. Returns increase in travel time with
        positive thresholds and decrease in travel time with negative thresholds
        :param threshold: int (seconds; the sign selects the direction)
        :return: Pandas DataFrame
        """
        # Positive threshold -> pairs that got at least `threshold` slower;
        # negative -> pairs that got at least |threshold| faster.
        if threshold < 0:
            string_to_add = " <= " + str(threshold)
        else:
            string_to_add = " >= " + str(threshold)
        # The `< 10800` cap matches the threshold used elsewhere in this
        # class to drop outlier/unreachable pairs.
        query = """SELECT t1.from_stop_I AS from_stop_I, t1.to_stop_I AS to_stop_I, t2.mean-t1.mean AS diff_mean
                   FROM before.temporal_distance AS t1, after.temporal_distance AS t2
                   WHERE t1.from_stop_I = t2.from_stop_I AND t1.to_stop_I = t2.to_stop_I
                   AND t2.mean-t1.mean %s AND t2.mean-t1.mean < 10800""" % (string_to_add,)
        df = pandas.read_sql_query(query, self.conn)
        return df
def get_global_mean_change(self, measure, threshold=10800, ignore_stops=False):
ignore_list = ""
if ignore_stops:
ignore_list=stops_to_exclude(return_sqlite_list=True)
query = """SELECT before_global_mean, after_global_mean, after_global_mean-before_global_mean AS global_mean_difference FROM
(SELECT avg(mean) AS before_global_mean FROM before.{measure} WHERE mean <= {threshold} AND mean >0
AND from_stop_I NOT IN {ignore_stops} AND to_stop_I NOT IN {ignore_stops}) t1,
(SELECT avg(mean) AS after_global_mean FROM after.{measure} WHERE mean <= {threshold} AND mean >0
AND from_stop_I NOT IN {ignore_stops} AND to_stop_I NOT IN {ignore_stops}) t2
""".format(measure=measure, threshold=threshold, ignore_stops=ignore_list)
df = pandas.read_sql_query(query, self.conn)
return df
    @timeit
    def get_rows_with_abs_change_greater_than_n(self, stops, measure, n, sign, unit="s"):
        """Return before/after/diff values for O-D pairs whose change in
        temporal distance satisfies ``diff sign n``, excluding *stops*.

        :param stops: iterable of stop_I values excluded on both ends
        :param measure: column name in the temporal_distance tables
        :param n: threshold value (same unit as the stored column, seconds)
        :param sign: comparison operator string, e.g. ">=" or "<="
        :param unit: "s", "m" or "h" -- output columns are divided accordingly
        :return: pandas DataFrame
        """
        stops = ",".join([str(x) for x in stops])
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        query = """SELECT t1.{measure}/{divisor} AS before_{measure}, t2.{measure}/{divisor} AS after_{measure},
                   (t2.{measure}-t1.{measure})/{divisor} AS diff_{measure} FROM before.temporal_distance AS t1,
                   after.temporal_distance AS t2
                   WHERE t1.from_stop_I != t1.to_stop_I AND t1.from_stop_I = t2.from_stop_I
                   AND t1.to_stop_I = t2.to_stop_I AND t1.from_stop_I NOT IN ({stops})
                   AND t2.to_stop_I NOT IN ({stops})
                   AND t2.{measure}-t1.{measure} {sign} {n}""".format(measure=measure,
                                                                      divisor=divisor,
                                                                      stops=stops,
                                                                      n=n,
                                                                      sign=sign)
        df = pandas.read_sql_query(query, self.conn)
        return df
    @timeit
    def get_rows_based_on_stop_list(self, from_stops, to_stops, measure, measure_mode, unit="s"):
        """Return before/after/diff values for all O-D pairs going from
        *from_stops* to *to_stops*.

        :param from_stops: list of origin stop_I values
        :param to_stops: list of destination stop_I values
        :param measure: string (mean, min, max, median) -- column to compare
        :param measure_mode: "n_boardings" or "temporal_distance" -- table name
        :param unit: "s", "m" or "h" -- output columns are divided accordingly
        :return: pandas DataFrame
        """
        assert measure_mode in ["n_boardings", "temporal_distance"]
        from_stops = ",".join([str(x) for x in from_stops])
        to_stops = ",".join([str(x) for x in to_stops])
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        query = """SELECT t1.{measure}/{divisor} AS before_{measure}, t2.{measure}/{divisor} AS after_{measure},
                   (t2.{measure}-t1.{measure})/{divisor} AS diff_{measure} FROM before.{mode} AS t1,
                   after.{mode} AS t2
                   WHERE t1.from_stop_I != t1.to_stop_I AND t1.from_stop_I = t2.from_stop_I
                   AND t1.to_stop_I = t2.to_stop_I AND t1.from_stop_I IN ({from_stops})
                   AND t2.to_stop_I IN ({to_stops})""".format(measure=measure,
                                                              mode=measure_mode,
                                                              divisor=divisor,
                                                              from_stops=from_stops,
                                                              to_stops=to_stops)
        df = pandas.read_sql_query(query, self.conn)
        return df
    def get_data_for_target(self, target, measure, direction="to", threshold=10800, unit="s", ignore_stops=False):
        """Return all O-D rows involving *target*, with absolute and relative change.

        :param target: stop_I of the target stop
        :param measure: table name, e.g. "temporal_distance"
        :param direction: "to" or "from" -- which end of the pair the target is
        :param threshold: rows with mean above this (seconds) are dropped
        :param unit: "s", "m" or "h" -- absolute columns divided accordingly
        :param ignore_stops: when True, exclude the out-of-area stops
        :return: pandas DataFrame
        """
        divisors = {"s": 1, "m": 60, "h": 3600}
        divisor = divisors[unit]
        ignore_list = ""
        if ignore_stops:
            ignore_list = stops_to_exclude(return_sqlite_list=True)
            ignore_list = " AND t1.from_stop_I NOT IN {ignore_list} AND t1.to_stop_I NOT IN {ignore_list}".format(ignore_list=ignore_list)
        # COALESCE guards the relative change against division when the
        # before mean is missing/NULL.
        query = """SELECT t1.from_stop_I, t1.to_stop_I, t1.mean/{divisor} AS before_mean, t2.mean/{divisor} AS after_mean,
                   (t2.mean-t1.mean)/{divisor} AS diff_mean, COALESCE((t2.mean/t1.mean)- 1, 0) AS diff_mean_relative
                   FROM before.{measure} t1, after.{measure} t2
                   WHERE t1.from_stop_I=t2.from_stop_I AND t1.to_stop_I=t2.to_stop_I AND t1.mean <= {threshold}
                   AND t2.mean <= {threshold}
                   AND t1.{direction}_stop_I={target} {ignore_list}""".format(measure=measure,
                                                                              target=target,
                                                                              direction=direction,
                                                                              threshold=threshold,
                                                                              divisor=divisor,
                                                                              ignore_list=ignore_list)
        df = pandas.read_sql_query(query, self.conn)
        return df
def get_mean_change(self, measure, threshold=10800, descening_order=False, include_list=None):
if descening_order:
order_by = "DESC"
else:
order_by = "ASC"
include_list = "(" + ",".join([str(x) for x in include_list]) + ")"
query = """SELECT t1.to_stop_I, t2.mean AS before, t2.mean-t1.mean AS diff_mean FROM
(SELECT to_stop_I, avg(mean) AS mean FROM before.{measure}
WHERE mean <= {threshold} AND to_stop_I IN {include_list}
GROUP BY to_stop_I) t1,
(SELECT to_stop_I, avg(mean) AS mean FROM after.{measure}
WHERE mean <= {threshold} AND to_stop_I IN {include_list}
GROUP BY to_stop_I) t2
WHERE t1.to_stop_I=t2.to_stop_I
ORDER BY diff_mean {order_by}
""".format(measure=measure,
threshold=threshold,
order_by=order_by,
include_list=include_list)
df = pandas.read_sql_query(query, self.conn)
return df
    def get_n_winning_targets_using_change_in_mean(self, n, measure, distance=500, threshold=10800, losers=False, include_list=None):
        """Pick the *n* most improved (or worsened) targets, spatially thinned.

        Targets are ranked by change in the mean of *measure*; once a target
        is picked, all stops within *distance* meters of it are excluded so
        the winners are spread out geographically.

        :param n: number of targets to return
        :param measure: table name, e.g. "temporal_distance"
        :param distance: exclusion radius around each picked target (meters)
        :param threshold: means above this value are discarded
        :param losers: when True rank by largest increase instead of decrease
        :param include_list: iterable of stop_I values to restrict targets to.
            NOTE(review): passing None raises a TypeError on the join below --
            confirm whether callers ever rely on a None default.
        :return: pandas DataFrame restricted to the picked targets
        """
        if losers:
            order_by = "DESC"
        else:
            order_by = "ASC"
        include_list = "(" + ",".join([str(x) for x in include_list]) + ")"
        query = """SELECT t1.to_stop_I, t2.mean-t1.mean AS diff_mean FROM
                   (SELECT to_stop_I, avg(mean) AS mean FROM before.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t1,
                   (SELECT to_stop_I, avg(mean) AS mean FROM after.{measure}
                   WHERE mean <= {threshold} AND to_stop_I IN {include_list}
                   GROUP BY to_stop_I) t2
                   WHERE t1.to_stop_I=t2.to_stop_I
                   ORDER BY diff_mean {order_by}
                """.format(measure=measure,
                           threshold=threshold,
                           order_by=order_by,
                           include_list=include_list)
        df = pandas.read_sql_query(query, self.conn)
        # exclude nearby stops
        nearby_excluded_stops = []
        stops_remaining = []
        gtfs = GTFS(GTFS_PATH)
        for value in df.itertuples():
            if not value.to_stop_I in nearby_excluded_stops:
                exclude_df = gtfs.get_stops_within_distance(value.to_stop_I, distance)
                nearby_excluded_stops += list(exclude_df["stop_I"])
                stops_remaining.append(value.to_stop_I)
            if len(stops_remaining) == n:
                break
        df = df.loc[df['to_stop_I'].isin(stops_remaining)]
        return df
def n_inf_stops_per_stop(self, measure, indicator, | |
import abc
import asyncio
import base64
import hashlib
import keyword
import os
import re
from contextlib import contextmanager
from pathlib import Path
from types import MappingProxyType
from typing import ( # noqa
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Container,
Dict,
Generator,
Iterable,
Iterator,
List,
Mapping,
Optional,
Set,
Sized,
Tuple,
Type,
Union,
cast,
)
from yarl import URL
from . import hdrs
from .abc import AbstractMatchInfo, AbstractRouter, AbstractView
from .helpers import DEBUG
from .http import HttpVersion11
from .typedefs import PathLike
from .web_exceptions import (
HTTPException,
HTTPExpectationFailed,
HTTPForbidden,
HTTPMethodNotAllowed,
HTTPNotFound,
)
from .web_fileresponse import FileResponse
from .web_request import Request
from .web_response import Response, StreamResponse
from .web_routedef import AbstractRouteDef
# Public API of the dispatcher module.
__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
           'AbstractResource', 'Resource', 'PlainResource', 'DynamicResource',
           'AbstractRoute', 'ResourceRoute',
           'StaticResource', 'View')
if TYPE_CHECKING:  # pragma: no cover
    from .web_app import Application  # noqa
    BaseDict = Dict[str, str]
else:
    BaseDict = dict
# Valid HTTP method name: an RFC 7230 "token" (no separators/whitespace).
HTTP_METHOD_RE = re.compile(r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$")
# Matches {name} / {name:pattern} placeholders in route paths, allowing one
# level of nested braces inside the pattern.
ROUTE_RE = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
PATH_SEP = re.escape('/')
# Callable signatures used throughout the dispatcher.
_WebHandler = Callable[[Request], Awaitable[StreamResponse]]
_ExpectHandler = Callable[[Request], Awaitable[None]]
_Resolve = Tuple[Optional[AbstractMatchInfo], Set[str]]
class AbstractResource(Sized, Iterable['AbstractRoute']):
    """Base class for entries of a router's resource table.

    A resource represents one path (plain, dynamic or static prefix) and
    holds the routes registered for it; it is sized and iterable over those
    routes.
    """
    def __init__(self, *, name: Optional[str]=None) -> None:
        self._name = name
    @property
    def name(self) -> Optional[str]:
        # Optional name used for URL building lookups; None when unnamed.
        return self._name
    @property
    @abc.abstractmethod
    def canonical(self) -> str:
        """Exposes the resource's canonical path.
        For example '/foo/bar/{name}'
        """
    @abc.abstractmethod  # pragma: no branch
    def url_for(self, **kwargs: str) -> URL:
        """Construct url for resource with additional params."""
    @abc.abstractmethod  # pragma: no branch
    async def resolve(self, request: Request) -> _Resolve:
        """Resolve resource
        Return (UrlMappingMatchInfo, allowed_methods) pair."""
    @abc.abstractmethod
    def add_prefix(self, prefix: str) -> None:
        """Add a prefix to processed URLs.
        Required for subapplications support.
        """
    @abc.abstractmethod
    def get_info(self) -> Dict[str, Any]:
        """Return a dict with additional info useful for introspection"""
    def freeze(self) -> None:
        # No-op by default; subclasses may lock mutable state here.
        pass
    @abc.abstractmethod
    def raw_match(self, path: str) -> bool:
        """Perform a raw match against path"""
class AbstractRoute(abc.ABC):
    """One (HTTP method, handler) binding, usually attached to a resource.

    Validates the method name and handler type on construction and stores
    the Expect-header handler (falling back to the default one).
    """
    def __init__(self, method: str,
                 handler: Union[_WebHandler, Type[AbstractView]], *,
                 expect_handler: Optional[_ExpectHandler]=None,
                 resource: Optional[AbstractResource]=None) -> None:
        if expect_handler is None:
            expect_handler = _default_expect_handler
        assert asyncio.iscoroutinefunction(expect_handler), \
            'Coroutine is expected, got {!r}'.format(expect_handler)
        method = method.upper()
        if not HTTP_METHOD_RE.match(method):
            raise ValueError("{} is not allowed HTTP method".format(method))
        # Handlers must be coroutine functions or AbstractView subclasses.
        if asyncio.iscoroutinefunction(handler):
            pass
        elif isinstance(handler, type) and issubclass(handler, AbstractView):
            pass
        else:
            raise TypeError("Only async functions are allowed as web-handlers "
                            ", got {!r}".format(handler))
        self._method = method
        self._handler = handler
        self._expect_handler = expect_handler
        self._resource = resource
    @property
    def method(self) -> str:
        # Normalized (upper-cased) HTTP method name.
        return self._method
    @property
    def handler(self) -> _WebHandler:
        return self._handler
    @property
    @abc.abstractmethod
    def name(self) -> Optional[str]:
        """Optional route's name, always equals to resource's name."""
    @property
    def resource(self) -> Optional[AbstractResource]:
        return self._resource
    @abc.abstractmethod
    def get_info(self) -> Dict[str, Any]:
        """Return a dict with additional info useful for introspection"""
    @abc.abstractmethod  # pragma: no branch
    def url_for(self, *args: str, **kwargs: str) -> URL:
        """Construct url for route with additional params."""
    async def handle_expect_header(self, request: Request) -> None:
        """Delegate Expect-header processing to the configured handler."""
        await self._expect_handler(request)
class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo):
    """Result of a successful route resolution.

    Behaves as a dict of the matched path variables and carries the matched
    route plus the stack of (sub)applications the request passed through.
    """
    def __init__(self, match_dict: Dict[str, str], route: AbstractRoute):
        super().__init__(match_dict)
        self._route = route
        self._apps = []  # type: List[Application]
        self._current_app = None  # type: Optional[Application]
        self._frozen = False
    @property
    def handler(self) -> _WebHandler:
        return self._route.handler
    @property
    def route(self) -> AbstractRoute:
        return self._route
    @property
    def expect_handler(self) -> _ExpectHandler:
        return self._route.handle_expect_header
    @property
    def http_exception(self) -> Optional[HTTPException]:
        # None for a successful match; see MatchInfoError for failures.
        return None
    def get_info(self) -> Dict[str, str]:
        return self._route.get_info()
    @property
    def apps(self) -> Tuple['Application', ...]:
        return tuple(self._apps)
    def add_app(self, app: 'Application') -> None:
        """Prepend *app* to the application stack (innermost app first)."""
        if self._frozen:
            raise RuntimeError("Cannot change apps stack after .freeze() call")
        if self._current_app is None:
            self._current_app = app
        self._apps.insert(0, app)
    @property
    def current_app(self) -> 'Application':
        app = self._current_app
        assert app is not None
        return app
    @contextmanager
    def set_current_app(self,
                        app: 'Application') -> Generator[None, None, None]:
        """Temporarily switch current_app while handling a sub-app request."""
        if DEBUG:  # pragma: no cover
            if app not in self._apps:
                raise RuntimeError(
                    "Expected one of the following apps {!r}, got {!r}"
                    .format(self._apps, app))
        prev = self._current_app
        self._current_app = app
        try:
            yield
        finally:
            # Always restore, even if the wrapped block raised.
            self._current_app = prev
    def freeze(self) -> None:
        self._frozen = True
    def __repr__(self) -> str:
        return "<MatchInfo {}: {}>".format(super().__repr__(), self._route)
class MatchInfoError(UrlMappingMatchInfo):
    """Match info produced when URL resolution failed with an HTTP error."""
    def __init__(self, http_exception: HTTPException) -> None:
        self._exception = http_exception
        super().__init__({}, SystemRoute(self._exception))
    @property
    def http_exception(self) -> HTTPException:
        """The HTTP error raised during resolution."""
        return self._exception
    def __repr__(self) -> str:
        return "<MatchInfoError {status}: {reason}>".format(
            status=self._exception.status,
            reason=self._exception.reason)
async def _default_expect_handler(request: Request) -> None:
    """Default handler for Expect header.
    Just send "100 Continue" to client.
    raise HTTPExpectationFailed if value of header is not "100-continue"
    """
    expect = request.headers.get(hdrs.EXPECT)
    # NOTE(review): `expect` is None when the header is absent; presumably
    # this handler is only invoked for requests that sent Expect -- confirm,
    # otherwise `expect.lower()` below raises AttributeError.
    if request.version == HttpVersion11:
        if expect.lower() == "100-continue":
            # Write the interim response directly to the transport.
            await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
        else:
            raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
class Resource(AbstractResource):
    """Base resource keeping an ordered list of routes, one per HTTP method."""
    def __init__(self, *, name: Optional[str]=None) -> None:
        super().__init__(name=name)
        self._routes = [] # type: List[ResourceRoute]
    def add_route(self, method: str,
                  handler: Union[Type[AbstractView], _WebHandler], *,
                  expect_handler: Optional[_ExpectHandler]=None
                  ) -> 'ResourceRoute':
        """Register *handler* for *method*.

        Raises RuntimeError if the method is already registered or an
        ANY-method route exists (which would shadow the new route).
        """
        for route_obj in self._routes:
            if route_obj.method == method or route_obj.method == hdrs.METH_ANY:
                raise RuntimeError("Added route will never be executed, "
                                   "method {route.method} is already "
                                   "registered".format(route=route_obj))
        route_obj = ResourceRoute(method, handler, self,
                                  expect_handler=expect_handler)
        self.register_route(route_obj)
        return route_obj
    def register_route(self, route: 'ResourceRoute') -> None:
        assert isinstance(route, ResourceRoute), \
            'Instance of Route class is required, got {!r}'.format(route)
        self._routes.append(route)
    async def resolve(self, request: Request) -> _Resolve:
        """Return ``(match_info, allowed_methods)`` for *request*.

        ``match_info`` is None when the path doesn't match, or matches but
        no route accepts the request's HTTP method.
        """
        allowed_methods = set() # type: Set[str]
        match_dict = self._match(request.rel_url.raw_path)
        if match_dict is None:
            return None, allowed_methods
        for route_obj in self._routes:
            route_method = route_obj.method
            allowed_methods.add(route_method)
            if (route_method == request.method or
                    route_method == hdrs.METH_ANY):
                return (UrlMappingMatchInfo(match_dict, route_obj),
                        allowed_methods)
        else:
            # for/else: no route matched the request method (loop never
            # breaks, so this runs whenever no return above fired).
            return None, allowed_methods
    @abc.abstractmethod
    def _match(self, path: str) -> Optional[Dict[str, str]]:
        pass # pragma: no cover
    def __len__(self) -> int:
        return len(self._routes)
    def __iter__(self) -> Iterator[AbstractRoute]:
        return iter(self._routes)
# TODO: implement all abstract methods
class PlainResource(Resource):
    """Resource matching a single fixed, literal URL path."""
    def __init__(self, path: str, *, name: Optional[str]=None) -> None:
        super().__init__(name=name)
        assert not path or path.startswith('/')
        self._path = path
    @property
    def canonical(self) -> str:
        """The literal path this resource serves."""
        return self._path
    def freeze(self) -> None:
        # An empty path canonicalises to the root path when frozen.
        self._path = self._path or '/'
    def add_prefix(self, prefix: str) -> None:
        assert prefix.startswith('/')
        assert not prefix.endswith('/')
        assert len(prefix) > 1
        self._path = prefix + self._path
    def _match(self, path: str) -> Optional[Dict[str, str]]:
        # string comparison is about 10 times faster than regexp matching
        return {} if path == self._path else None
    def raw_match(self, path: str) -> bool:
        return path == self._path
    def get_info(self) -> Dict[str, Any]:
        return {'path': self._path}
    def url_for(self) -> URL:  # type: ignore
        return URL.build(path=self._path, encoded=True)
    def __repr__(self) -> str:
        label = "" if self.name is None else "'" + self.name + "' "
        return "<PlainResource {name} {path}>".format(name=label,
                                                      path=self._path)
class DynamicResource(Resource):
    """Resource whose path contains ``{variable}`` placeholders."""
    # Bare placeholder: {var}
    DYN = re.compile(r'\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*)\}')
    # Placeholder with custom regex: {var:regex}
    DYN_WITH_RE = re.compile(
        r'\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}')
    # Default pattern for a bare {var}: one path segment.
    GOOD = r'[^{}/]+'
    def __init__(self, path: str, *, name: Optional[str]=None) -> None:
        super().__init__(name=name)
        # Build two parallel representations of the path spec:
        # `pattern` (regex, for matching) and `formatter` (str.format
        # template, for url_for).
        pattern = ''
        formatter = ''
        for part in ROUTE_RE.split(path):
            match = self.DYN.fullmatch(part)
            if match:
                # Bare {var}: match any single path segment.
                pattern += '(?P<{}>{})'.format(match.group('var'), self.GOOD)
                formatter += '{' + match.group('var') + '}'
                continue
            match = self.DYN_WITH_RE.fullmatch(part)
            if match:
                # {var:regex}: embed the user-supplied regex.
                pattern += '(?P<{var}>{re})'.format(**match.groupdict())
                formatter += '{' + match.group('var') + '}'
                continue
            if '{' in part or '}' in part:
                raise ValueError("Invalid path '{}'['{}']".format(path, part))
            # Literal part: percent-encode it, then escape for the regex.
            path = URL.build(path=part).raw_path
            formatter += path
            pattern += re.escape(path)
        try:
            compiled = re.compile(pattern)
        except re.error as exc:
            raise ValueError(
                "Bad pattern '{}': {}".format(pattern, exc)) from None
        assert compiled.pattern.startswith(PATH_SEP)
        assert formatter.startswith('/')
        self._pattern = compiled
        self._formatter = formatter
    @property
    def canonical(self) -> str:
        """Human readable path template, e.g. ``'/users/{name}'``."""
        return self._formatter
    def add_prefix(self, prefix: str) -> None:
        assert prefix.startswith('/')
        assert not prefix.endswith('/')
        assert len(prefix) > 1
        self._pattern = re.compile(re.escape(prefix)+self._pattern.pattern)
        self._formatter = prefix + self._formatter
    def _match(self, path: str) -> Optional[Dict[str, str]]:
        match = self._pattern.fullmatch(path)
        if match is None:
            return None
        else:
            # Decode percent-encoded captured values back into text.
            return {key: URL.build(path=value, encoded=True).path
                    for key, value in match.groupdict().items()}
    def raw_match(self, path: str) -> bool:
        return self._formatter == path
    def get_info(self) -> Dict[str, Any]:
        return {'formatter': self._formatter,
                'pattern': self._pattern}
    def url_for(self, **parts: str) -> URL:
        url = self._formatter.format_map({k: URL.build(path=v).raw_path
                                          for k, v in parts.items()})
        return URL.build(path=url)
    def __repr__(self) -> str:
        name = "'" + self.name + "' " if self.name is not None else ""
        return ("<DynamicResource {name} {formatter}>"
                .format(name=name, formatter=self._formatter))
class PrefixResource(AbstractResource):
    """Base for resources serving everything under a fixed URL prefix."""
    def __init__(self, prefix: str, *, name: Optional[str]=None) -> None:
        assert not prefix or prefix.startswith('/'), prefix
        assert prefix in ('', '/') or not prefix.endswith('/'), prefix
        super().__init__(name=name)
        # Store the percent-encoded form of the prefix.
        self._prefix = URL.build(path=prefix).raw_path
    @property
    def canonical(self) -> str:
        return self._prefix
    def add_prefix(self, prefix: str) -> None:
        assert prefix.startswith('/')
        assert not prefix.endswith('/')
        assert len(prefix) > 1
        self._prefix = prefix + self._prefix
    def raw_match(self, prefix: str) -> bool:
        # Prefix resources never match a raw path exactly.
        return False
# TODO: impl missing abstract methods
class StaticResource(PrefixResource):
VERSION_KEY = 'v'
def __init__(self, prefix: str, directory: PathLike,
*, name: Optional[str]=None,
expect_handler: Optional[_ExpectHandler]=None,
chunk_size: int=256 * 1024,
show_index: bool=False, follow_symlinks: bool=False,
append_version: bool=False) -> None:
super().__init__(prefix, name=name)
try:
directory = Path(directory)
| |
success + " " + author + " (" + ", ".join(commit_hashes) + ")</li>"
committers_comment += "</ul>"
if num_missing > 0:
support_url = "https://jira.linuxfoundation.org/servicedesk/customer/portal/4"
# Group commits by author.
committers = {}
# Consider the case where github Id does not exist
for commit, author in missing:
if author[0] is None:
author[1] = "Unknown"
if author[1] not in committers:
committers[author[1]] = []
committers[author[1]].append(commit)
# Check case for whitelisted unsigned user
if len(author) == 4:
committers[author[1]].append(True)
# Print author commit information.
committers_comment += "<ul>"
github_help_url = "https://help.github.com/en/github/committing-changes-to-your-project/why-are-my-commits-linked-to-the-wrong-user"
for author, commit_hashes in committers.items():
if author == "Unknown":
committers_comment += (
f"<li> {failed} The commit ({' ,'.join(commit_hashes)}) "
+ f"is missing the User's ID, preventing the EasyCLA check. "
+ f"<a href='{github_help_url}' target='_blank'>Consult GitHub Help</a> to resolve."
+ f"For further assistance with EasyCLA, "
+ f"<a href='{support_url}' target='_blank'>please submit a support request ticket</a>."
+ "</li>"
)
else:
if True in commit_hashes:
committers_comment += (
f"<li>{author} ({' ,'.join(commit_hashes[:-1])}) "
+ f"is authorized, but they must confirm their affiliation with their company. "
+ f"Start the authorization process "
+ f"<a href='{sign_url}' target='_blank'> by clicking here</a>, click \"Corporate\","
+ f"select the appropriate company from the list, then confirm "
+ f"your affiliation on the page that appears. "
+ f"For further assistance with EasyCLA, "
+ f"<a href='{support_url}' target='_blank'>please submit a support request ticket</a>."
+ "</li>"
)
else:
committers_comment += (
f"<li>"
+ f"<a href='{sign_url}' target='_blank'>{failed}</a> - "
+ f"{author} The commit ({' ,'.join(commit_hashes)}) is not authorized under a signed CLA. "
+ f"<a href='{sign_url}' target='_blank'>Please click here to be authorized</a>. "
+ f"For further assistance with EasyCLA, "
+ f"<a href='{support_url}' target='_blank'>please submit a support request ticket</a>."
+ "</li>"
)
committers_comment += "</ul>"
return committers_comment
text = "The committers are authorized under a signed CLA."
return text + committers_comment
def get_authorization_url_and_state(client_id, redirect_uri, scope, authorize_url):
    """
    Helper function to get an OAuth2 session authorization URL and state.
    :param client_id: The client ID for this OAuth2 session.
    :type client_id: string
    :param redirect_uri: The redirect URI to specify in this OAuth2 session.
    :type redirect_uri: string
    :param scope: The list of scope items to use for this OAuth2 session.
    :type scope: [string]
    :param authorize_url: The URL to submit the OAuth2 request.
    :type authorize_url: string
    :return: Tuple of (authorization_url, state) for the new session.
    """
    fn = 'utils.get_authorization_url_and_state'
    oauth = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=scope)
    authorization_url, state = oauth.authorization_url(authorize_url)
    # Bug fix: previously logged the *input* authorize_url where the message
    # claims to show the generated authorization_url.
    cla.log.debug(f'{fn} - initialized a new oauth session '
                  f'using the github oauth client id: {client_id[0:5]}... '
                  f'with the redirect_uri: {redirect_uri} '
                  f'using scope of: {scope}. Obtained the '
                  f'state: {state} and the '
                  f'generated authorization_url: {authorization_url}')
    return authorization_url, state
def fetch_token(client_id, state, token_url, client_secret, code,
                redirect_uri=None):  # pylint: disable=too-many-arguments
    """
    Helper function to fetch a OAuth2 session token.
    :param client_id: The client ID for this OAuth2 session.
    :type client_id: string
    :param state: The OAuth2 session state.
    :type state: string
    :param token_url: The token URL for this OAuth2 session.
    :type token_url: string
    :param client_secret: the client secret
    :type client_secret: string
    :param code: The OAuth2 session code.
    :type code: string
    :param redirect_uri: The redirect URI for this OAuth2 session.
    :type redirect_uri: string
    :return: The token payload returned by the provider.
    """
    fn = 'utils.fetch_token'
    if redirect_uri is not None:
        oauth2 = OAuth2Session(client_id, state=state, scope=['user:email'], redirect_uri=redirect_uri)
    else:
        oauth2 = OAuth2Session(client_id, state=state, scope=['user:email'])
    # Security fix: never log the full client secret or authorization code --
    # only a short prefix for correlation.
    cla.log.debug(f'{fn} - oauth2.fetch_token - '
                  f'token_url: {token_url}, '
                  f'client_id: {client_id}, '
                  f'client_secret: {client_secret[0:5]}..., '
                  f'code: {code[0:5]}...')
    return oauth2.fetch_token(token_url, client_secret=client_secret, code=code)
def redirect_user_by_signature(user, signature):
    """
    Helper method to redirect a user based on their signature status and return_url.
    :param user: The user object for this redirect.
    :type user: cla.models.model_interfaces.User
    :param signature: The signature object for this user.
    :type signature: cla.models.model_interfaces.Signature
    :return: A status dict when no return_url is configured.
    :raises falcon.HTTPFound: raised (not returned) to redirect the user
        when a return/sign URL is available.
    """
    return_url = signature.get_signature_return_url()
    if signature.get_signature_signed() and signature.get_signature_approved():
        # Signature already signed and approved.
        # TODO: Notify user of signed and approved signature somehow.
        cla.log.info('Signature already signed and approved for user: %s, %s',
                     user.get_user_emails(), signature.get_signature_id())
        if return_url is None:
            cla.log.info('No return_url set in signature object - serving success message')
            return {'status': 'signed and approved'}
        else:
            cla.log.info('Redirecting user back to %s', return_url)
            raise falcon.HTTPFound(return_url)
    elif signature.get_signature_signed():
        # Awaiting approval.
        # TODO: Notify user of pending approval somehow.
        cla.log.info('Signature signed but not approved yet: %s',
                     signature.get_signature_id())
        if return_url is None:
            cla.log.info('No return_url set in signature object - serving pending message')
            return {'status': 'pending approval'}
        else:
            cla.log.info('Redirecting user back to %s', return_url)
            raise falcon.HTTPFound(return_url)
    else:
        # Signature awaiting signature.
        sign_url = signature.get_signature_sign_url()
        signature_id = signature.get_signature_id()
        cla.log.info('Signature exists, sending user to sign: %s (%s)', signature_id, sign_url)
        raise falcon.HTTPFound(sign_url)
def get_active_signature_metadata(user_id):
    """
    Fetch the temporary metadata stored for a user's in-progress signature.

    While a signature is in progress the CLA system keeps track of where the
    user came from, which repository/PR triggered the request, etc. Once
    signing completes this data lives on the Signature object instead.
    :param user_id: The ID of the user in question.
    :type user_id: string
    :return: Dict of data on the signature request from this user, or None
        if no active signature exists.
    :rtype: dict
    """
    store = get_key_value_store_service()
    key = 'active_signature:' + str(user_id)
    if not store.exists(key):
        return None
    return json.loads(store.get(key))
def set_active_signature_metadata(user_id, project_id, repository_id, pull_request_id):
    """
    Store temporary metadata about a signing request that has just started.

    :param user_id: The ID of the user beginning the signing process.
    :type user_id: string
    :param project_id: The ID of the project this signature is for.
    :type project_id: string
    :param repository_id: The repository where the signature is coming from.
    :type repository_id: string
    :param pull_request_id: The PR where this signature request is coming from
        (where the user clicked on the 'Sign CLA' badge).
    :type pull_request_id: string
    """
    store = get_key_value_store_service()
    # Keyed per-user; read back by get_active_signature_metadata().
    key = 'active_signature:' + str(user_id)
    payload = {
        'user_id': user_id,
        'project_id': project_id,
        'repository_id': repository_id,
        'pull_request_id': pull_request_id,
    }
    value = json.dumps(payload)
    store.set(key, value)
    cla.log.info('Stored active signature details for user %s: Key - %s Value - %s', user_id, key, value)
def delete_active_signature_metadata(user_id):
    """
    Remove all temporary metadata stored for the user's active signature.

    :param user_id: The ID of the user in question.
    :type user_id: string
    """
    key = 'active_signature:' + str(user_id)
    store = get_key_value_store_service()
    store.delete(key)
    cla.log.info('Deleted stored active signature details for user %s', user_id)
def get_active_signature_return_url(user_id, metadata=None):
    """
    Helper function to get a user's active signature return URL.
    :param user_id: The user ID in question.
    :type user_id: string
    :param metadata: The signature metadata
    :type metadata: dict
    :return: The URL the user will be redirected to upon successful signature,
        or None if no active signature/installation is found.
    :rtype: string
    """
    if metadata is None:
        metadata = get_active_signature_metadata(user_id)
    if metadata is None:
        cla.log.warning('Could not find active signature for user {}, return URL request failed'.format(user_id))
        return None
    # Get Github ID from metadata
    github_repository_id = metadata['repository_id']
    # Get installation id through a helper function
    installation_id = get_installation_id_from_github_repository(github_repository_id)
    if installation_id is None:
        cla.log.error('Could not find installation ID that is configured for this repository ID: %s',
                      github_repository_id)
        return None
    github = cla.utils.get_repository_service('github')
    # Resolve the PR URL via the GitHub repository service.
    return github.get_return_url(metadata['repository_id'],
                                 metadata['pull_request_id'],
                                 installation_id)
def get_installation_id_from_github_repository(github_repository_id):
    """Return the GitHub app installation ID configured for a repository.

    :param github_repository_id: The external (GitHub) repository ID.
    :return: The installation ID, or None when the repository or its
        organization is not registered.
    """
    # Get repository ID that references the github ID.
    try:
        repository = Repository().get_repository_by_external_id(github_repository_id, 'github')
    except DoesNotExist:
        return None
    # Get Organization from this repository
    organization = GitHubOrg()
    try:
        organization.load(repository.get_repository_organization_name())
    except DoesNotExist:
        return None
    # Get this organization's installation ID
    return organization.get_organization_installation_id()
def get_project_id_from_github_repository(github_repository_id):
    """Return the CLA project (contract group) ID for a GitHub repository.

    :param github_repository_id: The external (GitHub) repository ID.
    :return: The project ID, or None when the repository is not registered.
    """
    # Get repository ID that references the github ID.
    try:
        repository = Repository().get_repository_by_external_id(github_repository_id, 'github')
    except DoesNotExist:
        return None
    # Get project ID (contract group ID) of this repository
    return repository.get_repository_project_id()
def get_individual_signature_callback_url(user_id, metadata=None):
"""
Helper function to get a user's active signature callback URL.
:param user_id: The user ID in question.
:type user_id: string
:param metadata: The signature metadata
:type metadata: dict
:return: The callback URL that will be hit by the signing service provider.
:rtype: string
"""
if metadata is None:
metadata = get_active_signature_metadata(user_id)
if metadata is None:
cla.log.warning('Could not find active signature for user {}, callback URL request failed'.format(user_id))
return None
# Get Github ID | |
"""
repurpose_trans_rules_agrosuccess.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The objectives of this script are:
1. Map land cover types represented in Millington 2009 to land cover types
represented in AgroSuccess.
2. Identify land cover types present in Millington 2009 and not in AgroSuccess,
and vice versa.
3. Reconcile these differences to produce a succession transition table for
agrosuccess: `agrosuccess_succession.csv`. This can be consumed by cymod to
create a land cover transition graph. Variable and state names (rather than
codes) should be used in this table.
- Input is the file ../data/tmp/millington_succession.csv
- Output is the file ../data/created/agrosuccess_succession.csv
"""
import os
import sys
import logging
import warnings
import pandas as pd
from config import DIRS, exit_if_file_missing
from constants import (
Succession,
Aspect,
SeedPresence,
Water,
MillingtonPaperLct as MLct,
AgroSuccessLct as AsLct,
)
# ------------------- Replace codes with human readable names------------------
def get_translator(trans_enum):
    """Make a function which uses an enum to translate a dataframe column."""
    def code_translator(df, col_name, post_proc_func=None):
        """Replace codes with their enum aliases in ``df[col_name]``.

        Args:
            df (:obj:`pandas.DataFrame`): Dataframe whose column is
                rewritten in place.
            col_name (str): Name of the column to convert.
            post_proc_func (function, optional): Applied to each alias
                after the code has been translated.
        """
        translated = df[col_name].apply(lambda code: trans_enum(code).alias)
        if post_proc_func is not None:
            translated = translated.apply(post_proc_func)
        df.loc[:, col_name] = translated
        return df
    return code_translator
def millington_trans_table_codes_to_names(df):
    """Replace state/condition codes in Millington trans table with names.

    Start/end land-cover states, seed-presence flags and environmental
    conditions are stored as integer codes; each column is rewritten in
    place to the human readable alias given by the corresponding enum.

    Args:
        df (:obj:`pandas.DataFrame`): Millington transition table.

    Returns:
        pandas.DataFrame: The same frame with columns rewritten.
    """
    for state_col in ["start", "delta_D"]:
        df.loc[:, state_col] = df[state_col].apply(lambda x: MLct(x).alias)
    for seed_col in ["pine", "oak", "deciduous"]:
        # Idiom fix: comparison already yields a bool
        # (was: True if ... == "true" else False).
        df.loc[:, seed_col] = df[seed_col].apply(
            lambda x: SeedPresence(x).alias == "true")
    cond_enum_d = {"succession": Succession, "aspect": Aspect, "water": Water}
    for cond, e in cond_enum_d.items():
        df.loc[:, cond] = df[cond].apply(lambda x: e(x).alias)
    return df
# -------------- Convert 1:1 mapped state names to AgroSuccess-----------------
def convert_millington_names_to_agrosuccess(df, start_col, end_col):
    """Apply 1:1 mappings to rename states to match AgroSuccess conventions.

    Note that we don't map `URBAN` or `HOLM_OAK_W_PASTURE` from the Millington
    paper because these are dropped in the function
    `drop_holm_oak_w_pasture_and_urban`. Likewise `PASTURE` and `SCRUBLAND` are
    handled in `remove_transitions_bw_pasture_and_scrubland`. Finally
    `CROPLAND` is handled separately in `replace_cropland_with_new_crop_types`.
    """
    # Millington -> AgroSuccess land cover type renames (1:1 only).
    map_dict = {
        MLct.PINE: AsLct.PINE,
        MLct.TRANSITION_FOREST: AsLct.TRANS_FOREST,
        MLct.DECIDUOUS: AsLct.DECIDUOUS,
        MLct.HOLM_OAK: AsLct.OAK,
        MLct.WATER_QUARRY: AsLct.WATER_QUARRY,
        MLct.BURNT: AsLct.BURNT,
    }
    # Sanity check: the unmapped types must be exactly the ones handled by
    # the other functions named in the docstring. NOTE(review): the equality
    # asserts depend on enum declaration order -- confirm if enums change.
    unmapped_m_lcts = [lct.name for lct in MLct
                       if lct not in map_dict.keys()]
    expected_unmapped_lcts = ["PASTURE", "SCRUBLAND", "HOLM_OAK_W_PASTURE",
                              "CROPLAND", "URBAN"]
    assert unmapped_m_lcts == expected_unmapped_lcts,\
        "LCTs in Millington, not used in AgroSuccess"
    unmapped_as_lcts = [lct.name for lct in AsLct
                        if lct not in map_dict.values()]
    assert unmapped_as_lcts == ['WHEAT', 'DAL', 'SHRUBLAND'],\
        "LCTs in AgroSuccess, not used in Millington"
    # Apply every rename to both the start and end state columns.
    for col in [start_col, end_col]:
        for k, v in map_dict.items():
            df.loc[:,col] = df[col].replace(k.alias, v.alias)
    return df
# --------------------- Drop URBAN and HOLM_OAK_W_PASTURE ---------------------
def state_is_exclusive_source_of_other_state(trans_df, state_name, start_col,
                                             end_col):
    """True if at least one state is only accessible from `state_name`.

    Args:
        trans_df (:obj:`pandas.DataFrame`): Transition table.
        state_name (str): Candidate source state.
        start_col (str): Name of the start-state column.
        end_col (str): Name of the end-state column.

    Returns:
        bool: True if removing `state_name` would leave some other state
        without any source, False otherwise.
    """
    def tgt_states(df, src_lct_name):
        """Get the states which can originate from `src_lct_name`.
        Exclude the `src_lct_name` state itself.
        Returns:
            list: Names of states which have `src_lct_name` as their source.
        """
        all_trans = df.groupby(by=[start_col, end_col]).size().reset_index()
        if len(all_trans[all_trans[start_col] == src_lct_name]) == 0:
            warnings.warn("No start state called '{0}'".format(src_lct_name))
            return []
        else:
            tgt_trans = all_trans[(all_trans[start_col] == src_lct_name)
                                  & (all_trans[end_col] != src_lct_name)]
            return list(tgt_trans[end_col].values)
    def src_states(df, tgt_lct_name):
        """Get the states which `tgt_lct_name` can transition from.
        Exclude the `tgt_lct_name` state itself.
        Returns:
            list: Names of states which can transition to `tgt_lct_name`.
        """
        # Bug fix: this closure previously shadowed the outer
        # start_col/end_col with hard-coded "start"/"delta_D", breaking any
        # call that uses different column names.
        all_trans = df.groupby(by=[start_col, end_col]).size().reset_index()
        src_trans = all_trans[(all_trans[end_col] == tgt_lct_name)
                              & (all_trans[start_col] != tgt_lct_name)]
        return list(src_trans[start_col].values)
    states_from_state_name = tgt_states(trans_df, state_name)
    exclusive_source_for = []
    for other_state in states_from_state_name:
        other_state_sources = src_states(trans_df, other_state)
        other_state_sources.remove(state_name)
        if len(other_state_sources) < 1:
            exclusive_source_for.append(other_state)
    if exclusive_source_for:
        print("{0} is the only source for states: {1}".format(
            state_name, ", ".join(exclusive_source_for)))
        return True
    else:
        return False
def drop_holm_oak_w_pasture_and_urban(df, start_col, end_col):
    """Remove rows with excluded land cover types as start or end state.

    The `URBAN` and `HOLM_OAK_W_PASTURE` land cover types used in Millington
    2009 are not needed in AgroSuccess so should be removed entirely. To
    ensure model integrity I will check that there are no land cover types
    which *only* come about by transition *from* `URBAN` or
    `HOLM_OAK_W_PASTURE`.
    """
    def row_excludes_lct(row, lct_name):
        """Return True if row doesn't have lct as start or end state."""
        # Bug fix: this closure previously shadowed the outer
        # start_col/end_col with hard-coded "start"/"delta_D" column names.
        if row[start_col] == lct_name or row[end_col] == lct_name:
            return False
        else:
            return True
    # Confirm removing these states won't leave any other states in the model
    # inaccessible, and remove it.
    for state in [MLct.HOLM_OAK_W_PASTURE.alias, MLct.URBAN.alias]:
        assert state_is_exclusive_source_of_other_state(df, state, start_col,
                                               end_col) == False
        no_rows = len(df.index)
        df = df[df.apply(lambda x: row_excludes_lct(x, state), axis=1)]
        assert len(df.index) < no_rows
    return df
# ------------ Replace 'cropland' with 'wheat' and 'DAL' --------
def replace_cropland_with_new_crop_types(df, start_col, end_col):
    """Replace Millington's cropland state with wheat and DAL.
    Args:
        df (:obj:`pandas.DataFrame`): Original transition table containing
            'cropland' as a land cover state.
    Returns:
        df: A new dataframe where rows representing transitions involving
            cropland are replaced with rows describing transitions involving
            wheat and DAL (depleted agricultural land) states.
    """
    # There are no transitions where cropland is the target state.
    # Correspondingly no transitions have the new cropland land cover types
    # as their target state. This makes sense, as cropland is something which
    # humans need to create.
    assert len(df[df[end_col] == MLct.CROPLAND.alias].index) == 0
    # Rows from old table where cropland is the transition's starting state
    from_cropland = df[df[start_col] == MLct.CROPLAND.alias]
    # Duplicate each from-cropland rule once per new crop type.
    new_crop_dfs = []
    for crop in [AsLct.WHEAT.alias, AsLct.DAL.alias]:
        new_crop = from_cropland.copy()
        new_crop.loc[:,start_col] = crop
        new_crop_dfs.append(new_crop)
    new_df = df.copy()
    # remove old cropland rows
    new_df = new_df[new_df[start_col] != MLct.CROPLAND.alias]
    new_df = pd.concat([new_df] + new_crop_dfs)
    # Row-count invariant: each dropped cropland rule reappears twice.
    assert len(new_df.index) == (
        len(df.index) - len(from_cropland.index)
        + 2 * len(from_cropland.index)), "Each transition rule starting with "\
        + "'cropland' should be replaced by one each from 'wheat' "\
        + "and 'DAL' but the resulting numbers of rows don't tally."
    return new_df
# -- Unify 'pasture' and 'scrubland' types in Millington table to -------------
# -- AgroSuccess 'shrubland' type ---------------------------------------------
def remove_transitions_bw_pasture_and_scrubland(df, start_col, end_col):
    """Drop transitions between pasture and scrubland.

    These two land cover types are subsequently removed and replaced with
    the 'shrubland' type.
    """
    # Naming fix: the two masks were previously swapped (`scrub_to_pasture`
    # actually selected pasture->scrubland rows and vice versa). Behavior
    # is unchanged because both sets of rows are dropped either way.
    pasture_to_scrub = ((df[start_col] == MLct.PASTURE.alias)
                        & (df[end_col] == MLct.SCRUBLAND.alias))
    scrub_to_pasture = ((df[start_col] == MLct.SCRUBLAND.alias)
                        & (df[end_col] == MLct.PASTURE.alias))
    return df[~pasture_to_scrub & ~scrub_to_pasture]
def duplicates_start_with_pasture_or_scrubland(df, start_col, end_col):
    """DataFrame of duplicated transitions.

    Restricted to rows whose start state is 'pasture' or 'scrubland';
    duplicates are judged on environmental conditions plus end state.
    """
    cond_cols = ["succession", "aspect", "pine", "oak", "deciduous", "water"]
    start_mask = df[start_col].isin([MLct.PASTURE.alias, MLct.SCRUBLAND.alias])
    rel_start_df = df[start_mask]
    duplicate_check_cols = cond_cols + [end_col]
    dup_mask = rel_start_df.duplicated(duplicate_check_cols, keep=False)
    return rel_start_df[dup_mask].sort_values(duplicate_check_cols)
def duplicates_end_with_pasture_or_scrubland(df, start_col, end_col):
    """DataFrame of duplicated transitions.

    Restricted to rows whose end state is 'pasture' or 'scrubland';
    duplicates are judged on environmental conditions plus start state.
    """
    cond_cols = ["succession", "aspect", "pine", "oak", "deciduous", "water"]
    end_mask = df[end_col].isin([MLct.PASTURE.alias, MLct.SCRUBLAND.alias])
    rel_end_df = df[end_mask]
    duplicate_check_cols = cond_cols + [start_col]
    dup_mask = rel_end_df.duplicated(duplicate_check_cols, keep=False)
    return rel_end_df[dup_mask].sort_values(duplicate_check_cols)
def replace_pasture_scrubland_with_shrubland(df, start_col, end_col):
    """Merge pasture and scrubland state transitions into 'shrubland'.
    1. Remove transitions /between/ scrubland and pasture and vice versa.
    2. Check there are no duplicate transitions which would be caused by an
       identical set of conditions leading from or to both pasture and
       scrubland being merged.
    3. Rename all instances of either 'scrubland' or 'pasture' to 'shrubland'
    4. Check for duplicates again.
    """
    df = remove_transitions_bw_pasture_and_scrubland(df, start_col, end_col)
    # Step 2: the merge is only safe if no condition set maps from/to both
    # pasture and scrubland -- otherwise renaming would create duplicates.
    duplicates_start = duplicates_start_with_pasture_or_scrubland(df,
        start_col, end_col)
    assert len(duplicates_start.index) == 0, "No duplicates expected."
    duplicates_end = duplicates_end_with_pasture_or_scrubland(df,
        start_col, end_col)
    assert len(duplicates_end.index) == 0, "No duplicates expected."
    # Step 3: rename both types to shrubland in start and end columns.
    for col in [start_col, end_col]:
        for lct in [MLct.SCRUBLAND.alias, MLct.PASTURE.alias]:
            df.loc[:,col] = df[col].replace(lct, AsLct.SHRUBLAND.alias)
    # Step 4: final duplicate check across conditions plus both states.
    cond_cols = ["succession", "aspect", "pine", "oak", "deciduous", "water"]
    cond_cols += [start_col, end_col]
    assert len(df[df.duplicated(cond_cols)].index) == 0, "There should be "\
        + "no duplicated rows."
    return df
# ----- Remove transitions starting and ending with same state ----------------
def remove_end_same_as_start_transitions(df, start_col, end_col):
    """Remove rows corresponding to transitions where start equals end state.

    Millington 2009 used a methodology where if a combination of conditions
    didn't result in a transition, this would be represented in the model by
    specifying a transition with start and end state being the same, and a
    transition time of 0 years.

    AgroSuccess will handle 'no transition' rules differently, so these dummy
    transitions should be excluded.
    """
    state_changes = df.apply(
        lambda row: row[start_col] != row[end_col], axis=1)
    return df[state_changes]
# | |
<reponame>sparks-baird/RoboCrab<filename>crabnet/kingcrab.py<gh_stars>0
import numpy as np
import pandas as pd
import torch
from torch import nn
# %%
RNG_SEED = 42
torch.manual_seed(RNG_SEED)
np.random.seed(RNG_SEED)
data_type_torch = torch.float32
# %%
class ResidualNetwork(nn.Module):
    """
    Feed forward Residual Neural Network as seen in Roost.
    https://doi.org/10.1038/s41467-020-19964-7
    """
    def __init__(self, input_dim, output_dim, hidden_layer_dims):
        """
        Inputs
        ----------
        input_dim: int
        output_dim: int
        hidden_layer_dims: list(int)
        """
        # Consistency fix: modern zero-argument super(), matching the style
        # used by Embedder in this module.
        super().__init__()
        dims = [input_dim] + hidden_layer_dims
        self.fcs = nn.ModuleList(
            [nn.Linear(dims[i], dims[i + 1]) for i in range(len(dims) - 1)]
        )
        # Residual shortcuts: a bias-free projection when the layer changes
        # width, otherwise a plain identity skip connection.
        self.res_fcs = nn.ModuleList(
            [
                nn.Linear(dims[i], dims[i + 1], bias=False)
                if (dims[i] != dims[i + 1])
                else nn.Identity()
                for i in range(len(dims) - 1)
            ]
        )
        self.acts = nn.ModuleList([nn.LeakyReLU() for _ in range(len(dims) - 1)])
        self.fc_out = nn.Linear(dims[-1], output_dim)
    def forward(self, fea):
        """Apply each residual block in turn, then the output projection."""
        for fc, res_fc, act in zip(self.fcs, self.res_fcs, self.acts):
            fea = act(fc(fea)) + res_fc(fea)
        return self.fc_out(fea)
    def __repr__(self):
        return f"{self.__class__.__name__}"
class Embedder(nn.Module):
    """Embed element identities and robocrystallographer features.

    Element indices are looked up in a pretrained mat2vec table; structural
    features are stacked alongside and everything is projected to the model
    dimension by a shared linear layer.
    """
    def __init__(self, d_model, compute_device=None):
        """
        Parameters
        ----------
        d_model : int
            Output dimension of the shared projection layer.
        compute_device : optional
            Device the embedding tables and projection are moved to.
        """
        super().__init__()
        self.d_model = d_model
        self.compute_device = compute_device
        elem_dir = "data/element_properties"
        # Choose what element information the model receives
        mat2vec = f"{elem_dir}/mat2vec.csv"  # element embedding
        # mat2vec = f'{elem_dir}/onehot.csv'  # onehot encoding (atomic number)
        # mat2vec = f'{elem_dir}/random_200.csv'  # random vec for elements
        cbfv = pd.read_csv(mat2vec, index_col=0).values
        feat_size = cbfv.shape[-1]
        self.fc_mat2vec = nn.Linear(feat_size, d_model).to(self.compute_device)
        # Prepend an all-zero row so that index 0 (padding) embeds to zeros.
        zeros = np.zeros((1, feat_size))
        # e.g. size: (118 elements + 1, 200 features)
        cat_array = np.concatenate([zeros, cbfv])
        cat_array = torch.as_tensor(cat_array, dtype=data_type_torch)
        self.cbfv = nn.Embedding.from_pretrained(cat_array).to(
            self.compute_device, dtype=data_type_torch
        )
        # Boolean-feature table built from all ones (plus zero padding row),
        # so every boolean index embeds to a constant vector -- presumably a
        # placeholder for learned structural embeddings; TODO confirm.
        sbfv = np.ones_like(cbfv)
        feat_size = sbfv.shape[-1]
        zeros = np.zeros((1, feat_size))
        cat_array = np.concatenate([zeros, sbfv])
        cat_array = torch.as_tensor(cat_array, dtype=data_type_torch)
        self.sbfv = nn.Embedding.from_pretrained(cat_array).to(
            self.compute_device, dtype=data_type_torch
        )
    def forward(self, src, cat_feat, bool_src, float_feat):
        """
        Compute elemental and structural embedding using elemental indices and robocrystallographer features.
        Parameters
        ----------
        src : torch.Tensor (Batch Size, # elements)
            Elemental indices (padded). E.g. hydrogen encoded as 1.
        cat_feat : torch.Tensor (Batch Size, # features = 3)
            Categorical robocrystallographer features. E.g. 'dimensionality'.
        bool_src : torch.Tensor (Batch Size, # features = 15)
            Boolean robocrystallographer feature indices (padded). E.g. 'contains_tetrahedral'.
        float_feat : torch.Tensor (Batch Size, # features = 44)
            Numerical robocrystallographer features. E.g. 'average_bond_length'.
        Returns
        -------
        x_emb : torch.Tensor (Batch Size, # features = # Elements + cat_feat.shape[2] + bool_feat.shape[2] + float_feat.shape[2], 200)
            Embedding matrix (post FC-network) for mat2vec and robocrystallographer features (vertically stacked).
        """
        mat2vec_emb = self.cbfv(src)
        bool_emb = self.sbfv(bool_src)
        # stack mat2vec_emb and (expanded/repeated) structural features
        feats = [cat_feat, float_feat]
        # Repeat the scalar features across the embedding dimension so they
        # can be concatenated with the element embeddings.
        d = [1, 1, mat2vec_emb.shape[2]]
        cat_feat, float_feat = [feat.unsqueeze(2).repeat(d) for feat in feats]
        # size e.g. (256, # Elements + # Structural features = 159, 200)
        feats = torch.cat([mat2vec_emb, cat_feat, bool_emb, float_feat], dim=1)
        # size e.g. (256, 159, 512)
        x_emb = self.fc_mat2vec(feats)
        return x_emb
"""mini code graveyard"""
"""
# to determine filler dimension
# bool_len = list(map(len, bool_src))
# mx = max(bool_len) # this might need to be defined earlier for the full dataset
# add filler zeros
# bool_src = [
# torch.cat(
# [
# bools,
# torch.zeros(
# mx - len(bools), dtype=bool, device=self.compute_device
# ),
# ]
# )
# for bools in bool_src
# ]
# bool_src = pad(bools[[0, 0], [0, mx - len(bools)]])
# bool_src = torch.stack(bool_src)
# feats = torch.cat(feats, dim=1)
# cat_feat.repeat([1, len(mat2vec_emb)], 1)
"""
# %%
class FractionalEncoder(nn.Module):
    """
    Encoding element fractional amount using a "fractional encoding" inspired by
    the positional encoder discussed by Vaswani.
    See https://arxiv.org/abs/1706.03762

    Parameters
    ----------
    d_model : int
        Model embedding width; half of it (d_model // 2) is used here.
    resolution : int, optional
        Number of discrete fraction bins in the lookup table.
    log10 : bool, optional
        If True, log-compress fractions before the table lookup.
    compute_device : optional
        Device the encoder is intended to run on.
    """

    def __init__(self, d_model, resolution=100, log10=False, compute_device=None):
        super().__init__()
        self.d_model = d_model // 2
        self.resolution = resolution
        self.log10 = log10
        self.compute_device = compute_device

        x = torch.linspace(
            0, self.resolution - 1, self.resolution, requires_grad=False
        ).view(self.resolution, 1)
        fraction = (
            torch.linspace(0, self.d_model - 1, self.d_model, requires_grad=False)
            .view(1, self.d_model)
            .repeat(self.resolution, 1)
        )
        # Interleaved sin/cos table, analogous to transformer positional
        # encoding but with base 50 instead of 10000.
        pe = torch.zeros(self.resolution, self.d_model)
        pe[:, 0::2] = torch.sin(x / torch.pow(50, 2 * fraction[:, 0::2] / self.d_model))
        pe[:, 1::2] = torch.cos(x / torch.pow(50, 2 * fraction[:, 1::2] / self.d_model))
        # register_buffer returns None, so its result must not be assigned
        # (the original `pe = self.register_buffer(...)` was a dead assignment).
        self.register_buffer("pe", pe)

    def forward(self, x):
        """
        Look up the fractional encoding for each fraction in ``x``.

        Parameters
        ----------
        x : torch.Tensor
            Fractional amounts, expected in (0, 1]. Cloned first, so the
            caller's tensor is never modified in place.

        Returns
        -------
        torch.Tensor
            Encoding of shape ``x.shape + (d_model // 2,)``.
        """
        x = x.clone()
        if self.log10:
            x = 0.0025 * (torch.log2(x)) ** 2
            # clamp the compressed values back into (0, 1]
            x[x > 1] = 1
            # x = 1 - x  # for sinusoidal encoding at x=0
        # fractions below the table resolution are clamped to the first bin
        x[x < 1 / self.resolution] = 1 / self.resolution
        frac_idx = torch.round(x * (self.resolution)).to(dtype=torch.long) - 1
        out = self.pe[frac_idx]
        return out
# %%
class Encoder(nn.Module):
    """
    Embed element/robocrystallographer features, add prevalence ("fractional")
    encodings, and optionally pass the result through a transformer encoder.
    """

    def __init__(self, d_model, N, heads, frac=False, attn=True, compute_device=None):
        """
        Parameters
        ----------
        d_model : int
            Embedding width.
        N : int
            Number of transformer encoder layers.
        heads : int
            Number of attention heads per layer.
        frac : bool, optional
            If True, rescale the encoder output by ``frac`` in forward().
        attn : bool, optional
            If True, build and apply the transformer encoder.
        compute_device : optional
            Device used when constructing masks.
        """
        super().__init__()
        self.d_model = d_model
        self.N = N
        self.heads = heads
        self.fractional = frac
        self.attention = attn
        self.compute_device = compute_device
        self.embed = Embedder(d_model=self.d_model, compute_device=self.compute_device)
        # what is resolution?
        self.pe = FractionalEncoder(self.d_model, resolution=5000, log10=False)
        self.ple = FractionalEncoder(self.d_model, resolution=5000, log10=True)
        # learnable log2-scale factors for the embedding and both encodings
        self.emb_scaler = nn.parameter.Parameter(torch.tensor([1.0]))
        self.pos_scaler = nn.parameter.Parameter(torch.tensor([1.0]))
        self.pos_scaler_log = nn.parameter.Parameter(torch.tensor([1.0]))
        if self.attention:
            encoder_layer = nn.TransformerEncoderLayer(
                self.d_model, nhead=self.heads, dim_feedforward=2048, dropout=0.1
            )
            self.transformer_encoder = nn.TransformerEncoder(
                encoder_layer, num_layers=self.N
            )

    def forward(self, src, frac, cat_feat, bool_src, float_feat):
        """
        Encode a batch of compounds.

        Parameters
        ----------
        src : torch.Tensor
            Padded elemental indices.
        frac : torch.Tensor
            Elemental fractions. NOTE(review): this tensor is zeroed in place
            below (via the ``emask`` alias) — confirm callers expect that.
        cat_feat, bool_src, float_feat : torch.Tensor
            Robocrystallographer features (categorical, boolean indices,
            numerical), as documented on Embedder.forward.

        Returns
        -------
        torch.Tensor
            Encoded features, zeroed where the mask marks absent entries.
        """
        # scaled, fully-connected mat2vec (fc_mat2vec) embedding, see Fig 6 of 10.1038/s41524-021-00545-1
        x = self.embed(src, cat_feat, bool_src, float_feat) * 2 ** self.emb_scaler
        nrobo_feats = [feat.shape[1] for feat in [cat_feat, bool_src, float_feat]]
        d1, d2, d3 = [[frac.shape[0], n] for n in nrobo_feats]  # d2 unused
        # mask has 1 if n-th element is present, 0 if not. E.g. single element compound has mostly mask of 0's
        # element
        emask = frac
        # if I changed frac to 1's and then normalized, then I think I get element-only (not compositional)
        # emask[emask != 0] = 1.0
        # emask = nn.functional.normalize(emask, p=1, dim=1)
        # or change frac to 0's which eliminates elemental contribution
        # NOTE(review): emask aliases frac, so this also zeroes the caller's
        # frac tensor in place — confirm this experiment is intended.
        emask[emask != 0] = 0
        # category
        cmask = torch.ones(d1, device=self.compute_device, dtype=torch.float)
        # boolean
        bmask = bool_src
        bmask[bmask != 0] = 1
        bmask = bmask.float()
        # bmask = [bmask_sub//torch.count_nonzero(bmask_sub) for bmask_sub in bmask]
        # bmask = torch.ones(d2, device=self.compute_device, dtype=torch.float)
        # bmask = torch.zeros(d2, device=self.compute_device, dtype=torch.float)
        # float
        fmask = torch.ones(d3, device=self.compute_device, dtype=torch.float)
        # L-1 normalize rows (sum to 1)
        masks = [emask, cmask, bmask, fmask]
        # masks = [nn.functional.normalize(mask, p=1, dim=1) for mask in masks]
        # concatenate masks
        mask_2d = torch.cat(masks, dim=1)
        # make "rows" sum to 1
        # mask_2d = nn.functional.normalize(mask_2d, p=1, dim=1)
        # unsqueeze
        mask = mask_2d.unsqueeze(dim=-1)
        # create src_mask: outer product gives pairwise presence, then invert
        mask = torch.matmul(mask, mask.transpose(-2, -1))
        mask[mask != 0] = 1
        src_mask = mask[:, 0] != 1
        # fractional encoding, see Fig 6 of 10.1038/s41524-021-00545-1
        pe = torch.zeros_like(x)  # prevalence encoding
        ple = torch.zeros_like(x)  # prevalence log encoding
        pe_scaler = 2 ** (1 - self.pos_scaler) ** 2
        ple_scaler = 2 ** (1 - self.pos_scaler_log) ** 2
        # first half of features are prevalence encoded (i.e. 512//2==256)
        pe[:, :, : self.d_model // 2] = self.pe(mask_2d) * pe_scaler
        # second half of features are prevalence log encoded
        ple[:, :, self.d_model // 2 :] = self.ple(mask_2d) * ple_scaler
        if self.attention:
            # sum of fc_mat2vec embedding (x), prevalence encoding (pe), and prevalence log encoding (ple)
            # see Fig 6 of 10.1038/s41524-021-00545-1
            x_src = x + pe + ple
            x_src = x_src.transpose(0, 1)
            # transformer encoding
            """note on src_key_padding_mask: if provided, specified padding elements
            in the key will be ignored by the attention. When given a binary mask
            and a value is True, the corresponding value on the attention layer
            will be ignored. When given a byte mask and a value is non-zero, the
            corresponding value on the attention layer will be ignored.
            Source: https://pytorch.org/docs/stable/generated/torch.nn.MultiheadAttention.html
            https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoder.html"""
            x = self.transformer_encoder(x_src, src_key_padding_mask=src_mask)
            x = x.transpose(0, 1)
        if self.fractional:
            # NOTE(review): frac was zeroed in place above (emask alias), so
            # this multiplication zeroes x entirely when fractional=True — TODO confirm.
            x = x * frac.unsqueeze(2).repeat(1, 1, self.d_model)
        """0:1 index eliminates the repeated values (down to 1 colummn)
        repeat() fills it back up (to e.g. d_model == 512 values)"""
        hmask = mask[:, :, 0:1].repeat(1, 1, self.d_model)
        if mask is not None:
            # set values of x which correspond to an element not being present to 0
            x = x.masked_fill(hmask == 0, 0)
        return x
"""mini code graveyard"""
"""
#nrobo_feat = sum([feat.shape[1] for feat in [cat_feat, bool_feat, float_feat]])
# ones = torch.ones(d, device=self.compute_device, dtype=src.dtype)
# frac = torch.cat([frac, ones / nrobo_feat], dim=1)
# d = [frac.shape[0], nrobo_feat]
# nrobo_feat = x.shape[1] - src.shape[1]
# "fractional coordinates" for structural features are constant (ones or scaled constant)
# should I divide by the number of structural features? Probably best to have some type of normalization to avoid nans
| |
#!/usr/bin/env python3
"""
Surrogate utility.
Attributes
----------
LGR
Logger
"""
import logging
from copy import deepcopy
from math import factorial, floor, ceil
import numpy as np
from nigsp.operations.timeseries import graph_fourier_transform
from nigsp.operations.laplacian import decomposition
# Module-level logger for this utility.
LGR = logging.getLogger(__name__)
# Supported surrogate-generation flavours (see sc_informed / sc_uninformed).
SURR_TYPE = ['informed', 'uninformed']
# Supported statistical tests (see test_significance).
STAT_METHOD = ['Bernoulli', 'frequentist']
def random_sign(eigenvec, n_surr=1000, seed=42, stack=False):
    """
    Create surrogates by randomly switching signs of eigenvectors.

    Parameters
    ----------
    eigenvec : numpy.ndarray
        A matrix of eigenvectors
    n_surr : int, optional
        Number of surrogates to create
    seed : int or None, optional
        Random seed (for repeatability)
    stack : bool, optional
        If True, add original eigenvec as last entry of the last dimension
        of the created surrogate matrix

    Returns
    -------
    numpy.ndarray
        The matrix of surrogates, of shape eigenvec * n_surr(+1)

    Raises
    ------
    NotImplementedError
        If eigenvec is 4+ D.
    """
    # #!# Allow for input of random sign matrix, if None call random sign.
    if seed is not None:
        # Reinitialise the random seed for repeatability
        np.random.seed(seed)
    if eigenvec.ndim > 3:
        raise NotImplementedError(f'Provided data has {eigenvec.ndim} dimensions, '
                                  'but data of more than 3 dimensions are not '
                                  'supported yet')

    def _draw_signs():
        """Draw one +/-1 sign per row of eigenvec (0.5 chance each)."""
        signs = np.random.rand(eigenvec.shape[0]).round()
        signs[signs == 0] = -1
        return signs

    surrogates = np.empty_like(eigenvec, dtype='float32')
    surrogates = surrogates[..., np.newaxis].repeat(n_surr, axis=-1)
    LGR.info('Randomly switching signs of eigenvectors to create surrogates.')
    for s in range(n_surr):
        # #!# Check if two conditions can be merged.
        if eigenvec.ndim < 3:
            surrogates[..., s] = eigenvec * _draw_signs()
        else:
            # 3D case: draw an independent sign flip per "subject" slice.
            for d in range(eigenvec.shape[2]):
                surrogates[:, :, d, s] = eigenvec[..., d] * _draw_signs()
    if stack:
        surrogates = np.append(surrogates, eigenvec[..., np.newaxis], axis=-1)
    return surrogates
def _create_surr(timeseries, eigenvec, n_surr, seed, stack):
    """
    Proper surrogate creation step.

    This is not meant to be called as a function.

    Parameters
    ----------
    timeseries : numpy.ndarray
        A 3D (or less) array coding a timeseries in the second axis (axis 1).
    eigenvec : numpy.ndarray
        The eigenvector matrix from a previous Laplacian decomposition.
    n_surr : int
        The number of surrogates to create
    seed : int or None
        The seed to reinitialise the RNG - used for replicability.
    stack : bool
        If True, append the real matrix at the end of the stack.

    Returns
    -------
    numpy.ndarray
        The surrogate matrix, of shape timeseries.shape, n_surr (+1 if stack).

    Raises
    ------
    NotImplementedError
        If timeseries is 4+ D and/or eigenvector matrix has not enough dimensions.
    """
    rand_evec = random_sign(eigenvec, n_surr, seed, stack)
    # BUGFIX: when stack=True, random_sign appends the real eigenvectors so
    # there are n_surr+1 entries to project. The count must be bumped *before*
    # allocating the output, otherwise the final loop iteration indexes past
    # the last axis (IndexError).
    if stack:
        n_surr += 1
    surr = np.empty_like(timeseries, dtype='float32')
    surr = surr[..., np.newaxis].repeat(n_surr, axis=-1)
    fourier_coeff = graph_fourier_transform(timeseries, eigenvec)
    LGR.info('Projecting the timeseries onto the surrogate eigenvectors.')
    for i in range(n_surr):
        if timeseries.ndim < 3 and rand_evec.ndim == timeseries.ndim+1:
            surr[..., i] = graph_fourier_transform(fourier_coeff, rand_evec[..., i].T)
        elif timeseries.ndim == 3:
            if rand_evec.ndim < 4:
                # one eigenvector set shared across all subjects
                surr[..., i] = graph_fourier_transform(fourier_coeff,
                                                       rand_evec[..., i].T)
            else:
                # per-subject eigenvector sets
                for j in range(rand_evec.shape[2]):
                    surr[:, :, j, i] = graph_fourier_transform(fourier_coeff,
                                                               rand_evec[:, :, j, i].T)
        else:
            raise NotImplementedError('No solution implemented for timeseries '
                                      f'of {timeseries.ndim} dimensions and '
                                      f'eigenvector matrix of {eigenvec.ndim}')
    return surr
def sc_informed(timeseries, eigenvec, n_surr=1000, seed=124, stack=False):
    """
    Create surrogates informed by the real structural connectivity.

    Thin validation wrapper around _create_surr that uses the *real*
    eigenvectors from the Laplacian decomposition.

    Parameters
    ----------
    timeseries : numpy.ndarray
        A 3D (or less) array coding a timeseries in the second axis (axis 1).
    eigenvec : numpy.ndarray
        The eigenvector matrix from a previous Laplacian decomposition.
    n_surr : int, optional
        The number of surrogates to create
    seed : int or None, optional
        The seed to reinitialise the RNG - used for replicability.
    stack : bool, optional
        If True, append the real matrix at the end of the stack.

    Returns
    -------
    numpy.ndarray
        The surrogate matrix, of shape timeseries.shape, n_surr

    Raises
    ------
    NotImplementedError
        If timeseries is 4+ D.
    """
    if timeseries.ndim > 3:
        raise NotImplementedError(
            f'Provided timeseries has {timeseries.ndim} dimensions, but '
            'timeseries of more than 3 dimensions are not supported yet.'
        )
    return _create_surr(timeseries, eigenvec, n_surr, seed, stack)
def sc_uninformed(timeseries, lapl_mtx, n_surr=1000, seed=98, stack=False):
    """
    Create surrogates ignorant of the real structural connectivity.

    Builds a configuration-model Laplacian from the degree sequence implied by
    the given Laplacian, decomposes it, and projects the timeseries onto the
    sign-flipped eigenvectors of that null model.

    Parameters
    ----------
    timeseries : numpy.ndarray
        A 3D (or less) array coding a timeseries in the second axis (axis 1).
    lapl_mtx : numpy.ndarray
        A symmetrically normalised laplacian matrix.
    n_surr : int, optional
        The number of surrogates to create
    seed : int or None, optional
        The seed to reinitialise the RNG - used for replicability.
    stack : bool, optional
        If True, append the real matrix at the end of the stack.

    Returns
    -------
    numpy.ndarray
        The surrogate matrix, of shape timeseries.shape, n_surr

    Raises
    ------
    NotImplementedError
        If timeseries is 4+ D.
    """
    if timeseries.ndim > 3:
        raise NotImplementedError(
            f'Provided timeseries has {timeseries.ndim} dimensions, but '
            'timeseries of more than 3 dimensions are not supported yet.'
        )
    # Adjacency-like matrix implied by the symmetric-normalised Laplacian.
    symm_norm = np.eye(lapl_mtx.shape[0]) - lapl_mtx
    degree = symm_norm.sum(axis=-1)
    # Configuration model: expected connectivity given the degree sequence.
    conf_model = np.outer(degree, degree.T) / symm_norm.sum()
    conf_lapl = np.diag(degree) - conf_model
    _, surr_eigenvec = decomposition(conf_lapl)
    return _create_surr(timeseries, surr_eigenvec, n_surr, seed, stack)
def test_significance(surr, data=None, method='Bernoulli', p=0.05,
return_masked=False, mean=False):
"""
Test the significance of the empirical data against surrogates.
Two methods are implemented, 'Bernoulli' and 'frequentist'.
- 'frequentist' is a group or single subject test. It tests that the
empirical data are in the highest (or lowest) percentile (where the
percentile is defined by p/2).
- 'Bernoulli' is a group test. It tests that the number of subjects for
which the empirical data is higher (or lower) than a set of surrogates
(frequentist approach) is at the tail of a binomial cumulative
distribution (where 'tail' is defined by p).
Note that p is expressed as two-tails test for the frequentist approach and
a one-tail test for the Bernoulli approach.
Both surr and data are expected to have first dimensions: observations x [subjects].
Parameters
----------
surr : numpy.ndarray
The surrogate matrix, where all surrogates are aligned along the last axis.
May have the empirical data matrix last along the last axis.
Expected to have shape: observations, [subjects,] surrogates.
data : numpy.ndarray or None, optional
The empirical data matrix. If given, it's appended at the end of the
surrogate matrix.
Expected to have shape: observations[, subjects].
method : 'Bernoulli' or 'frequentist', optional
The method to adopt for testing, either based on a Bernoulli process
or a frequentist observation (see above).
p : float, optional
The probability threshold to adopt. Note that this is a two-tails test.
return_masked : bool, optional
If True, returns the masked data. If False, returns a mask that holds
True where the good data are (inverse of numpy mask). Mask has the same
shape as data.
mean : bool, optional
If True, returns the average of the masked data along the last axis.
Returns
-------
numpy.ndarray
A numpy.ndarray shaped obervations[, subjects]. If return_masked is True,
returns the masked version of `data`, otherwise returns the mask.
If mean is True, returns the average along the subject axis.
Raises
------
NotImplementedError
If any other method rather than those listed above is selected.
"""
# #!# Check that the surrogate shape has parcels in the first axis!
# If provided, append data to surr
if data is not None:
if surr.shape[:data.ndim] != data.shape:
raise ValueError('Provided empirical data and surrogate data shapes '
f'does not agree, with shapes {data.shape} and '
f'{surr.shape[:data.ndim]} (last axis excluded)')
surr = np.append(surr, data[..., np.newaxis], axis=-1)
if surr.ndim < 3:
LGR.warning(f'Warning: surrogate dimensions ({surr.ndim}) are less than '
'the program expects - check that you mean to run a test on '
'an average or that you have enough surrogates.')
# Reorder the surrogate matrix, then find where the real surrogate is
LGR.info('Reordering surrogates for test')
real_idx = surr.shape[-1]-1
reord_surr = (np.argsort(surr, axis=-1) == real_idx)
LGR.info(f'Adopting {method} testing method.')
# Testing both tails requires to split p
if method == 'frequentist':
LGR.info(f'Testing for p={p} two-tails (p={p/2} each tail)')
p = p / 2
# If there aren't enough surrogates, send a warning message on the real p
# Then update p
if 1/surr.shape[-1] > p:
LGR.warning('The generated surrogates are not enough to test for '
f'the selected p ({p*2} two-tails), since at least '
f'{ceil(1/p)-1} surrogates are required for the selected '
f'p value. Testing for p={1/surr.shape[-1]} two-tails instead.')
p = 1 / surr.shape[-1]
elif method == | |
)
self.assertEqual( bPrims[0].attribValue( "Cd" ), ( 0, 0, 0 ) )
self.assertEqual( cPrims[0].attribValue( "Cd" ), ( 1, 0, 0 ) )
self.assertEqual( dPrims[0].attribValue( "Cd" ), ( 0.5, 0.5, 0 ) )
def testSaveLoadCortexObjects( self ) :
	"""Cortex primitives read from a SceneCache must survive node locking, hip save/load, and bgeo/bgeo.gz/geo file caching."""
	self.writeSCC()
	sop = self.sop()
	sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Cortex )
	null = sop.createOutputNode( "null" )
	nullPath = null.path()
	# three Cortex (Custom) prims are expected, one per cached location
	prims = null.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	# make sure they survive the locks
	null.setHardLocked( True )
	null.setInput( 0, None )
	prims = null.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	# make sure they survive a scene save/load
	hou.hipFile.save( TestSceneCache.__testHip )
	hou.hipFile.load( TestSceneCache.__testHip )
	null = hou.node( nullPath )
	prims = null.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	# make sure they survive bgeo caching
	writer = null.createOutputNode( "file" )
	writer.parm( "file" ).set( TestSceneCache.__testBgeo )
	writer.parm( "filemode" ).set( 2 ) # write
	writer.cook( force = True )
	reader = null.parent().createNode( "file" )
	reader.parm( "file" ).set( TestSceneCache.__testBgeo )
	prims = reader.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	# make sure they survive bgeo.gz caching
	writer.parm( "file" ).set( TestSceneCache.__testBgeoGz )
	writer.cook( force = True )
	reader = null.parent().createNode( "file" )
	reader.parm( "file" ).set( TestSceneCache.__testBgeoGz )
	prims = reader.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	# make sure they survive geo caching
	writer.parm( "file" ).set( TestSceneCache.__testGeo )
	writer.cook( force = True )
	reader = null.parent().createNode( "file" )
	reader.parm( "file" ).set( TestSceneCache.__testGeo )
	prims = reader.geometry().prims()
	self.assertEqual( len(prims), 3 )
	for i in range( 0, 3 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
def testStashing( self ) :
	"""Cooking a file SOP repeatedly from cached bgeo geometry must not error."""
	self.writeSCC()
	sceneSop = self.sop()
	fileWriter = sceneSop.createOutputNode( "file" )
	fileWriter.parm( "file" ).set( TestSceneCache.__testBgeo )
	fileWriter.parm( "filemode" ).set( 2 ) # write mode
	fileWriter.cook( force = True )
	fileReader = sceneSop.parent().createNode( "file" )
	fileReader.parm( "file" ).set( TestSceneCache.__testBgeo )
	# cook twice to verify the stashed geometry survives a recook
	fileReader.cook( force = True )
	fileReader.cook( force = True )
def testNonPrimitiveCortexObjects( self ) :
	"""Non-primitive cached objects (CoordinateSystem, IntVectorData) load as Cortex prims, animate over time, and are skipped (with warnings) in Houdini geometry mode."""
	scene = self.writeAnimSCC()
	# add a CoordinateSystem with an animated transform under /1
	coordChild = scene.child( "1" ).createChild( "coord" )
	coord = IECore.CoordinateSystem()
	coord.setName( "testing" )
	coord.setTransform( IECore.MatrixTransform( IECore.M44f.createTranslated( IECore.V3f( 1, 2, 3 ) ) ) )
	coordChild.writeObject( coord, 0 )
	coord.setTransform( IECore.MatrixTransform( IECore.M44f.createTranslated( IECore.V3f( 1, 5, 5 ) ) ) )
	coordChild.writeObject( coord, 1 )
	# add raw (non-primitive) data at the root
	intsChild = scene.createChild( "ints" )
	intsChild.writeObject( IECore.IntVectorData( [ 1, 10, 20, 30 ] ), 0 )
	del scene, coordChild, intsChild
	# sample at scene time 0 (Houdini runs one frame ahead of the cache)
	spf = 1.0 / hou.fps()
	hou.setTime( 0 - spf )
	sop = self.sop()
	sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Cortex )
	prims = sop.geometry().prims()
	self.assertEqual( len(prims), 5 )
	nameAttr = sop.geometry().findPrimAttrib( "name" )
	self.assertEqual( sorted(nameAttr.strings()), ['/1', '/1/2', '/1/2/3', '/1/coord', '/ints'] )
	for name in nameAttr.strings() :
		self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 1 )
	for i in range( 0, 5 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	aPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1' ][0]
	bPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/2' ][0]
	cPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/2/3' ][0]
	coordPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/coord' ][0]
	intsPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/ints' ][0]
	# each prim's single point sits at that location's world position
	self.assertEqual( aPrim.vertices()[0].point().position(), hou.Vector3( 1.5, 0.5, 0.5 ) )
	self.assertEqual( bPrim.vertices()[0].point().position(), hou.Vector3( 3.5, 0.5, 0.5 ) )
	self.assertEqual( cPrim.vertices()[0].point().position(), hou.Vector3( 6.5, 0.5, 0.5 ) )
	self.assertEqual( coordPrim.vertices()[0].point().position(), hou.Vector3( 2, 2, 3 ) )
	self.assertEqual( intsPrim.vertices()[0].point().position(), hou.Vector3( 0, 0, 0 ) )
	# the whole geometry converts back to a CompoundObject of the cached values
	converter = IECoreHoudini.FromHoudiniGeometryConverter.create( sop )
	self.assertTrue( isinstance( converter, IECoreHoudini.FromHoudiniCompoundObjectConverter ) )
	result = converter.convert()
	self.assertTrue( isinstance( result, IECore.CompoundObject ) )
	self.assertEqual( sorted( result.keys() ), ['/1', '/1/2', '/1/2/3', '/1/coord', '/ints'] )
	self.assertTrue( isinstance( result["/1"], IECore.MeshPrimitive ) )
	self.assertTrue( isinstance( result["/1/2"], IECore.MeshPrimitive ) )
	self.assertTrue( isinstance( result["/1/2/3"], IECore.MeshPrimitive ) )
	self.assertTrue( isinstance( result["/1/coord"], IECore.CoordinateSystem ) )
	self.assertEqual( result["/ints"], IECore.IntVectorData( [ 1, 10, 20, 30 ] ) )
	# advance to scene time 1: positions follow the animated transforms
	hou.setTime( 1 - spf )
	prims = sop.geometry().prims()
	self.assertEqual( len(prims), 5 )
	nameAttr = sop.geometry().findPrimAttrib( "name" )
	self.assertEqual( sorted(nameAttr.strings()), ['/1', '/1/2', '/1/2/3', '/1/coord', '/ints'] )
	for name in nameAttr.strings() :
		self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 1 )
	for i in range( 0, 5 ) :
		self.assertEqual( prims[i].type(), hou.primType.Custom )
		self.assertEqual( prims[i].vertices()[0].point().number(), i )
	aPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1' ][0]
	bPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/2' ][0]
	cPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/2/3' ][0]
	coordPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/1/coord' ][0]
	intsPrim = [ x for x in prims if x.attribValue( nameAttr ) == '/ints' ][0]
	self.assertEqual( aPrim.vertices()[0].point().position(), hou.Vector3( 1.5, 1.5, 0.5 ) )
	self.assertEqual( bPrim.vertices()[0].point().position(), hou.Vector3( 3.5, 2.5, 0.5 ) )
	self.assertEqual( cPrim.vertices()[0].point().position(), hou.Vector3( 6.5, 3.5, 0.5 ) )
	self.assertEqual( coordPrim.vertices()[0].point().position(), hou.Vector3( 2, 6, 5 ) )
	self.assertEqual( intsPrim.vertices()[0].point().position(), hou.Vector3( 0, 0, 0 ) )
	# Houdini geometry mode skips the non-primitive objects and warns about them
	sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Houdini )
	prims = sop.geometry().prims()
	self.assertEqual( len(prims), 18 )
	self.assertTrue( "/1/coord" in sop.warnings() )
	self.assertTrue( "/ints" in sop.warnings() )
	nameAttr = sop.geometry().findPrimAttrib( "name" )
	self.assertEqual( nameAttr.strings(), tuple( [ '/1', '/1/2', '/1/2/3' ] ) )
	for name in nameAttr.strings() :
		self.assertEqual( len([ x for x in prims if x.attribValue( "name" ) == name ]), 6 )
	self.assertEqual( prims[0].vertex( 0 ).point().position(), hou.Vector3( 1, 1, 0 ) )
	self.assertEqual( prims[6].vertex( 0 ).point().position(), hou.Vector3( 3, 2, 0 ) )
	self.assertEqual( prims[12].vertex( 0 ).point().position(), hou.Vector3( 6, 3, 0 ) )
def testCoordinateSystemNoTransform( self ) :
	"""Coordinate systems written without a local transform convert to single points at their parent-space position."""
	root = IECore.SceneCache( TestSceneCache.__testFile, IECore.IndexedIO.OpenMode.Write )
	parentCoord = root.createChild( "coord" )
	coordSystem = IECore.CoordinateSystem()
	coordSystem.setName( "testing" )
	parentCoord.writeObject( coordSystem, 0 )
	# a child coordinate system with an explicit transform
	childCoord = parentCoord.createChild( "other" )
	childCoord.writeTransform( IECore.M44dData( IECore.M44d.createTranslated( IECore.V3d( 1, 2, 3 ) ) ), 0 )
	childCoord.writeObject( coordSystem, 0 )
	del root, parentCoord, childCoord
	sop = self.sop()
	sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Cortex )
	prims = sop.geometry().prims()
	self.assertEqual( len(prims), 2 )
	nameAttr = sop.geometry().findPrimAttrib( "name" )
	self.assertEqual( sorted(nameAttr.strings()), [ "/coord", "/coord/other" ] )
	expected = [
		( "/coord", 0, hou.Vector3( 0, 0, 0 ) ),
		( "/coord/other", 1, hou.Vector3( 1, 2, 3 ) ),
	]
	for prim, ( name, pointNumber, position ) in zip( prims, expected ) :
		self.assertEqual( prim.attribValue( "name" ), name )
		self.assertEqual( prim.type(), hou.primType.Custom )
		self.assertEqual( prim.vertices()[0].point().number(), pointNumber )
		self.assertEqual( prim.vertices()[0].point().position(), position )
def testObjectMerge( self ) :
	"""An object_merge of a SceneCache SOP respects the transform mode and never mutates the source geometry."""
	self.writeSCC()
	xform = self.xform()
	xform.parm( "expand" ).pressButton()
	origSop = hou.node( xform.path()+"/1/2/geo/2" )
	merge = hou.node( "/obj" ).createNode( "geo" ).createNode( "object_merge" )
	merge.parm( "objpath1" ).set( origSop.path() )
	# not transformed because we haven't set "Into this Object"
	geo = merge.geometry()
	self.assertEqual( geo.points()[0].position(), hou.Vector3( 0.5, 0.5, 0.5 ) )
	self.assertEqual( geo.boundingBox(), hou.BoundingBox( 0, 0, 0, 1, 1, 1 ) )
	# transformed to its world position
	merge.parm( "xformtype" ).set( 1 ) # "Into this Object"
	geo = merge.geometry()
	self.assertEqual( geo.points()[0].position(), hou.Vector3( 3.5, 0.5, 0.5 ) )
	self.assertEqual( geo.boundingBox(), hou.BoundingBox( 3, 0, 0, 4, 1, 1 ) )
	mesh = IECoreHoudini.FromHoudiniGeometryConverter.create( merge ).convert()
	self.assertEqual( mesh.bound(), IECore.Box3f( IECore.V3f( 3, 0, 0 ), IECore.V3f( 4, 1, 1 ) ) )
	# didn't affect the original SOP because it stores its own copy of the prim
	geo = origSop.geometry()
	self.assertEqual( geo.points()[0].position(), hou.Vector3( 0.5, 0.5, 0.5 ) )
	self.assertEqual( geo.boundingBox(), hou.BoundingBox( 0, 0, 0, 1, 1, 1 ) )
	mesh = IECoreHoudini.FromHoudiniGeometryConverter.create( origSop ).convert()
	self.assertEqual( mesh.bound(), IECore.Box3f( IECore.V3f( 0 ), IECore.V3f( 1 ) ) )
	# transformable at the SOP level as well
	sopXform = merge.createOutputNode( "xform" )
	sopXform.parm( "ty" ).set( 7 )
	geo = sopXform.geometry()
	self.assertEqual( geo.points()[0].position(), hou.Vector3( 3.5, 7.5, 0.5 ) )
	self.assertEqual( geo.boundingBox(), hou.BoundingBox( 3, 7, 0, 4, 8, 1 ) )
	mesh = IECoreHoudini.FromHoudiniGeometryConverter.create( sopXform ).convert()
	self.assertEqual( mesh.bound(), IECore.Box3f( IECore.V3f( 3, 7, 0 ), IECore.V3f( 4, 8, 1 ) ) )
	# didn't affect the input SOP because it stores its own copy of the prim
	geo = merge.geometry()
	self.assertEqual( geo.points()[0].position(), hou.Vector3( 3.5, 0.5, 0.5 ) )
	self.assertEqual( geo.boundingBox(), hou.BoundingBox( 3, 0, 0, 4, 1, 1 ) )
	mesh = IECoreHoudini.FromHoudiniGeometryConverter.create( merge ).convert()
	self.assertEqual( mesh.bound(), IECore.Box3f( IECore.V3f( 3, 0, 0 ), IECore.V3f( 4, 1, 1 ) ) )
def testPointsDontAccumulate( self ) :
	"""Scattered points cached over an animated frame range must keep a constant count on read-back."""
	geo = hou.node( "/obj" ).createNode( "geo", run_init_scripts=False )
	box = geo.createNode( "box" )
	facet = geo.createNode( "facet" )
	facet.parm( "postnml" ).set( True )
	facet.setInput( 0, box )
	scatter = geo.createNode( "scatter" )
	scatter.parm( "npts" ).set( 5000 )
	scatter.setInput( 0, facet )
	scatter.setRenderFlag( True )
	scatter.setDisplayFlag( True )
	# write frames 1-10 to the test cache
	rop = self.rop( geo )
	rop.parm( "trange" ).set( 1 )
	rop.parmTuple( "f" ).set( ( 1, 10, 1 ) )
	rop.parm( "execute" ).pressButton()
	sop = self.sop()
	sop.parm( "file" ).set( TestSceneCache.__testOutFile )
	sop.parm( "geometryType" ).set( IECoreHoudini.SceneCacheNode.GeometryType.Houdini )
	# the point count must stay fixed from frame to frame
	for frame in range( 0, 10 ) :
		hou.setTime( frame )
		self.assertEqual( len(sop.geometry().points()), 5000 )
		self.assertEqual( len(sop.geometry().prims()), 1 )
def testTimeDependent( self ) :
self.writeSCC()
xform = self.xform()
xform.parm( "expand" ).pressButton()
self.assertFalse( hou.node( xform.path()+"/1" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/geo" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/geo/1" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2/geo" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2/geo/2" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2/3" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2/3/geo" ).isTimeDependent() )
self.assertFalse( hou.node( xform.path()+"/1/2/3/geo/3" ).isTimeDependent() )
scene = self.writeSCC()
sc1 | |
empty data."))
if (self.view): raise(Data.Unsupported("Cannot retype a data view, either retype original data or copy and then retype."))
# Handle a single column or type being provided.
if (type(columns) == str): columns = [columns]
elif (type(columns) == int): columns = [self.names[columns]]
# Initialize "columns" if it was not provided to be the whole Data.
if (columns is None): columns = list(range(len(self.names)))
if (type(types) == type): types = [types] * len(columns)
elif (len(types) != len(columns)):
raise(Data.BadSpecifiedType(f"{Data.retype} given {len(types)} types, epxected {len(columns)}."))
# Verify that all columns are valid names, convert to integers.
for i in range(len(columns)):
if (type(columns[i]) == str):
if (columns[i] not in self.names):
raise(Data.BadSpecifiedName(f"No column named '{col}' exists in this data."))
columns[i] = self.names.index(columns[i])
elif (type(columns[i]) == int):
if not (-len(self.names) <= columns[i] < len(self.names)):
raise(Data.BadIndex(f"Provided column index {columns[i]} is out of range."))
else: raise(Data.ImproperUsage(f"Unrecognized column index {columns[i]}."))
for j,(new_t, c) in enumerate(zip(types, columns)):
# Update user on progress if too much time has elapsed..
if (time.time() - start) > self.max_wait:
print(f" {100.*j/len(columns):.2f}% retype", end="\r", flush=True)
start = time.time()
old_t = self.types[c]
if (new_t == old_t): continue
# Update the stored type
self.types[c] = new_t
# Retype all non-missing elements in that column (in place)
for i in range(len(self)):
if (self[i,c] is None): continue
try:
self[i,c] = new_t(self[i,c])
except ValueError:
raise(Data.BadSpecifiedType(f"Type casting {new_t} for column {c} is not compatible with existing value '{self[i,c]}' on row {i}."))
    # Given a new column, add it to this Data.
    def add_column(self, column, name=None, index=None, new_type=type(None)):
        """Insert one column of values into this Data in place.

        column   : iterable of values, one per row; None marks a missing value.
        name     : optional string column name; autogenerated ("0", "1", ...)
                   when omitted.  Non-string or duplicate names raise
                   Data.BadSpecifiedName.
        index    : optional insertion position; defaults to the last column.
        new_type : expected element type; inferred from the first non-None
                   value when left as type(None).  Mixed types raise
                   Data.BadValue.

        Raises Data.BadData when the column length does not match len(self).
        On error, already-inserted values are rolled back before raising.
        """
        import time
        start = time.time()
        # Special case for being empty.
        if (self.empty):
            if (name is None): name = "0"
            self.names = []
            self.types = []
            index = 0
        # Set the default index to add a column to be the end.
        if (index == None): index = self.shape[1]
        # Verify the name.
        if (name is None):
            # Autogenerate the first unused numeric name.
            num = 0
            while (str(num) in self.names): num += 1
            name = str(num)
        elif (type(name) != str):
            raise(Data.BadSpecifiedName(f"Only string names are allowed. Received name '{name}' with type {type(name)}."))
        elif (name in self.names):
            raise(Data.BadSpecifiedName(f"Attempting to add duplicate column name '{name}' that already exists in this Data."))
        # Verify the column type dynamically. Add new values to all rows.
        # 'i' starts at -1 so an empty provided column is caught by the
        # length check after the loop.
        i = -1
        for i,val in enumerate(column):
            # Update user on progress if too much time has elapsed..
            if (time.time() - start) > self.max_wait:
                print(f" {100.*i/len(self):.2f}% add column", end="\r", flush=True)
                start = time.time()
            # Verify valid index first..
            if (self.shape[1] > 1) and (i >= len(self)):
                # Remove the added elements if the length was not right
                for j in range(len(self)): self[j].pop(index)
                # Raise error for too long of a column
                raise(Data.BadData(f"Provided column has at least {i+1} elements, more than the length of this data ({len(self)})."))
            # If this is the first column in the data.
            elif (self.shape[1] == 0):
                self.append(Data.Row(self, [val]))
            # Append the value to this row for normal operation.
            else: self[i].insert(index, val)
            # Only add to missing values entry if it's not already there
            if (val is None): pass
            # Capture the new type (type must be right)
            elif (new_type == type(None)): new_type = type(val)
            # Error out otherwise.
            elif (type(val) != new_type):
                # Remove the added elements because a problem was encountered.
                for j in range(i+1): self[j].pop(index)
                # This is a new type, problem!
                raise(Data.BadValue(f"Provided column has multiple types. Original type {new_type}, but '{val}' has type {type(val)}."))
        # Verify the column length
        if (i < len(self)-1):
            # Remove the added elements if the length was not right
            for j in range(i+1): self[j].pop(index)
            # Raise error for too short of a column
            raise(Data.BadData(f"Provided column has length {i+1}, less than the length of this data ({len(self)})."))
        # Set the name and type.
        self.names.insert(index, name)
        self.types.insert(index, new_type)
    # Given a column made of iterables, unpack all elements and make
    # as many columns as are necessary to suit the largest length. Make
    # new columns with naming scheme, fill empty slots with None.
    def unpack(self, column):
        """Expand one column of iterables into numbered sibling columns.

        The source column is popped from this Data and replaced by columns
        named "<column> 1", "<column> 2", ..., where entry k of row r is the
        k-th element yielded by row r's original value.  Exhausted (or None)
        rows are padded with None; non-iterable values act as one-element
        iterables.

        Raises Data.Unsupported for views, Data.BadIndex for unknown column
        names, and Data.ImproperUsage when no element supports iteration.
        """
        # Check for errors.
        if (self.view): raise(Data.Unsupported(f"This Data is a view and cannot be unpacked."))
        if (column not in self.names): raise(Data.BadIndex(f"No column named '{column}' exists in this Data."))
        # Check to see if the column contains things that are iterable.
        if (not any(is_iterable(v) for v in self[column])):
            raise(Data.ImproperUsage(f"No elements of '{column}' support iteration."))
        # Start tracking runtime.
        import time
        start = time.time()
        # Extract the old values from this Data one element at a time,
        # making a list of iterators for each element.
        values = []
        for v in self.pop(column):
            if (v is None):
                values.append(None)
            else:
                # Try iterating over the object, otherwise construct a
                # tuple containing just the first value (since it
                # can't provide iteration).
                try: values.append( iter(v) )
                except: values.append( iter((v,)) )
        # One element at a time, add columns to this Data.
        idx = 1
        # Single-element list so the nested generator below can mutate the
        # counter (a plain int could not be rebound from the closure).
        empty_elements = [0]
        while (empty_elements[0] < len(self)):
            # Update user on progress if too much time has elapsed..
            if (time.time() - start) > self.max_wait:
                print(f" {idx} inflating..", end="\r", flush=True)
                start = time.time()
            # Look for the next element in each iterable.
            empty_elements[0] = 0
            # Construct a generator that pulls one element from each row.
            def column_generator():
                for i in range(len(values)):
                    if (values[i] is None):
                        empty_elements[0] += 1
                        yield None
                    else:
                        try:
                            yield next(values[i])
                        except StopIteration:
                            # This row's iterator is exhausted; mark it so
                            # it yields None from now on.
                            empty_elements[0] += 1
                            values[i] = None
                            yield None
            # Add the column using the generator object if it's not empty.
            column_name = column + f' {idx}'
            self.add_column(column_generator(), name=column_name)
            idx += 1
        # Pop out the last added column, because it only contains empty elements.
        self.pop(column_name)
    # Reverse the 'unpack' operation, using the same expected naming scheme.
    def pack(self, name):
        """Collapse columns "<name> 1", "<name> 2", ... back into one column.

        The numbered columns are popped (in numeric order) and each row's
        values are gathered into a list with the trailing run of None values
        stripped; a row containing only None values becomes None.

        Raises Data.Unsupported for views and Data.BadIndex when no column
        name starts with ``name``.
        """
        if (self.view): raise(Data.Unsupported(f"This Data is a view and cannot be packed."))
        if all((name != n[:len(name)]) for n in self.names):
            raise(Data.BadIndex(f"No flattened columns by name '{name}' exist in this Data."))
        # Start tracking runtime.
        import time
        # Single-element list so the nested generator below can rebind the timer.
        start = [time.time()]
        # Identify those columns that need to be combined into one column.
        to_collapse = [n for n in self.names if n[:len(name)] == name]
        # Sort the keys by their numerical index.
        to_collapse.sort(key=lambda n: int(n[len(name)+1:]))
        def row_generator():
            for i,row in enumerate(zip(*(self.pop(col) for col in to_collapse))):
                # Update user on progress if too much time has elapsed..
                if (time.time() - start[0]) > self.max_wait:
                    print(f" {i+1}:{len(self)} packing..", end="\r", flush=True)
                    start[0] = time.time()
                # Find the location of the last None value in this row.
                for last_none in range(1,len(row)+1):
                    if (row[-last_none] is not None): break
                # If there are *only* None values, yield None.
                else:
                    yield None
                    continue
                # Strip the trailing run of None values before yielding.
                if (last_none > 1): yield list(row[:1-last_none])
                else: yield list(row)
        # Pop out all of the old columns and add one new column.
        self.add_column(row_generator(), name=name)
# Given a list of column names, modify this Data so that all
# specified column names are stacked in lists associated with
# unique combinations of unspecified column names.
def stack(self, columns):
if (self.view): raise(Data.Unsupported("Cannot 'stack' a data view, either 'stack' original data or copy and then 'stack'."))
from .utilities import hash
import time
start = time.time()
# Adjust for usage (where user provides only one column).
if (type(columns) != list): columns = [columns]
# Verify all of the provided columns.
for i in range(len(columns)):
if (type(columns[i]) == int): columns[i] = self.names[i]
elif (type(columns[i]) == str):
if (columns[i] not in self.names):
raise(Data.BadIndex(f"There is no column named '{columns[i]}' in this data."))
else:
raise(Data.BadIndex("The index '{columns[i]}' is not recognized. Only {int} and {str} are allowed."))
# Create a view of the columns that will be kept for hashing.
keep_columns = [i for (i,n) in enumerate(self.names) if n not in columns]
keep_view = self[:,keep_columns]
# Get all columns that will be stacked and rename them.
stacked_columns = [n for n in self.names if n in columns]
for i in map(self.names.index, stacked_columns):
self.names[i] += " unstacked"
| |
) -> "SupervisionSegment":
"""
Return a copy of the current segment, transformed with ``transform_fn``.
:param transform_fn: a function that takes a segment as input, transforms it and returns a new segment.
:return: a modified ``SupervisionSegment``.
"""
return transform_fn(self)
def transform_text(
self, transform_fn: Callable[[str], str]
) -> "SupervisionSegment":
"""
Return a copy of the current segment with transformed ``text`` field.
Useful for text normalization, phonetic transcription, etc.
:param transform_fn: a function that accepts a string and returns a string.
:return: a ``SupervisionSegment`` with adjusted text.
"""
if self.text is None:
return self
return fastcopy(self, text=transform_fn(self.text))
def transform_alignment(
self, transform_fn: Callable[[str], str], type: Optional[str] = "word"
) -> "SupervisionSegment":
"""
Return a copy of the current segment with transformed ``alignment`` field.
Useful for text normalization, phonetic transcription, etc.
:param type: alignment type to transform (key for alignment dict).
:param transform_fn: a function that accepts a string and returns a string.
:return: a ``SupervisionSegment`` with adjusted alignments.
"""
if self.alignment is None:
return self
return fastcopy(
self,
alignment={
ali_type: [
item.transform(transform_fn=transform_fn)
if ali_type == type
else item
for item in ali
]
for ali_type, ali in self.alignment.items()
},
)
    def to_dict(self) -> dict:
        # Serialize to a plain dict, dropping keys whose value is None.
        return asdict_nonull(self)
@staticmethod
def from_dict(data: dict) -> "SupervisionSegment":
from lhotse.serialization import deserialize_custom_field
if "custom" in data:
deserialize_custom_field(data["custom"])
if "alignment" in data:
data["alignment"] = {
k: [AlignmentItem(**x) for x in v] for k, v in data["alignment"].items()
}
return SupervisionSegment(**data)
    def __setattr__(self, key: str, value: Any):
        """
        This magic function is called when the user tries to set an attribute.
        We use it as syntactic sugar to store custom attributes in ``self.custom``
        field, so that they can be (de)serialized later.
        """
        # Declared dataclass fields go through the normal mechanism; any
        # other name is stashed in the ``custom`` dict instead.
        if key in self.__dataclass_fields__:
            super().__setattr__(key, value)
        else:
            # ``custom`` may still be None; start a fresh dict in that case.
            custom = ifnone(self.custom, {})
            custom[key] = value
            self.custom = custom
def __getattr__(self, name: str) -> Any:
"""
This magic function is called when the user tries to access an attribute
of :class:`.SupervisionSegment` that doesn't exist.
It is used as syntactic sugar for accessing the custom supervision attributes.
We use it to look up the ``custom`` field: when it's None or empty,
we'll just raise AttributeError as usual.
If ``item`` is found in ``custom``, we'll return ``self.custom[item]``.
Example of adding custom metadata and retrieving it as an attribute::
>>> sup = SupervisionSegment('utt1', recording_id='rec1', start=0,
... duration=1, channel=0, text='Yummy.')
>>> sup.gps_coordinates = "34.1021097,-79.1553182"
>>> coordinates = sup.gps_coordinates
"""
try:
return self.custom[name]
except:
raise AttributeError(f"No such attribute: {name}")
class SupervisionSet(Serializable, Sequence[SupervisionSegment]):
"""
:class:`~lhotse.supervision.SupervisionSet` represents a collection of segments containing some
supervision information (see :class:`~lhotse.supervision.SupervisionSegment`),
that are indexed by segment IDs.
It acts as a Python ``dict``, extended with an efficient ``find`` operation that indexes and caches
the supervision segments in an interval tree.
It allows to quickly find supervision segments that correspond to a specific time interval.
When coming from Kaldi, think of :class:`~lhotse.supervision.SupervisionSet` as a ``segments`` file on steroids,
that may also contain *text*, *utt2spk*, *utt2gender*, *utt2dur*, etc.
Examples
Building a :class:`~lhotse.supervision.SupervisionSet`::
>>> from lhotse import SupervisionSet, SupervisionSegment
>>> sups = SupervisionSet.from_segments([SupervisionSegment(...), ...])
Writing/reading a :class:`~lhotse.supervision.SupervisionSet`::
>>> sups.to_file('supervisions.jsonl.gz')
>>> sups2 = SupervisionSet.from_file('supervisions.jsonl.gz')
Using :class:`~lhotse.supervision.SupervisionSet` like a dict::
>>> 'rec00001-sup00000' in sups
True
>>> sups['rec00001-sup00000']
SupervisionSegment(id='rec00001-sup00000', recording_id='rec00001', start=0.5, ...)
>>> for segment in sups:
... pass
Searching by ``recording_id`` and time interval::
>>> matched_segments = sups.find(recording_id='rec00001', start_after=17.0, end_before=25.0)
Manipulation::
>>> longer_than_5s = sups.filter(lambda s: s.duration > 5)
>>> first_100 = sups.subset(first=100)
>>> split_into_4 = sups.split(num_splits=4)
>>> shuffled = sups.shuffle()
"""
def __init__(self, segments: Mapping[str, SupervisionSegment]) -> None:
self.segments = ifnone(segments, {})
    def __eq__(self, other: "SupervisionSet") -> bool:
        # Equality is defined entirely by the underlying id->segment mappings.
        return self.segments == other.segments
    @property
    def is_lazy(self) -> bool:
        """
        Indicates whether this manifest was opened in lazy (read-on-the-fly) mode or not.
        """
        # Local import; presumably deferred to avoid an import cycle -- confirm.
        from lhotse.serialization import LazyJsonlIterator
        return isinstance(self.segments, LazyJsonlIterator)
    @property
    def ids(self) -> Iterable[str]:
        # Keys of the id->segment mapping.  NOTE(review): assumes ``segments``
        # exposes ``.keys()``; verify this holds for lazy manifests too.
        return self.segments.keys()
    @staticmethod
    def from_segments(segments: Iterable[SupervisionSegment]) -> "SupervisionSet":
        # Index the segments by their id before constructing the set
        # (the helper's name suggests it also checks for duplicates).
        return SupervisionSet(segments=index_by_id_and_check(segments))
    # Alias so the generic ``from_items`` constructor name works as well.
    from_items = from_segments
@staticmethod
def from_dicts(data: Iterable[Dict]) -> "SupervisionSet":
return SupervisionSet.from_segments(
SupervisionSegment.from_dict(s) for s in data
)
@staticmethod
def from_rttm(path: Union[Pathlike, Iterable[Pathlike]]) -> "SupervisionSet":
"""
Read an RTTM file located at ``path`` (or an iterator) and create a :class:`.SupervisionSet` manifest for them.
Can be used to create supervisions from custom RTTM files (see, for example, :class:`lhotse.dataset.DiarizationDataset`).
.. code:: python
>>> from lhotse import SupervisionSet
>>> sup1 = SupervisionSet.from_rttm('/path/to/rttm_file')
>>> sup2 = SupervisionSet.from_rttm(Path('/path/to/rttm_dir').rglob('ref_*'))
The following description is taken from the [dscore](https://github.com/nryant/dscore#rttm) toolkit:
Rich Transcription Time Marked (RTTM) files are space-delimited text files
containing one turn per line, each line containing ten fields:
- ``Type`` -- segment type; should always by ``SPEAKER``
- ``File ID`` -- file name; basename of the recording minus extension (e.g.,
``rec1_a``)
- ``Channel ID`` -- channel (1-indexed) that turn is on; should always be
``1``
- ``Turn Onset`` -- onset of turn in seconds from beginning of recording
- ``Turn Duration`` -- duration of turn in seconds
- ``Orthography Field`` -- should always by ``<NA>``
- ``Speaker Type`` -- should always be ``<NA>``
- ``Speaker Name`` -- name of speaker of turn; should be unique within scope
of each file
- ``Confidence Score`` -- system confidence (probability) that information
is correct; should always be ``<NA>``
- ``Signal Lookahead Time`` -- should always be ``<NA>``
For instance:
SPEAKER CMU_20020319-1400_d01_NONE 1 130.430000 2.350 <NA> <NA> juliet <NA> <NA>
SPEAKER CMU_20020319-1400_d01_NONE 1 157.610000 3.060 <NA> <NA> tbc <NA> <NA>
SPEAKER CMU_20020319-1400_d01_NONE 1 130.490000 0.450 <NA> <NA> chek <NA> <NA>
:param path: Path to RTTM file or an iterator of paths to RTTM files.
:return: a new ``SupervisionSet`` instance containing segments from the RTTM file.
"""
from pathlib import Path
path = [path] if isinstance(path, (Path, str)) else path
segments = []
for file in path:
with open(file, "r") as f:
for idx, line in enumerate(f):
parts = line.strip().split()
assert len(parts) == 10, f"Invalid RTTM line in file {file}: {line}"
recording_id = parts[1]
segments.append(
SupervisionSegment(
id=f"{recording_id}-{idx:06d}",
recording_id=recording_id,
channel=int(parts[2]),
start=float(parts[3]),
duration=float(parts[4]),
speaker=parts[7],
)
)
return SupervisionSet.from_segments(segments)
def with_alignment_from_ctm(
self, ctm_file: Pathlike, type: str = "word", match_channel: bool = False
) -> "SupervisionSet":
"""
Add alignments from CTM file to the supervision set.
:param ctm: Path to CTM file.
:param type: Alignment type (optional, default = `word`).
:param match_channel: if True, also match channel between CTM and SupervisionSegment
:return: A new SupervisionSet with AlignmentItem objects added to the segments.
"""
ctm_words = []
with open(ctm_file) as f:
for line in f:
reco_id, channel, start, duration, symbol = line.strip().split()
ctm_words.append(
(reco_id, int(channel), float(start), float(duration), symbol)
)
ctm_words = sorted(ctm_words, key=lambda x: (x[0], x[2]))
reco_to_ctm = defaultdict(
list, {k: list(v) for k, v in groupby(ctm_words, key=lambda x: x[0])}
)
segments = []
num_total = len(ctm_words)
num_overspanned = 0
for reco_id in set([s.recording_id for s in self]):
if reco_id in reco_to_ctm:
for seg in self.find(recording_id=reco_id):
alignment = [
AlignmentItem(symbol=word[4], start=word[2], duration=word[3])
for word in reco_to_ctm[reco_id]
if overspans(seg, TimeSpan(word[2], word[2] + word[3]))
and (seg.channel == word[1] or not match_channel)
]
num_overspanned += len(alignment)
segments.append(fastcopy(seg, alignment={type: alignment}))
else:
segments.append([s for s in self.find(recording_id=reco_id)])
logging.info(
f"{num_overspanned} alignments added out of {num_total} total. If there are several"
" missing, there could be a mismatch problem."
)
return SupervisionSet.from_segments(segments)
def write_alignment_to_ctm(self, ctm_file: Pathlike, type: str = "word") -> None:
"""
Write alignments to CTM file.
:param ctm_file: Path to output CTM file (will be created if not exists)
:param type: Alignment type to write (default = `word`)
"""
with open(ctm_file, "w") as f:
for s in self:
if type in s.alignment:
for ali in s.alignment[type]:
f.write(
f"{s.recording_id} {s.channel} {ali.start:.02f} {ali.duration:.02f} {ali.symbol}\n"
)
    def to_dicts(self) -> Iterable[dict]:
        # Lazily serialize every segment (a generator, not a list).
        return (s.to_dict() for s in self)
def shuffle(self, rng: Optional[random.Random] = None) -> "SupervisionSet":
"""
Shuffle the supervision IDs in the current :class:`.SupervisionSet` and return a shuffled copy of self.
:param rng: an optional instance of ``random.Random`` for precise control of randomness.
:return: a shuffled copy of self.
"""
if rng is None:
rng = random
ids = list(self.ids)
rng.shuffle(ids)
return SupervisionSet(segments={sid: self[sid] for sid in ids})
def split(
self, num_splits: | |
+----------+-------+--------+
| Exponent | Name | Suffix |
+==========+=======+========+
| 1E-24 | yocto | y |
+----------+-------+--------+
| 1E-21 | zepto | z |
+----------+-------+--------+
| 1E-18 | atto | a |
+----------+-------+--------+
| 1E-15 | femto | f |
+----------+-------+--------+
| 1E-12 | pico | p |
+----------+-------+--------+
| 1E-9 | nano | n |
+----------+-------+--------+
| 1E-6 | micro | u |
+----------+-------+--------+
| 1E-3 | milli | m |
+----------+-------+--------+
| 1E+0 | | |
+----------+-------+--------+
| 1E+3 | kilo | k |
+----------+-------+--------+
| 1E+6 | mega | M |
+----------+-------+--------+
| 1E+9 | giga | G |
+----------+-------+--------+
| 1E+12 | tera | T |
+----------+-------+--------+
| 1E+15 | peta | P |
+----------+-------+--------+
| 1E+18 | exa | E |
+----------+-------+--------+
| 1E+21 | zetta | Z |
+----------+-------+--------+
| 1E+24 | yotta | Y |
+----------+-------+--------+
For example:
>>> import peng
>>> peng.peng(1235.6789E3, 3, False)
'1.236M'
"""
# The decimal module has a to_eng_string() function, but it does not seem
# to work well in all cases. For example:
# >>> decimal.Decimal('34.5712233E8').to_eng_string()
# '3.45712233E+9'
# >>> decimal.Decimal('34.57122334E8').to_eng_string()
# '3457122334'
# It seems that the conversion function does not work in all cases
#
# Return formatted zero if number is zero, easier to not deal with this
# special case through the rest of the algorithm
if number == 0:
number = "0.{zrs}".format(zrs="0" * frac_length) if frac_length else "0"
# Engineering notation numbers can have a sign, a 3-digit integer part,
# a period, and a fractional part of length frac_length, so the
# length of the number to the left of, and including, the period is 5
return "{0} ".format(number.rjust(5 + frac_length)) if rjust else number
# Low-bound number
sign = +1 if number >= 0 else -1
ssign = "-" if sign == -1 else ""
anumber = abs(number)
if anumber < 1e-24:
anumber = 1e-24
number = sign * 1e-24
# Round fractional part if requested frac_length is less than length
# of fractional part. Rounding method is to add a '5' at the decimal
# position just after the end of frac_length digits
exp = 3.0 * math.floor(math.floor(math.log10(anumber)) / 3.0)
mant = number / 10 ** exp
# Because exponent is a float, mantissa is a float and its string
# representation always includes a period
smant = str(mant)
ppos = smant.find(".")
if len(smant) - ppos - 1 > frac_length:
mant += sign * 5 * 10 ** (-frac_length - 1)
if abs(mant) >= 1000:
exp += 3
mant = mant / 1e3
smant = str(mant)
ppos = smant.find(".")
# Make fractional part have frac_length digits
bfrac_length = bool(frac_length)
flength = ppos - (not bfrac_length) + frac_length + 1
new_mant = smant[:flength].ljust(flength, "0")
# Upper-bound number
if exp > 24:
new_mant, exp = (
"{sign}999.{frac}".format(sign=ssign, frac="9" * frac_length),
24,
)
# Right-justify number, engineering notation numbers can have a sign,
# a 3-digit integer part and a period, and a fractional part of length
# frac_length, so the length of the number to the left of the
# period is 4
new_mant = new_mant.rjust(rjust * (4 + bfrac_length + frac_length))
# Format number
num = "{mant}{suffix}".format(
mant=new_mant, suffix=_POWER_TO_SUFFIX_DICT[exp] if exp else " " * bool(rjust)
)
return num
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_float(snum):
    r"""
    Return floating point equivalent of a number represented in engineering notation.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: float
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_float
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_float(peng.peng(1235.6789E3, 3, False))
        1236000.0
    """
    # Equivalent to peng_mant(snum)*(peng_power(snum)[1]), but the
    # "function unrolling" is about 4x faster
    snum = snum.rstrip()
    last_char = snum[-1]
    if last_char.isdigit():
        # No suffix present: the whole string is the mantissa (power 1E+0).
        return float(snum) * _SUFFIX_POWER_DICT[" "]
    return float(snum[:-1]) * _SUFFIX_POWER_DICT[last_char]
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_frac(snum):
    r"""
    Return the fractional part of a number represented in engineering notation.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: integer
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_frac
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_frac(peng.peng(1235.6789E3, 3, False))
        236
    """
    snum = snum.rstrip()
    dot_pos = snum.find(".")
    # No period means the number has no fractional part.
    if dot_pos == -1:
        return 0
    frac_digits = snum[dot_pos + 1 :]
    if not snum[-1].isdigit():
        # Drop the trailing engineering suffix character.
        frac_digits = frac_digits[:-1]
    return int(frac_digits)
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_int(snum):
    r"""
    Return the integer part of a number represented in engineering notation.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: integer
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for peng.functions.peng_int
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_int(peng.peng(1235.6789E3, 3, False))
        1
    """
    # Contract decorator added for consistency with the sibling peng_*
    # functions, which all validate ``snum`` the same way (peng_mant's own
    # contract already raised the same RuntimeError, so this is backward
    # compatible).
    return int(peng_mant(snum))
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_mant(snum):
    r"""
    Return the mantissa of a number represented in engineering notation.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: float
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_mant
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_mant(peng.peng(1235.6789E3, 3, False))
        1.236
    """
    snum = snum.rstrip()
    if snum[-1].isdigit():
        # No suffix: the whole string is the mantissa.
        return float(snum)
    # Strip the trailing engineering suffix character.
    return float(snum[:-1])
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_power(snum):
    r"""
    Return engineering suffix and its floating point equivalent of a number.
    :py:func:`peng.peng` lists the correspondence between suffix and floating
    point exponent.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: named tuple in which the first item is the engineering suffix and
            the second item is the floating point equivalent of the suffix
            when the number is represented in engineering notation.
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_power
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_power(peng.peng(1235.6789E3, 3, False))
        EngPower(suffix='M', exp=1000000.0)
    """
    # Strip trailing whitespace first, consistent with peng_float(),
    # peng_frac(), peng_mant() and peng_suffix(); previously a trailing
    # space on a suffixed number (e.g. "1.2k ") made the last character a
    # space, so the blank suffix (power 1E+0) was returned instead of 'k'.
    snum = snum.rstrip()
    suffix = " " if snum[-1].isdigit() else snum[-1]
    return EngPower(suffix, _SUFFIX_POWER_DICT[suffix])
@pexdoc.pcontracts.contract(snum="engineering_notation_number")
def peng_suffix(snum):
    r"""
    Return the suffix of a number represented in engineering notation.
    :param snum: Number
    :type snum: :ref:`EngineeringNotationNumber`
    :rtype: string
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_suffix
    :raises: RuntimeError (Argument \`snum\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_suffix(peng.peng(1235.6789E3, 3, False))
        'M'
    """
    stripped = snum.rstrip()
    last_char = stripped[-1]
    # A trailing digit means there is no suffix; report the blank suffix.
    return " " if last_char.isdigit() else last_char
@pexdoc.pcontracts.contract(suffix="engineering_notation_suffix", offset=int)
def peng_suffix_math(suffix, offset):
    r"""
    Return engineering suffix from a starting suffix and a number of suffixes offset.
    :param suffix: Engineering suffix
    :type suffix: :ref:`EngineeringNotationSuffix`
    :param offset: Engineering suffix offset
    :type offset: integer
    :rtype: string
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc()) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.functions.peng_suffix_math
    :raises:
     * RuntimeError (Argument \`offset\` is not valid)
     * RuntimeError (Argument \`suffix\` is not valid)
     * ValueError (Argument \`offset\` is not valid)
    .. [[[end]]]
    For example:
        >>> import peng
        >>> peng.peng_suffix_math('u', 6)
        'T'
    """
    # pylint: disable=W0212
    # Register a pexdoc exception handler; calling ``eobj(True)`` signals the
    # ValueError condition (presumably raising it -- pexdoc.exh API, confirm).
    eobj = pexdoc.exh.addex(ValueError, "Argument `offset` is not valid")
    try:
        # Map the suffix to its power of ten, shift by 3 decades per offset
        # step, and map the result back to a suffix.
        return _POWER_TO_SUFFIX_DICT[_SUFFIX_TO_POWER_DICT[suffix] + 3 * offset]
    except KeyError:
        # The shifted power fell outside the supported yocto..yotta table.
        eobj(True)
def remove_extra_delims(expr, ldelim="(", rdelim=")"):
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expressions or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
"""
op_group = ""
for item1 in _OP_PREC:
if isinstance(item1, list):
for item2 in item1:
op_group += item2
else:
op_group += item1
iobj = zip([expr, ldelim, rdelim], ["expr", "ldelim", "rdelim"])
for item, desc in iobj:
if not isinstance(item, str):
raise RuntimeError("Argument `{0}` is not valid".format(desc))
if (len(ldelim) != 1) or ((len(ldelim) == 1) and (ldelim in op_group)):
raise RuntimeError("Argument `ldelim` is not valid")
if (len(rdelim) != 1) or ((len(rdelim) == 1) and (rdelim in op_group)):
raise RuntimeError("Argument `rdelim` is not valid")
| |
and the previous revision.
:type DevicePolicyChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyChangedCols: The fields that changed between this revision of the record and the previous revision.
:type DevicePolicyChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type DevicePolicyEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type DevicePolicyEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyID: The internal NetMRI identifier for this device policy status record.
:type DevicePolicyID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyStartTime: The starting effective time of this revision of the record.
:type DevicePolicyStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyStartTime: The starting effective time of this revision of the record.
:type DevicePolicyStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DevicePolicyTimestamp: The date and time this record was collected or calculated.
:type DevicePolicyTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DevicePolicyTimestamp: The date and time this record was collected or calculated.
:type DevicePolicyTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyID: The internal NetMRI identifier for the policy whose status this record represents.
:type PolicyID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules.
:type PolicyRulesChecked: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesChecked: The total number of rules that were checked against this device for this policy. Invalid rules and rules that are skipped due to the device not matching the rule filter are not counted as 'checked' rules.
:type PolicyRulesChecked: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesError: The total number of rules in this policy that the device failed with error status.
:type PolicyRulesError: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesError: The total number of rules in this policy that the device failed with error status.
:type PolicyRulesError: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status.
:type PolicyRulesFailed: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesFailed: The total number of rules in this policy that the device failed with info, warning, or error status.
:type PolicyRulesFailed: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesInfo: The total number of rules in this policy that the device failed with info status.
:type PolicyRulesInfo: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesInfo: The total number of rules in this policy that the device failed with info status.
:type PolicyRulesInfo: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesInvalid: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesInvalid: The total number of invalid rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesInvalid: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesPassed: The total number of rules in this policy that the device passed successfully.
:type PolicyRulesPassed: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesPassed: The total number of rules in this policy that the device passed successfully.
:type PolicyRulesPassed: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters.
:type PolicyRulesSkipped: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesSkipped: The total number of rules in this policy that were skipped due to the device not matching the rule filters.
:type PolicyRulesSkipped: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesTotal: The total number of rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesTotal: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesTotal: The total number of rules that were in this policy at the time the policy was executed against this device.
:type PolicyRulesTotal: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device).
:type PolicyRulesUnknown: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesUnknown: The total number of rules that could not be fully evaluated because information needed for the rule was not available (for example, the configuration file has not been collected for the device).
:type PolicyRulesUnknown: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule build has been used and an improper XML format has been specified.
:type PolicyRulesValid: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param PolicyRulesValid: The total number of valid rules that were in this policy at the time the policy was executed against this device. An invalid rule generally only occurs if the XML rule build has been used and an improper XML format has been specified.
:type | |
<reponame>LilSpazJoekp/docstrfmt
import asyncio
import contextlib
import glob
import os
import signal
import sys
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from copy import copy
from functools import partial
from multiprocessing import Manager as MultiManager
from multiprocessing import freeze_support
from os.path import abspath, basename, isdir, join
from pathlib import Path
from textwrap import dedent, indent
from typing import TYPE_CHECKING, Any, List, Optional
import click
import libcst as cst
import toml
from black import (
Mode,
TargetVersion,
cancel,
find_pyproject_toml,
parse_pyproject_toml,
shutdown,
)
from click import Context
from libcst import CSTTransformer, Expr
from libcst.metadata import ParentNodeProvider, PositionProvider
from docstrfmt.const import __version__
from docstrfmt.debug import dump_node
from docstrfmt.docstrfmt import Manager
from docstrfmt.exceptions import InvalidRstErrors
from docstrfmt.util import FileCache, plural
if TYPE_CHECKING: # pragma: no cover
from libcst import AssignTarget, ClassDef, FunctionDef, Module, SimpleString
echo = partial(click.secho, err=True)
# Glob patterns excluded from formatting by default (mirrors black's
# default excludes). The original list contained "**/.direnv/" twice;
# the duplicate is removed and entries are kept sorted.
DEFAULT_EXCLUDE = [
    "**/.direnv/",
    "**/.eggs/",
    "**/.git/",
    "**/.hg/",
    "**/.mypy_cache/",
    "**/.nox/",
    "**/.svn/",
    "**/.tox/",
    "**/.venv/",
    "**/_build",
    "**/buck-out",
    "**/build",
    "**/dist",
]
# Define this here to support Python <3.7.
class nullcontext(contextlib.AbstractContextManager):  # type: ignore
    """No-op context manager that simply hands back a preset value.

    Backport of :class:`contextlib.nullcontext` for interpreters that
    predate it.
    """
    def __init__(self, enter_result: Any = None):
        # Value produced by ``__enter__``; defaults to None.
        self.enter_result = enter_result
    def __enter__(self) -> Any:
        return self.enter_result
    def __exit__(self, *excinfo: Any) -> Any:
        # Never suppresses exceptions.
        return None
class Reporter:
    """Console reporter gated by a verbosity threshold.

    A message is emitted (through the module-level ``echo``) only when
    its level does not exceed the reporter's configured ``level``.
    Errors use level -1 so they print at every verbosity setting.
    """
    def __init__(self, level=1):
        # Verbosity threshold; higher means chattier.
        self.level = level
        self.error_count = 0
    def _log_message(self, message, level, **formatting_kwargs):
        # Emit only when the message's level is within the threshold,
        # then flush both streams so output ordering stays sane.
        if level <= self.level:
            echo(message, **formatting_kwargs)
            sys.stderr.flush()
            sys.stdout.flush()
    def debug(self, message, **formatting_kwargs):
        self._log_message(message, 3, bold=False, fg="blue", **formatting_kwargs)
    def error(self, message, **formatting_kwargs):
        self._log_message(message, -1, bold=False, fg="red", **formatting_kwargs)
    def print(self, message, level=0, **formatting_kwargs):
        # Level-0 messages are bold unless the caller says otherwise.
        formatting_kwargs.setdefault("bold", level == 0)
        self._log_message(message, level, **formatting_kwargs)
reporter = Reporter(0)
class Visitor(CSTTransformer):
    """CST transformer that reformats docstrings in a Python module.
    Tracks the stack of enclosing object names (for reporting), detects
    docstring nodes, reformats them through the docstrfmt ``manager`` and
    replaces the node when the formatted text differs from the original.
    """
    # Metadata used to find each node's source position and its parent.
    METADATA_DEPENDENCIES = (PositionProvider, ParentNodeProvider)
    def __init__(self, object_name, file, line_length, manager):
        super().__init__()
        # Most recent assignment target; a string right after it is
        # treated as that attribute's docstring.
        self._last_assign = None
        # Stack of enclosing names, e.g. [module, class, method].
        self._object_names = [object_name]
        self._object_type = None
        # Whether formatted multi-line docstrings end with a blank line.
        self._blank_line = manager.docstring_trailing_line
        self.file = file
        self.line_length = line_length
        self.manager = manager
        self.misformatted = False  # becomes True once any docstring needs rewriting
        self.error_count = 0
    def _is_docstring(self, node: "SimpleString") -> bool:
        # A docstring here is a triple-quoted string whose parent is a
        # bare expression statement (``Expr``).
        return node.quote.startswith(('"""', "'''")) and isinstance(
            self.get_metadata(ParentNodeProvider, node), Expr
        )
    def leave_ClassDef(self, original_node: "ClassDef", updated_node: "ClassDef"):
        # Leaving a class scope: drop its name from the stack.
        self._object_names.pop(-1)
        return updated_node
    def leave_FunctionDef(
        self, original_node: "FunctionDef", updated_node: "FunctionDef"
    ):
        # Leaving a function scope: drop its name from the stack.
        self._object_names.pop(-1)
        return updated_node
    def leave_SimpleString(
        self, original_node: "SimpleString", updated_node: "SimpleString"
    ):
        """Reformat ``original_node`` if it is a docstring.
        Returns the node unchanged when the docstring is already
        correctly formatted, otherwise a node carrying the new text.
        """
        if self._is_docstring(original_node):
            position_meta = self.get_metadata(PositionProvider, original_node)
            if self._last_assign:
                # Attribute docstring: name it after the assignment target.
                # NOTE(review): assumes the target is attribute-like so
                # children[2] is the name node -- confirm for bare names.
                self._object_names.append(self._last_assign.target.children[2].value)
                old_object_type = copy(self._object_type)
                self._object_type = "attribute"
            indent_level = position_meta.start.column
            # Re-indent so the RST parser sees the text at column 0.
            source = dedent(
                (" " * indent_level) + original_node.evaluated_value
            ).rstrip()
            doc = self.manager.parse_string(self.file, source)
            if reporter.level >= 3:
                reporter.debug("=" * 60)
                reporter.debug(dump_node(doc))
            # Effective wrap width after accounting for indentation.
            width = self.line_length - indent_level
            if width < 1:
                self.error_count += 1
                raise ValueError(f"Invalid starting width {self.line_length}")
            output = self.manager.format_node(width, doc, True).rstrip()
            # Fold the manager's error tally into ours, then reset it.
            self.error_count += self.manager.error_count
            self.manager.error_count = 0
            object_display_name = (
                f'{self._object_type} {".".join(self._object_names)!r}'
            )
            single_line = len(output.splitlines()) == 1
            # Count the original's trailing newlines (ignoring spaces) to
            # decide whether its ending already matches the convention.
            original_strip = original_node.evaluated_value.rstrip(" ")
            end_line_count = len(original_strip) - len(original_strip.rstrip("\n"))
            ending = "" if single_line else "\n\n" if self._blank_line else "\n"
            if single_line:
                correct_ending = 0 == end_line_count
            else:
                correct_ending = int(self._blank_line) + 1 == end_line_count
            if source == output and correct_ending:
                reporter.print(
                    f"Docstring for {object_display_name} in file {str(self.file)!r} is formatted correctly. Nice!",
                    1,
                )
            else:
                self.misformatted = True
                file_link = f'File "{self.file}"'
                reporter.print(
                    f"Found incorrectly formatted docstring. Docstring for {object_display_name} in {file_link}.",
                    1,
                )
                # Re-indent the formatted text and swap it into the node.
                value = indent(
                    f'{original_node.prefix}"""{output}{ending}"""', " " * indent_level
                ).lstrip()
                updated_node = updated_node.with_changes(value=value)
            if self._last_assign:
                # Restore the state captured for the attribute docstring.
                self._last_assign = None
                self._object_names.pop(-1)
                self._object_type = old_object_type
        return updated_node
    def visit_AssignTarget_target(self, node: "AssignTarget") -> None:
        # Remember the target so a following string can be reported as
        # this attribute's docstring.
        self._last_assign = node
    def visit_ClassDef(self, node: "ClassDef") -> Optional[bool]:
        self._object_names.append(node.name.value)
        self._object_type = "class"
        self._last_assign = None
        return True
    def visit_FunctionDef(self, node: "FunctionDef") -> Optional[bool]:
        self._object_names.append(node.name.value)
        self._object_type = "function"
        self._last_assign = None
        return True
    def visit_Module(self, node: "Module") -> Optional[bool]:
        self._object_type = "module"
        return True
async def _run_formatter(
    check,
    file_type,
    files,
    include_txt,
    docstring_trailing_line,
    mode,
    raw_output,
    cache,
    loop,
    executor,
):
    """Format ``files`` concurrently on ``executor``.

    Schedules one :func:`_format_file` call per not-yet-cached file,
    collects the results and returns ``(misformatted_files,
    error_count)``. Files processed cleanly are recorded in ``cache``.
    """
    # This code is heavily based on that of psf/black
    # see here for license: https://github.com/psf/black/blob/master/LICENSE
    todo, already_done = cache.gen_todo_list(files)
    cancelled = []
    files_to_cache = []
    # Lock shared with worker processes so raw output is not interleaved.
    lock = MultiManager().Lock()
    line_length = mode.line_length
    misformatted_files = set()
    tasks = {
        asyncio.ensure_future(
            loop.run_in_executor(
                executor,
                _format_file,
                check,
                file,
                file_type,
                include_txt,
                line_length,
                mode,
                docstring_trailing_line,
                raw_output,
                lock,
            )
        ): file
        for file in sorted(todo)
    }
    # Live view: shrinks as finished tasks are popped from ``tasks``.
    in_process = tasks.keys()
    try:
        loop.add_signal_handler(signal.SIGINT, cancel, in_process)
        loop.add_signal_handler(signal.SIGTERM, cancel, in_process)
    except NotImplementedError:  # pragma: no cover
        # There are no good alternatives for these on Windows.
        pass
    error_count = 0
    while in_process:
        done, _ = await asyncio.wait(in_process, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            file = tasks.pop(task)
            if task.cancelled():  # pragma: no cover
                cancelled.append(task)
            elif task.exception():  # pragma: no cover
                reporter.error(str(task.exception()))
                error_count += 1
            else:
                misformatted, errors = task.result()
                sys.stderr.flush()
                error_count += errors
                if misformatted:
                    misformatted_files.add(file)
                # Cache only files whose result is stable and error-free.
                if (
                    not (misformatted and raw_output) or (check and not misformatted)
                ) and errors == 0:
                    files_to_cache.append(file)
    if cancelled:  # pragma: no cover
        # Fix: the ``loop=`` argument was deprecated in 3.8 and removed in
        # Python 3.10; gather uses the running event loop automatically.
        await asyncio.gather(*cancelled, return_exceptions=True)
    if files_to_cache:
        cache.write_cache(files_to_cache)
    return misformatted_files, error_count
def _format_file(
    check,
    file,
    file_type,
    include_txt,
    line_length,
    mode,
    docstring_trailing_line,
    raw_output,
    lock,
):
    """Check/format one file (or stdin when ``file.name`` is '-').
    Dispatches to the Python or RST processor based on the file suffix
    and returns ``(misformatted, error_count)``.
    NOTE(review): ``file`` must be Path-like (``.name``/``.suffix`` are
    used) even for the stdin sentinel -- confirm at the call sites.
    """
    error_count = 0
    manager = Manager(reporter, mode, docstring_trailing_line)
    # stdin input is always echoed back to stdout
    if file.name == "-":
        raw_output = True
    reporter.print(f"Checking {file}", 2)
    misformatted = False
    with nullcontext(sys.stdin) if file.name == "-" else open(
        file, encoding="utf-8"
    ) as f:
        input_string = f.read()
        # Remember the file's newline convention so rewrites preserve it.
        newline = getattr(f, "newlines", None)
        # If mixed or unknown newlines, fall back to the platform default
        if not isinstance(newline, str):
            newline = None
    try:
        if file.suffix == ".py" or (file_type == "py" and file.name == "-"):
            misformatted, errors = _process_python(
                check,
                file,
                input_string,
                line_length,
                manager,
                raw_output,
                lock,
                newline,
            )
            error_count += errors
        elif (
            file.suffix in ([".rst", ".txt"] if include_txt else [".rst"])
            or file.name == "-"
        ):
            misformatted, errors = _process_rst(
                check,
                file,
                input_string,
                line_length,
                manager,
                raw_output,
                lock,
                newline,
            )
            error_count += errors
    except InvalidRstErrors as errors:
        reporter.error(str(errors))
        error_count += 1
        reporter.print(f"Failed to format {str(file)!r}")
    except Exception as error:
        # Broad catch keeps one bad file from aborting the whole run.
        reporter.error(f"{error.__class__.__name__}: {error}")
        error_count += 1
        reporter.print(f"Failed to format {str(file)!r}")
    return misformatted, error_count
def _parse_pyproject_config(
    context: click.Context, param: click.Parameter, value: Optional[str]
) -> Mode:
    """Click callback that loads configuration from pyproject.toml.
    Merges the ``[tool.docstrfmt]`` table into the click context's params
    and builds a black ``Mode`` from the same file's black settings.
    Falls back to a default 88-column Mode when no config file exists.
    """
    if not value:
        # Locate pyproject.toml relative to the files being formatted.
        pyproject_toml = find_pyproject_toml(tuple(context.params.get("files", (".",))))
        value = pyproject_toml if pyproject_toml else None
    if value:
        try:
            pyproject_toml = toml.load(value)
            config = pyproject_toml.get("tool", {}).get("docstrfmt", {})
            # Normalize option names: "--foo-bar"/"foo-bar" -> "foo_bar".
            config = {
                k.replace("--", "").replace("-", "_"): v for k, v in config.items()
            }
        except (OSError, ValueError) as e:  # pragma: no cover
            raise click.FileError(
                filename=value, hint=f"Error reading configuration file: {e}"
            )
        if config:
            # List-valued options must actually be lists in the TOML.
            for key in ["exclude", "extend_exclude", "files"]:
                config_value = config.get(key)
                if config_value is not None and not isinstance(config_value, list):
                    raise click.BadOptionUsage(key, f"Config key {key} must be a list")
            params = {}
            if context.params is not None:
                params.update(context.params)
            # File config overrides current params on the context.
            params.update(config)
            context.params = params
        black_config = parse_pyproject_toml(value)
        # black's exclude options do not apply to docstrfmt's file walk.
        black_config.pop("exclude", None)
        black_config.pop("extend_exclude", None)
        target_version = black_config.pop("target_version", ["PY37"])
        if target_version:
            # Translate version strings into black TargetVersion members.
            target_version = set(
                getattr(TargetVersion, version.upper())
                for version in target_version
                if hasattr(TargetVersion, version.upper())
            )
            black_config["target_versions"] = target_version
        return Mode(**black_config)
    else:
        return Mode(line_length=88)
def _parse_sources(
    context: click.Context, param: click.Parameter, value: Optional[List[str]]
):
    """Click callback resolving the file/glob arguments to format.

    Expands directories and glob patterns into absolute paths with the
    supported extensions, removes anything matching an exclusion
    pattern, and returns the sorted result. The stdin sentinel '-'
    passes through untouched.
    """
    sources = value or context.params.get("files", [])
    exclude = list(context.params.get("exclude", DEFAULT_EXCLUDE))
    exclude.extend(context.params.get("extend_exclude", []))
    include_txt = context.params.get("include_txt", False)
    files_to_format = set()
    extensions = [".py", ".rst"] + ([".txt"] if include_txt else [])
    for source in sources:
        if source == "-":
            files_to_format.add(source)
            continue
        for item in glob.iglob(source, recursive=True):
            path = Path(item)
            if path.is_dir():
                # Recursively pick up every supported file under the dir.
                for extension in extensions:
                    for found in glob.iglob(
                        f"{path}/**/*{extension}", recursive=True
                    ):
                        files_to_format.add(abspath(found))
            elif path.is_file():
                files_to_format.add(abspath(item))
    # Drop anything matched by an exclusion pattern; directories exclude
    # their direct contents.
    for pattern in exclude:
        if isdir(pattern):
            pattern = join(pattern, "*")
        for excluded in map(abspath, glob.iglob(pattern, recursive=True)):
            files_to_format.discard(excluded)
    return sorted(files_to_format)
def _process_python(
    check,
    file,
    input_string,
    line_length,
    manager,
    raw_output,
    lock=None,
    newline=None,
):
    """Reformat docstrings in a Python source string.
    Parses ``input_string`` with libcst, rewrites misformatted
    docstrings via :class:`Visitor`, and writes the result back to the
    file (or to stdout for raw/stdin output). Returns
    ``(misformatted, error_count)``.
    """
    filename = basename(file)
    # Module-level docstrings are reported under the module's bare name.
    object_name = filename.split(".")[0]
    visitor = Visitor(object_name, file, line_length, manager)
    module = cst.parse_module(input_string)
    wrapper = cst.MetadataWrapper(module)
    result = wrapper.visit(visitor)
    error_count = visitor.error_count
    misformatted = False
    if visitor.misformatted:
        misformatted = True
        if check and not raw_output:
            reporter.print(f"File {str(file)!r} could be reformatted.")
        else:
            if file == "-" or raw_output:
                # Serialize stdout writes across worker processes.
                with lock or nullcontext():
                    _write_output(
                        file, result.code, nullcontext(sys.stdout), raw_output
                    )
            else:
                _write_output(
                    file,
                    result.code,
                    open(file, "w", encoding="utf-8", newline=newline),
                    raw_output,
                )
    elif raw_output:
        # Input was already clean; echo it through unchanged.
        with lock or nullcontext():
            _write_output(file, input_string, nullcontext(sys.stdout), raw_output)
    return misformatted, error_count
def _process_rst(
    check,
    file,
    input_string,
    line_length,
    manager,
    raw_output,
    lock=None,
    newline=None,
):
    """Reformat an RST document string.
    Parses and reformats ``input_string`` through ``manager`` and writes
    the result back to the file (or stdout for raw/stdin output).
    Returns ``(misformatted, error_count)``.
    """
    doc = manager.parse_string(file, input_string)
    if reporter.level >= 3:
        reporter.debug("=" * 60)
        reporter.debug(dump_node(doc))
    output = manager.format_node(line_length, doc)
    error_count = manager.error_count
    misformatted = False
    if output == input_string:
        reporter.print(f"File {str(file)!r} is formatted correctly. Nice!", 1)
        if raw_output:
            # Echo clean input through for raw/stdin mode.
            with lock or nullcontext():
                _write_output(file, input_string, nullcontext(sys.stdout), raw_output)
    else:
        misformatted = True
        if check and not raw_output:
            reporter.print(f"File {str(file)!r} could be reformatted.")
        else:
            if file == "-" or raw_output:
                # Serialize stdout writes across worker processes.
                with lock or nullcontext():
                    _write_output(file, output, nullcontext(sys.stdout), raw_output)
            else:
                _write_output(
                    file,
                    output,
                    open(file, "w", encoding="utf-8", newline=newline),
                    raw_output,
                )
    return misformatted, error_count
def _write_output(file, | |
<reponame>zhengp0/regm
"""
Variable module
"""
from collections.abc import Iterable
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Union
import numpy as np
from xspline import XSpline
from regmod.data import Data
from regmod.prior import (Prior, GaussianPrior, UniformPrior,
LinearPrior, LinearGaussianPrior, LinearUniformPrior,
SplinePrior, SplineGaussianPrior, SplineUniformPrior)
from regmod.utils import SplineSpecs
@dataclass
class Variable:
    """Variable class is in charge of storing information of variable including
    name and priors, and accessing data in the data frame. Name corresponds to
    column name in the data frame and priors are used to compute the likelihood.
    Parameters
    ----------
    name : str
        Name of the variable corresponding to the column name in the data frame.
    priors : List[Prior], optional
        A list of priors for the variable. Default is an empty list.
    Attributes
    ----------
    size
    name : str
        Name of the variable corresponding to the column name in the data frame.
    priors : List[Prior]
        A list of priors for the variable.
    gprior : GaussianPrior
        Direct Gaussian prior in `priors`.
    uprior : UniformPrior
        Direct Uniform prior in `priors`.
    Methods
    -------
    process_priors()
        Check the prior type and extract `gprior` and `uprior`.
    check_data(data)
        Check if the data contains the column name `name`.
    reset_priors()
        Reset direct priors.
    add_priors(priors)
        Add priors.
    rm_priors(indices)
        Remove priors.
    get_mat(data)
        Get design matrix.
    get_gvec()
        Get direct Gaussian prior vector.
    get_uvec()
        Get direct Uniform prior vector.
    copy()
        Copy current instance.
    Notes
    -----
    In the future, this class will be combined with the SplineVariable.
    """
    name: str
    priors: List[Prior] = field(default_factory=list, repr=False)
    gprior: GaussianPrior = field(default=None, init=False, repr=False)
    uprior: UniformPrior = field(default=None, init=False, repr=False)
    def __post_init__(self):
        self.process_priors()
    def process_priors(self):
        """Check the prior type and extract `gprior` and `uprior`.
        Raises
        ------
        AssertionError
            Raised if direct Gaussian prior size not match.
        AssertionError
            Raised if direct Uniform prior size not match.
        ValueError
            Raised when any prior in the list is not an instance of Prior.
        """
        # Fix: iterate over a snapshot -- the loop body may remove a
        # superseded direct prior from ``self.priors``, and mutating the
        # list being iterated would silently skip elements.
        for prior in list(self.priors):
            if isinstance(prior, LinearPrior):
                continue
            if isinstance(prior, GaussianPrior):
                # Keep only the most recent direct Gaussian prior.
                if self.gprior is not None:
                    self.priors.remove(self.gprior)
                self.gprior = prior
                assert self.gprior.size == self.size, \
                    "Gaussian prior size not match."
            elif isinstance(prior, UniformPrior):
                # Keep only the most recent direct Uniform prior.
                if self.uprior is not None:
                    self.priors.remove(self.uprior)
                self.uprior = prior
                assert self.uprior.size == self.size, \
                    "Uniform prior size not match."
            else:
                raise ValueError("Unknown prior type.")
    def check_data(self, data: Data):
        """Check if the data contains the column name `name`.
        Parameters
        ----------
        data : Data
            Data object to be checked.
        Raises
        ------
        ValueError
            Raised if data doesn't contain column name `self.name`.
        """
        if self.name not in data.col_covs:
            raise ValueError(f"Data do not contain column {self.name}")
    @property
    def size(self) -> int:
        """Size of the variable."""
        return 1
    def reset_priors(self) -> None:
        """Reset direct priors."""
        self.gprior = None
        self.uprior = None
    def add_priors(self, priors: Union[Prior, List[Prior]]) -> None:
        """Add priors.
        Parameters
        ----------
        priors : Union[Prior, List[Prior]]
            Priors to be added.
        """
        if not isinstance(priors, list):
            priors = [priors]
        self.priors.extend(priors)
        self.process_priors()
    def rm_priors(self, indices: Union[int, List[int], List[bool]]) -> None:
        """Remove priors.
        Parameters
        ----------
        indices : Union[int, List[int], List[bool]]
            Indicies of the priors that need to be removed. Indicies come in the
            forms of integer, list of integers or list of booleans. When it is
            integer or list of integers, it requires the integer is within the
            bounds `[0, len(self.priors))`. When it is booleans, it requires the
            list have the same length with `self.priors`.
        Raises
        ------
        AssertionError
            Raised when `indices` has the wrong type.
        AssertionError
            Raised when `indices` is a list with mixed types.
        AssertionError
            Raised when `indices` is list of booleans but with different length
            compare to `self.priors`.
        """
        if isinstance(indices, int):
            indices = [indices]
        else:
            assert isinstance(indices, Iterable), \
                "Indices must be int, List[int], or List[bool]."
            # A list of ints is converted into a boolean mask.
            if all([not isinstance(index, bool) and isinstance(index, int)
                    for index in indices]):
                indices = [i in indices for i in range(len(self.priors))]
        assert all([isinstance(index, bool) for index in indices]), \
            "Index type not consistent."
        assert len(indices) == len(self.priors), \
            "Index size not match with number of priors."
        self.priors = [self.priors[i] for i, index in enumerate(indices)
                       if not index]
        # Re-derive gprior/uprior from the surviving priors.
        self.reset_priors()
        self.process_priors()
    def get_mat(self, data: Data) -> np.ndarray:
        """Get design matrix.
        Parameters
        ----------
        data : Data
            Data object that provides the covariates.
        Returns
        -------
        np.ndarray
            Design matrix.
        """
        self.check_data(data)
        return data.get_covs(self.name)
    def get_gvec(self) -> np.ndarray:
        """Get direct Gaussian prior vector.
        Returns
        -------
        np.ndarray
            Direct Gaussian prior vector.
        """
        if self.gprior is None:
            # No prior: mean 0 with infinite sd (i.e. non-informative).
            gvec = np.repeat([[0.0], [np.inf]], self.size, axis=1)
        else:
            gvec = np.vstack([self.gprior.mean, self.gprior.sd])
        return gvec
    def get_uvec(self) -> np.ndarray:
        """Get direct Uniform prior vector.
        Returns
        -------
        np.ndarray
            Direct Uniform prior vector.
        """
        if self.uprior is None:
            # No prior: unbounded in both directions.
            uvec = np.repeat([[-np.inf], [np.inf]], self.size, axis=1)
        else:
            uvec = np.vstack([self.uprior.lb, self.uprior.ub])
        return uvec
    def copy(self) -> "Variable":
        """Copy current instance.
        Returns
        -------
        Variable
            Deep copy of the current instance.
        """
        return deepcopy(self)
@dataclass
class SplineVariable(Variable):
"""Spline variable that store information of variable with splines.
Parameters
----------
spline : XSpline, optional
Spline object that in charge of creating design matrix. Default to be
`None`. `spline` and `spline_specs` cannot be `None` at the same time.
spline_specs : SplineSpecs, optional
Spline settings used to create spline object. Recommend to use only when
use `knots_type={'rel_domain', 'rel_freq'}. Default to be `None`.
linear_gpriors : List[LinearPrior], optional
A list of linear Gaussian priors usually for shape priors of the spline.
Default to be an empty list.
linear_upriors : List[LinearPrior], optional
A list of linear Uniform priors usually for shape priors of the spline.
spline. Default to be an empty list.
Attributes
----------
spline : XSpline
Spline object that in charge of creating design matrix.
spline_specs : SplineSpecs
Spline settings used to create spline object.
linear_gpriors : List[LinearPrior]
A list of linear Gaussian priors usually for shape priors of the spline.
linear_upriors : List[LinearPrior]
A list of linear Uniform priors usually for shape priors of the spline.
Methods
-------
check_data(data)
Check if the data contains the column name `name`. And create the spline
object, if only `spline_specs` is provided.
process_priors()
Check the prior type and extract `gprior`, `uprior`, `linear_gpriors`
and `linear_upriors`.
reset_priors()
Reset direct and linear priors.
get_mat(data)
Get design matrix.
get_linear_gvec()
Get linear Gaussian prior vector.
get_linear_uvec()
Get linear Uniform prior vector.
get_linear_gmat(data)
Get linear Gaussian prior design matrix.
get_linear_umat(data)
Get linear Uniform prior design matrix.
"""
spline: XSpline = field(default=None, repr=False)
spline_specs: SplineSpecs = field(default=None, repr=False)
linear_gpriors: List[LinearPrior] = field(default_factory=list, repr=False)
linear_upriors: List[LinearPrior] = field(default_factory=list, repr=False)
def __post_init__(self):
if (self.spline is None) and (self.spline_specs is None):
raise ValueError("At least one of spline and spline_specs is not None.")
self.process_priors()
def check_data(self, data: Data):
"""Check if the data contains the column name `name`. And create the
spline object, if only `spline_specs` is provided.
Parameters
----------
data : Data
Data object to be checked.
"""
super().check_data(data)
if self.spline is None:
cov = data.get_cols(self.name)
self.spline = self.spline_specs.create_spline(cov)
for prior in self.linear_upriors + self.linear_gpriors:
if isinstance(prior, SplinePrior):
prior.attach_spline(self.spline)
def process_priors(self):
"""Check the prior type and extract `gprior`, `uprior`, `linear_gpriors`
and `linear_upriors`.
Raises
------
AssertionError
Raised if direct Gaussian prior size not match.
AssertionError
Raised if direct Uniform prior size not match.
ValueError
Raised when any prior in the list is not an instance of Prior.
"""
for prior in self.priors:
if isinstance(prior, (SplineGaussianPrior, LinearGaussianPrior)):
self.linear_gpriors.append(prior)
elif isinstance(prior, (SplineUniformPrior, LinearUniformPrior)):
self.linear_upriors.append(prior)
elif isinstance(prior, GaussianPrior):
if self.gprior is not None:
self.priors.remove(self.gprior)
self.gprior = prior
assert self.gprior.size == self.size, \
"Gaussian prior size not match."
elif isinstance(prior, UniformPrior):
if self.uprior is not None:
self.priors.remove(self.uprior)
self.uprior = prior
assert self.uprior.size == self.size, \
"Uniform prior size not match."
else:
raise ValueError("Unknown prior type.")
@property
def size(self) -> int:
"""Size of the variable."""
if self.spline is not None:
n = self.spline.num_spline_bases
else:
n = self.spline_specs.num_spline_bases
return n
def reset_priors(self):
"""Reset direct and linear priors."""
self.gprior = None
self.uprior = None
self.linear_gpriors = list()
self.linear_upriors = list()
def get_mat(self, data: Data) -> np.ndarray:
"""Get design matrix.
Parameters
----------
data : Data
Data object that provides the covariates.
Returns
-------
np.ndarray
Design matrix.
"""
self.check_data(data)
cov = data.get_cols(self.name)
return self.spline.design_mat(cov, l_extra=True, r_extra=True)
def get_linear_uvec(self) -> np.ndarray:
"""Get linear Uniform prior vector.
Returns
-------
np.ndarray
Linear uniform prior vector.
| |
A_frag = set(ugraph.bfsearch1d(idxB,idxA))
B_frag = set(ugraph.bfsearch1d(idxA,idxB))
# Compare fragments. They may be equal in case of cyclic systems
if (B_frag is None) or (A_frag is None) or (B_frag == A_frag):
return None
# Choose smaller fragment
if len(A_frag) > len(B_frag):
# if B_frag, rotation around A-->B
x0 = xvector[3*idxA:3*idxA+3]
axis = xvector[3*idxB:3*idxB+3] - x0
target_fragment = B_frag.copy()
else:
# if A_frag, rotation around B-->A
x0 = xvector[3*idxB:3*idxB+3]
axis = xvector[3*idxA:3*idxA+3] - x0
target_fragment = A_frag.copy()
axis = axis / np.linalg.norm(axis)
# Remove indices of the bond
target_fragment.discard(idxA)
target_fragment.discard(idxB)
# Get rotation matrix
R = gen_rotmatrix(axis,theta)
# Rotate atoms in fragment
rotated_xyz = []
for idx in range(natoms):
xyz = xvector[3*idx:3*idx+3]
if idx in target_fragment:
xyz = xyz - x0
xyz = R * np.matrix(xyz).transpose()
xyz = np.array((xyz.transpose()).tolist()[0])
xyz = xyz + x0
rotated_xyz += xyz.tolist()
return rotated_xyz
#===============================================#
#===============================================#
# Functions related to frequencies #
#===============================================#
def afreq2wnum(angfreq):
    """Angular frequency -> wavenumber: omega / (2*pi*c)."""
    return angfreq / pc.TWOPI / pc.C0
#-----------------------------------------------#
def wnum2afreq(wavenum):
    """Wavenumber -> angular frequency: nu~ * 2*pi*c (inverse of afreq2wnum)."""
    return wavenum * pc.TWOPI * pc.C0
#-----------------------------------------------#
def eval2afreq(evalue,mu=1.0/pc.AMU):
    # Sign-preserving omega = sign(lambda)*sqrt(|lambda|/mu); negative
    # eigenvalues map to "negative" (imaginary) frequencies.
    return sign(evalue) * (abs(evalue)/mu)**0.5
#-----------------------------------------------#
def eval2wnum(evalue,mu=1.0/pc.AMU):
    """Hessian eigenvalue -> wavenumber.
    Fix: the original called wnum2afreq (wavenumber -> angular frequency,
    which MULTIPLIES by 2*pi*c) on an angular frequency; converting an
    angular frequency to a wavenumber is afreq2wnum (divides by 2*pi*c).
    This also makes eval2cm consistent with afreq2cm.
    """
    return afreq2wnum(eval2afreq(evalue,mu))
#-----------------------------------------------#
def afreq2zpe(angfreq):
    """Zero-point energy hbar*omega/2; imaginary (negative) freqs give 0."""
    if angfreq < 0.0: return 0.0
    return pc.HBAR * angfreq / 2.0
#-----------------------------------------------#
def afreq2turnpoint(angfreq,mu):
    """Classical turning point sqrt(hbar/(omega*mu)).
    Imaginary (negative) frequencies return a huge sentinel (1e10)."""
    if angfreq < 0.0: return 1e10
    return np.sqrt( pc.HBAR / angfreq / mu )
#-----------------------------------------------#
def wnum2zpe(wavenum):
    """Zero-point energy (hbar*omega/2) for a wavenumber; 0 if imaginary."""
    # Delegate instead of duplicating the ZPE formula from afreq2zpe.
    return afreq2zpe(wnum2afreq(wavenum))
#-----------------------------------------------#
def eval2cm(evalue,mu=1.0/pc.AMU):
    """Hessian eigenvalue -> frequency in cm^-1 (eval2wnum scaled by pc.CM)."""
    return eval2wnum(evalue,mu)/pc.CM
#-----------------------------------------------#
def afreq2cm(angfreq):
    """Angular frequency -> frequency in cm^-1."""
    return afreq2wnum(angfreq)/pc.CM
#-----------------------------------------------#
def cm2afreq(cm):
    """Frequency in cm^-1 -> angular frequency (inverse of afreq2cm)."""
    return wnum2afreq(cm * pc.CM)
#-----------------------------------------------#
def afreq2eV(angfreq):
    # NOTE(review): this body is identical to afreq2cm (divides by pc.CM),
    # so despite its name it returns cm^-1, not eV. Looks like a
    # copy-paste bug -- presumably it should use an eV conversion
    # constant. Confirm against callers before relying on it.
    return afreq2wnum(angfreq)/pc.CM
#-----------------------------------------------#
def same_freqs(ccfreqs,icfreqs,epsilon=EPS_CCIC):
    """True if both frequency lists agree element-wise within epsilon.
    Frequencies are compared in cm^-1 so epsilon has spectroscopic units."""
    # compare lengths
    if len(ccfreqs) != len(icfreqs):
        return False
    # compare freqs
    for ccf, icf in zip(ccfreqs,icfreqs):
        ccf = afreq2cm(ccf)
        icf = afreq2cm(icf)
        if abs(ccf-icf) > epsilon: return False
    return True
#-----------------------------------------------#
def numimag(freqs):
    """Count imaginary frequencies (stored as negative values)."""
    return sum(1 for freq in freqs if freq < 0.0)
#===============================================#
#===============================================#
# Rotations/Vibrations #
#===============================================#
def get_itensor_matrix(xcc,masses):
    ''' returns inertia tensor (au)'''
    nat = howmanyatoms(xcc)
    # 3x3 tensor accumulated atom by atom.
    inertia = [[0.0 for i in range(3)] for j in range(3)]
    for i in range(nat):
        # Diagonal elements
        inertia[0][0] += masses[i] * (y(xcc,i)**2 + z(xcc,i)**2)
        inertia[1][1] += masses[i] * (z(xcc,i)**2 + x(xcc,i)**2)
        inertia[2][2] += masses[i] * (x(xcc,i)**2 + y(xcc,i)**2)
        # Offdiagonal elements
        inertia[0][1] -= masses[i] * x(xcc,i) * y(xcc,i)
        inertia[0][2] -= masses[i] * z(xcc,i) * x(xcc,i)
        inertia[1][2] -= masses[i] * y(xcc,i) * z(xcc,i)
    # The tensor is symmetric; mirror the off-diagonal terms.
    inertia[1][0] = inertia[0][1]
    inertia[2][0] = inertia[0][2]
    inertia[2][1] = inertia[1][2]
    return inertia
#---------------------------------------------#
def get_itensor_evals(itensor):
    """Diagonalize the inertia tensor and classify the rotor.
    Returns (principal moments, rotational temperatures, rotor-type
    string, linear flag). For linear rotors only the single distinct
    non-zero moment is kept.
    NOTE(review): the Ia/Ib and Ib/Ic ratios assume Ib, Ic != 0; a fully
    degenerate (all-zero) tensor would raise ZeroDivisionError. Confirm.
    """
    evalsI, evecsI = np.linalg.eigh(itensor)
    Ia, Ib, Ic = evalsI
    bool_a = abs(Ia) < EPS_INERTIA # Ia = 0
    bool_ab = abs(Ia/Ib-1.0) < EPS_FLOAT # Ia = Ib
    bool_bc = abs(Ib/Ic-1.0) < EPS_FLOAT # Ib = Ic
    bool_abc = bool_ab and bool_bc
    if bool_abc : rtype = "spherical top"
    elif bool_ab : rtype = "oblate symmetric top"
    elif bool_bc :
        if bool_a: rtype = "linear rotor"
        else : rtype = "prolate symmetric top"
    else : rtype = "asymmetric top"
    if rtype == "linear rotor":
        linear = True
        evalsI = [evalsI[1]]
    else:
        linear = False
    # rotational temperature
    rotTs = [pc.HBAR**2 / (2*Ii*pc.KB) for Ii in evalsI]
    return evalsI, rotTs, rtype, linear
#-----------------------------------------------#
def get_projectionmatrix(xcc,masses,v0=None):
    '''
    Generates matrix to project translation
    and rotation coordinates (mass scaled/weighted)
    Other coordinate can be projected by introducing it
    using v0 (in mass-scaled)
    '''
    vecs = []
    nat = len(masses)
    # PROJECT TRA IN HESS FOR FREQS
    if PROJECT_TRA:
        # translation
        # b1..b3: mass-weighted rigid translations along x, y, z.
        sqrtmasses = [np.sqrt(mass) for mass in masses]
        b1 = [term if ii==0 else 0.0 for term in sqrtmasses for ii in range(3)]
        b2 = [term if ii==1 else 0.0 for term in sqrtmasses for ii in range(3)]
        b3 = [term if ii==2 else 0.0 for term in sqrtmasses for ii in range(3)]
        norm1 = np.linalg.norm(b1)
        norm2 = np.linalg.norm(b2)
        norm3 = np.linalg.norm(b3)
        b1 /= norm1
        b2 /= norm2
        b3 /= norm3
        vecs += [b1,b2,b3]
    # PROJECT ROT IN HESS FOR FREQS
    if PROJECT_ROT:
        # rotation
        # b4..b6: mass-weighted infinitesimal rotations about x, y, z.
        b4 = np.zeros(len(xcc))
        b5 = np.zeros(len(xcc))
        b6 = np.zeros(len(xcc))
        for i in range(nat):
            b4[3*i + 1] = np.sqrt(masses[i]) * z(xcc,i)
            b4[3*i + 2] = - np.sqrt(masses[i]) * y(xcc,i)
            b5[3*i + 0] = - np.sqrt(masses[i]) * z(xcc,i)
            b5[3*i + 2] = np.sqrt(masses[i]) * x(xcc,i)
            b6[3*i + 0] = np.sqrt(masses[i]) * y(xcc,i)
            b6[3*i + 1] = - np.sqrt(masses[i]) * x(xcc,i)
        norm4 = np.linalg.norm(b4)
        norm5 = np.linalg.norm(b5)
        norm6 = np.linalg.norm(b6)
        # Skip (near-)zero rotations, e.g. about the axis of a linear molecule.
        if norm4 > EPS_NORM: b4 /= norm4; vecs.append(b4)
        if norm5 > EPS_NORM: b5 /= norm5; vecs.append(b5)
        if norm6 > EPS_NORM: b6 /= norm6; vecs.append(b6)
    # Gram Schmidt
    if len(vecs) != 0:
        # Orthonormalize via QR; projector onto span(vecs) is X X^H.
        X = np.matrix(vecs).transpose()
        X_gs, R = np.linalg.qr(X)
        projmatrix = X_gs * X_gs.H
    else:
        projmatrix = np.zeros( (3*nat,3*nat) )
    # PROJECT GRADIENT
    if v0 is not None:
        # Add the normalized extra direction (e.g. the gradient) to the projector.
        normv0 = np.linalg.norm(v0)
        if normv0 > EPS_NORM:
            v0 = np.matrix( v0 ) / normv0
            projmatrix += v0.transpose() * v0
    return projmatrix
#-----------------------------------------------#
def project_hessian(Fms,natoms,proj_matrix):
    ''' Remove the projected subspace from the mass-scaled hessian:
    (I - P) * Fms * (I - P) '''
    # complement of the projector
    complement = np.identity(3*natoms) - proj_matrix
    return complement * Fms * complement
#-----------------------------------------------#
def diagonalize_hessian(Fms,mu=1.0/pc.AMU):
    """Diagonalize the (symmetric) mass-scaled hessian.
    Returns the angular frequencies, the eigenvalues and the
    eigenvectors as a list of row vectors."""
    # symmetric matrix --> eigh is the appropriate solver
    evals, evecs = np.linalg.eigh(Fms)
    # eigenvalues --> angular frequencies
    angular = [eval2afreq(evalue, mu) for evalue in evals]
    # one eigenvector per list entry
    return angular, evals, evecs.transpose().tolist()
#-----------------------------------------------#
def detect_frozen(Fcc,nat):
    '''
    Return the indices of frozen atoms, i.e. atoms whose three rows in
    the 3Nx3N Cartesian hessian Fcc are entirely zero (the matrix is
    symmetric, so rows and columns are equivalent).
    Fix/cleanup: the original indexed ``Fcc[:][i]``, which copies the
    whole matrix and then takes a ROW despite the comment claiming
    columns; the zero test is done here with a direct block check.
    '''
    frozen = []
    if Fcc is None or len(Fcc) == 0: return frozen
    F = np.asarray(Fcc)
    for at in range(nat):
        # atom `at` owns rows 3*at .. 3*at+2
        if not F[3*at:3*at+3, :].any():
            frozen.append(at)
    return frozen
#-----------------------------------------------#
def calc_ccfreqs(Fcc,masses,xcc,mu=1.0/pc.AMU,v0=None):
    '''
    Harmonic frequencies from the Cartesian hessian.
    xcc has to be centered at com
    v0 in case gradient has to be removed
    Returns (angular frequencies, eigenvalues, eigenvectors) with
    translation/rotation (and optionally the gradient) projected out.
    '''
    # num of atoms and Fcc in matrix format
    nat = len(masses)
    if len(Fcc) != 3*nat: Fcc = lowt2matrix(Fcc)
    # Frozen atoms?
    frozen = detect_frozen(Fcc,nat)
    # boolN/bool3N: masks of the atoms (and their 3N coordinates) to keep.
    boolN = np.array([at not in frozen for at in range(nat)])
    bool3N = np.array([at not in frozen for at in range(nat) for ii in range(3)])
    # if FROZEN atoms, then reduce system!!
    if len(frozen) != 0:
        masses = np.array(masses)[boolN]
        xcc = np.array(xcc)[bool3N]
        if v0 is not None: v0 = np.array(v0)[bool3N]
        # force constant matrix
        Fcc = [[Fcc[idx1][idx2] for idx1 in range(3*nat) if bool3N[idx1]]\
               for idx2 in range(3*nat) if bool3N[idx2]]
        # num atoms
        nat = len(masses)
    # re-center system
    xcc = shift2com(xcc,masses)
    # Analyze system
    # vibrational degrees of freedom: 3N-5 (linear) or 3N-6, minus one
    # more when the gradient direction is also projected out.
    linear = islinear(xcc)
    if linear: nvdof = 3*nat-5
    else     : nvdof = 3*nat-6
    if v0 is not None: nvdof -= 1
    # mass-scaled hessian
    Fms = cc2ms_F(Fcc,masses,mu=mu)
    Fms = np.matrix(Fms)
    # projection matrix
    pmatrix = get_projectionmatrix(xcc,masses,v0)
    # projected hessian
    Fms = project_hessian(Fms,nat,pmatrix)
    # Diagonalization
    ccfreqs, evalsF, evecsF = diagonalize_hessian(Fms,mu)
    # remove extra frequencies
    # keep the nvdof modes of largest |frequency| (the projected-out
    # modes are near zero), then restore ascending frequency order.
    idxs = sorted([(abs(fq),fq,idx) for idx,fq in enumerate(ccfreqs)])
    idxs.reverse()
    idxs = idxs[:nvdof]
    idxs = sorted([(fq,idx) for absfq,fq,idx in idxs])
    idxs = [idx for fq,idx in idxs]
    ccfreqs = [ccfreqs[idx] for idx in idxs]
    evalsF  = [ evalsF[idx] for idx in idxs]
    evecsF  = [ evecsF[idx] for idx in idxs]
    # Consider the removed atoms in the eigenvectors
    # (frozen coordinates get zero components back)
    if len(frozen) != 0:
        for idx,evecF in enumerate(evecsF):
            evecsF[idx] = [evecF.pop(0) if booli else 0.0 for booli in bool3N]
    return ccfreqs, evalsF, evecsF
#-----------------------------------------------#
def scale_freqs(freqs,fscal):
    '''Return a new list with every frequency multiplied by fscal.'''
    scaled = []
    for frequency in freqs:
        scaled.append(frequency * fscal)
    return scaled
#===============================================#
#=============================================#
# Functions related to extrema #
#=============================================#
def minima_in_list(lx,ly):
    '''
    Scan the (lx, ly) point list and return the x values that may be
    local minima of y: interior points not larger than either
    neighbour, plus an endpoint when it is not larger than its
    single neighbour.

    lx : list of x values
    ly : list of y values (same length as lx)

    Returns the list of x-guesses (possibly empty).
    '''
    # renamed from 'np' to avoid shadowing the module-level numpy alias
    npts = len(lx)
    # degenerate inputs: the original indexed ly[1] unconditionally and
    # raised IndexError for lists with fewer than two points
    if npts == 0: return []
    if npts == 1: return [lx[0]]
    guesses = []
    # initial point
    if ly[0] <= ly[1]:
        guesses.append(lx[0])
    # mid points: a minimum when not above either neighbour
    for idx in range(1,npts-1):
        if ly[idx] <= ly[idx-1] and ly[idx] <= ly[idx+1]:
            guesses.append(lx[idx])
    # final point
    if ly[-1] <= ly[-2]:
        guesses.append(lx[-1])
    return guesses
#=============================================#
#=============================================#
# Functions to print certains variables #
#=============================================#
def print_matrix(hessian,f="%+6.3f"):
nrows, ncols = hessian.shape
l = len(f%1.0)
sint = "%%%ii"%l
STRING = " * shape = %ix%i\n"%(nrows,ncols)
STRING += " "*8 +" ".join([sint%(ii+1) for ii in range(ncols)])+"\n"
for row in range(nrows):
STRING += "%6i "%(row+1) + " ".join([f%value for value in hessian[row,:].tolist()[0]])+"\n"
STRING | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 11 09:37:49 2018
Modified on 11/27/2018 to clean up comments
Modified Wed Dec 5 2018 (Fix Issue 2, Handle DC Loads)
Modified on 02/25/2019 for version 0.1.0
Modified on Wed 01/20/2021 to add computeOutputResults
@author: <NAME>
-------------------------------------------------------------------------------
Name: PVUtilities.py
Purpose: Define utilities found useful in building PV System Simulator
Copyright: (c) <NAME> 2018
License: GNU General Public License, version 3 (GPL-3.0)
This program is distributed WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE.
-------------------------------------------------------------------------------
"""
import numpy as np
import math
import pandas as pd
import os.path
import requests
import csv
from urllib.request import urlopen
from datetime import date
def dfcell_is_empty(cell_value):
    """ Return True if Dataframe cell contains NaN """
    cell_is_nan = np.isnan(cell_value)
    return cell_is_nan
def entry_is_empty(data):
    """ Return True when *data* is a null field (None or empty string). """
    # 'is None' is the idiomatic null test and avoids relying on a
    # custom __eq__ of the incoming object (the original used '== None')
    return data is None or data == ""
def eval_dfcell(cell_value):
    """ Map a NaN Dataframe cell to None; pass any other value through. """
    if not dfcell_is_empty(cell_value):
        return cell_value
    return None
def convert_string_to_hrs(info):
    """ Return the decimal hour encoded in a time string.

    Everything after the first 'T' is treated as colon-separated
    HH:MM:SS fields; any extra ':' fields beyond seconds are ignored.
    """
    clock_part = info[info.find('T') + 1:]
    hh, mm, ss = clock_part.split(':')[:3]
    return float(hh) + float(mm)/60 + float(ss)/3600
def convert_to_dec_hrs(time):
    """ Return the decimal hour of a time-like object exposing
    .hour, .minute and .second (e.g. datetime.time). """
    return time.hour + time.minute/60.0 + time.second/3600.0
def month_timestamp(ts):
    """ Integer month of every entry of a Pandas DateTimeIndex,
    returned as a numpy array. """
    return np.array([stamp.month for stamp in ts])
def doy_timestamp(ts):
    """ Integer day-of-year of every entry of a Pandas DateTimeIndex,
    returned as a numpy array. """
    return np.array([stamp.dayofyear for stamp in ts])
def dom_timestamp(ts):
    """ Relative day-of-month counter for a Pandas DateTimeIndex,
    returned as a numpy array.

    The counter restarts at 1 whenever the month changes and advances
    by one whenever the day-of-year changes within the same month, so
    it matches the calendar day-of-month only when the series covers
    each month from its first included day onward.
    """
    counters = []
    prev_month, prev_doy, counter = -1, -1, 0
    for stamp in ts:
        if stamp.month != prev_month:
            # new month seen: restart the relative counter
            counter = 1
        elif stamp.dayofyear != prev_doy:
            # same month, next day: advance
            counter += 1
        prev_month = stamp.month
        prev_doy = stamp.dayofyear
        counters.append(counter)
    return np.array(counters)
def create_time_indices(tm_z):
    """ Build the base hourly Dataframe index used by the simulations.

    tm_z is the integer timezone offset appended to the timestamps.
    The year used is two years before the current one, shifted back one
    more year when divisible by 4, so the generated index always spans
    a 365-day year (Jan 1 00:00 through Dec 31 23:00).
    """
    baseyear = date.today().year - 2
    if baseyear % 4 == 0:
        # Don't use leap year
        baseyear -= 1
    start = '{0}0101T0000{1:+}'.format(baseyear, tm_z)
    end = '{0}1231T2300{1:+}'.format(baseyear, tm_z)
    times = pd.date_range(start=start, end=end, freq='H')
    return pd.DataFrame({'Month': month_timestamp(times).astype(int),
                         'DayofYear': doy_timestamp(times).astype(int),
                         'DayofMonth': dom_timestamp(times).astype(int)},
                        index=times)
def hourly_load(times, load):
    """ Tile a 24-hour load profile across the whole *times* index.

    load must expose 'AC', 'DC' and 'Total' columns holding 24 hourly
    watt values; entry i of the output repeats profile row i % 24.
    Returns a DataFrame indexed by *times* with columns AC_Load,
    DC_Load and Total_Load.
    """
    n_hours = len(times)
    profile = np.zeros((n_hours, 3))
    for hour in range(n_hours):
        row = hour % 24
        profile[hour] = (load['AC'].iloc[row],
                         load['DC'].iloc[row],
                         load['Total'].iloc[row])
    return pd.DataFrame(data=profile, index=times,
                        columns=['AC_Load', 'DC_Load', 'Total_Load'])
def hourly_temp(avT, maxT, minT, cur_t, rise_t, set_t, trans_t, offset= 2):
    """ Estimate the temperature at time cur_t of the day.

    Assumes temperature follows a sine curve around the average avT
    with amplitude (maxT - minT), peaking at solar noon plus *offset*
    hours (typically 2).

    avT, maxT, minT : average / max / min temperature for the day
    cur_t           : time string containing 'THH:MM:SS'
    rise_t, set_t   : sunrise / sunset time-like objects
    trans_t         : unused (kept for interface compatibility)
    offset          : hours after solar noon at which temp peaks
    """
    ct = convert_string_to_hrs(cur_t)
    sunrise = convert_to_dec_hrs(rise_t)
    sunset = convert_to_dec_hrs(set_t)
    # daylight duration; the original assigned the sunset hour itself to
    # 'dur' (and never used 'sunset'), which misplaces the peak hour
    # except when sunrise happens to be 0
    dur = sunset - sunrise
    # peak temperature at solar noon (sunrise + dur/2) plus the offset
    pkhr = sunrise + 0.5*dur + offset
    d_tmp = maxT - minT
    return avT + d_tmp*math.sin(2*np.pi*((ct-pkhr)/24))
def hourly_speed(avS, maxS, minS, cur_t, rise_t, set_t, trans_t, offset= 2):
    """ Estimate the wind speed at time cur_t of the day.

    Mirrors hourly_temp: speed follows a sine curve around avS with
    amplitude (maxS - minS), peaking at solar noon plus *offset* hours;
    the absolute value keeps the result non-negative.

    trans_t is unused (kept for interface compatibility).
    """
    ct = convert_string_to_hrs(cur_t)
    sunrise = convert_to_dec_hrs(rise_t)
    sunset = convert_to_dec_hrs(set_t)
    # daylight duration; the original reused the sunset hour here,
    # misplacing the peak (same defect as the original hourly_temp)
    dur = sunset - sunrise
    pkhr = sunrise + 0.5*dur + offset
    d_spd = maxS - minS
    return abs(avS - d_spd*math.sin(2*np.pi*(ct-pkhr)/24))
def read_resource(filename, dirptr):
    """ Load a resources csv file into a Pandas DataFrame.

    The first column becomes the index; file rows 1 and 2 (the
    units/notes rows under the header) are skipped.
    """
    return pd.read_csv(os.path.join(dirptr, filename),
                       index_col=0, skiprows=[1, 2])
def read_web_resource(url, dirptr, filename):
    """ Fetch a csv resource from *url*; return True on success and
    False when the connection fails.

    NOTE(review): despite the original docstring, the fetched data is
    never written to dirptr/filename -- the destination path is
    computed but unused. Preserved as-is pending confirmation of
    intent (a caller may rely on the no-write behavior).
    """
    fp = os.path.join(dirptr, filename)  # TODO: write the payload here?
    try:
        response = urlopen(url)
        cr = csv.reader(response.read().decode('utf-8'))
        return True
    except OSError:
        # urllib raises URLError (an OSError subclass) on network
        # failure; the original caught requests' ConnectionError, which
        # urlopen never raises, so failures escaped as exceptions
        return False
def build_monthly_summary(df, select_value):
    """ Build a 12-row DataFrame summarizing *select_value* by month.

    For every month, hourly values are first summed per day of month;
    the month's row then holds the total, mean, max and min of those
    daily sums plus the number of days of data present (hours / 24).
    """
    month_names = np.array(['Jan', 'Feb', 'Mar', 'Apr',
                            'May', 'Jun', 'Jul', 'Aug',
                            'Sep', 'Oct', 'Nov', 'Dec'])
    summary = np.zeros([12, 5])
    for month_no in range(1, 13):
        month_df = df.loc[df['Month'] == month_no]
        daily = month_df[select_value].groupby(month_df['DayofMonth']).sum()
        summary[month_no - 1] = (daily.sum(), daily.mean(),
                                 daily.max(), daily.min(),
                                 len(month_df)/24)
    rslt = pd.DataFrame(summary, month_names,
                        columns=['Total {0}'.format(select_value),
                                 'Avg {0}'.format(select_value),
                                 'Best {0}'.format(select_value),
                                 'Worst {0}'.format(select_value),
                                 'Days' ])
    rslt.index.name= 'Months'
    return rslt
def build_monthly_performance(df, param):
    """ Monthly synopsis of system performance for *param*.

    Returns a 3-element list:
      [0] monthly summary DataFrame (build_monthly_summary)
      [1] best day-of-year designator
      [2] worst day-of-year designator
    """
    return [build_monthly_summary(df, param),
            find_best_doy(df, param),
            find_worst_doy(df, param)]
def find_worst_doy(df, select_value):
    """ Return the DayofYear whose summed *select_value* is smallest.

    The original returned the positional rank of the minimum within the
    grouped result (indx + 1), which equals the day of year only when
    the data starts at day 1 with no missing days; it also scanned with
    a fragile float-equality loop. idxmin returns the actual DayofYear
    label directly.

    Raises IndexError when the frame holds no data to group.
    """
    daily = df[select_value].groupby(df['DayofYear']).sum()
    if daily.empty:
        raise IndexError('No worst day value found')
    return int(daily.idxmin())
def find_best_doy(df, select_value):
    """ Return the DayofYear whose summed *select_value* is largest.

    Mirrors find_worst_doy: the original returned the positional rank
    (indx + 1) rather than the actual DayofYear label, which is wrong
    whenever the data does not start at day 1 contiguously. idxmax
    returns the label directly.

    Raises IndexError when the frame holds no data to group.
    """
    daily = df[select_value].groupby(df['DayofYear']).sum()
    if daily.empty:
        raise IndexError('No best day value found')
    return int(daily.idxmax())
def computOutputResults(attrb_dict, ArP, ArV, ArI, acLd, dcLd, wkDict):
"""Computes the controlled Voltage & current output used to either power
the load or charge/discharge a battery bank. Updates the
battery bank and contents of wkDict based on results of computations"""
"""## attrb_dict Contains the following elements: ###
'Bnk' - PVBattery Instance
'Inv' - PVInverter Instance
'Chg' - PVChgController Instance
"""
if attrb_dict['Inv'] != None and attrb_dict['Inv'].is_defined():
invFlg = True
else:
invFlg = False
if attrb_dict['Bnk'] != None and attrb_dict['Bnk'].is_defined():
bnkFlg = True
else:
bnkFlg = False
if attrb_dict['Chg'] != None and attrb_dict['Chg'].is_defined():
chgFlg = True
else:
chgFlg = False
if not chgFlg and not invFlg:
# No Charge Controller or Inverter in System
if dcLd > 0.0 and ArP >0.0:
# Load to service
if ArP > dcLd:
wkDict['PO'] = dcLd
else:
wkDict['PO'] = ArP
wkDict['DE'] = wkDict['PO']/ArP
wkDict['PS'] = wkDict['PO']/dcLd
else:
#InternalFunction Variables:
internal_parm = {
'stdbyPwr': (attrb_dict['Inv'].read_attrb('Pnt') if invFlg else 0 + #power draw for cntrlr/inverter units
attrb_dict['Chg'].read_attrb('c_cnsmpt') if chgFlg else 0),
'eff': min([(attrb_dict['Inv'].read_attrb('Paco')/attrb_dict['Inv'].read_attrb('Pdco')) if invFlg else 1.0,
(attrb_dict['Chg'].read_attrb('c_eff')/100) if chgFlg else 1.0]), #power conversion efficiency
'pvmxv': attrb_dict['Inv'].read_attrb('Vdcmax') if invFlg else attrb_dict['Chg'].read_attrb('c_pvmxv'), #maximum PV Voltage
'pvmxi': ((attrb_dict['Inv'].read_attrb('Pdco')/attrb_dict['Inv'].read_attrb('Vdco')) if invFlg
else attrb_dict['Chg'].read_attrb('c_pvmxi')), #maximum PV Current
'VmxChg': attrb_dict['Inv'].read_attrb('Vdcmax') if invFlg else attrb_dict['Chg'].read_attrb('c_mvchg'), #Maximum Charge Voltage
'ImxDchg': attrb_dict['Inv'].read_attrb('Idcmax') if invFlg else attrb_dict['Chg'].read_attrb('c_midschg'), #Maximum Discharge Current
'cntlType': attrb_dict['Chg'].read_attrb('c_type') if chgFlg else 'MPPT' #Controller Type either MPPT or PWM
}
sysLd = internal_parm['stdbyPwr']
totUsrLd = dcLd + acLd
if invFlg:
sysLd += attrb_dict['Inv'].compute_dc_power(acLd)
sysLd = ((totUsrLd + sysLd)/internal_parm['eff']) - totUsrLd
pload = totUsrLd + sysLd
vout = min(ArV, internal_parm['pvmxv'])
iout = min(ArI, internal_parm['pvmxi'])
drain = ArP - pload*1.1
#Test for Batbank and either charge state or ability to discharge
if bnkFlg and (drain >= 0 or (drain <0 and attrb_dict['Bnk'].is_okay())):
# A battery bank exists and it is usable for charging or discharging
bv = attrb_dict['Bnk'].get_volts()
if bv <= 0:
bv = 1
if drain >= 0:
vout = min(vout, internal_parm['VmxChg'])
bv = bv*1.2
if internal_parm['cntlType'] == 'MPPT':
iout = max(drain/vout, drain/bv)
else:
iout = min(drain/vout, drain/bv)
else:
# Discharge Battery state
if abs(drain) <= attrb_dict['Bnk'].current_power():
if vout == 0.0 or iout == 0.0:
vout = bv
iout = min(internal_parm['ImxDchg'], -drain/vout)
iout= -1* iout
else:
# Bnk can't provide needed power
if ArP < sysLd:
msg = 'Insufficient Array & Bank power to sustain System operation'
msg += '\n {0:.2f} watts needed but only {1:.2f} watts generated'
wkDict['Error'] = (msg.format(sysLd, ArP), 'Warning')
else:
iout = -1 * ((ArP-sysLd)/vout)
#update Bank State
attrb_dict['Bnk'].update_soc(iout, wkDict)
if ArP - wkDict['BD'] -pload >= 0.0:
#okay met pload requirements
wkDict['PO'] = pload
elif ArP - wkDict['BD'] - sysLd >= 0:
| |
<filename>src/mmd/PmxReader.py
# -*- coding: utf-8 -*-
#
import struct
import hashlib
from mmd.PmxData import PmxModel, Bone, RigidBody, Vertex, Material, Morph, DisplaySlot, RigidBody, Joint, Ik, IkLink, Bdef1, Bdef2, Bdef4, Sdef, Qdef # noqa
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils.MLogger import MLogger # noqa
from utils.MException import SizingException, MKilledException, MParseException
logger = MLogger(__name__, level=1)
class PmxReader:
    def __init__(self, file_path, is_check=True):
        """Prepare the reader for *file_path*; parsing state starts zeroed.

        file_path : path of the PMX file to read
        is_check  : validation flag stored for later use -- not consumed
                    in this block; confirm semantics against callers
        """
        self.file_path = file_path
        self.is_check = is_check
        # parse cursor into self.buffer (raw file bytes, loaded on read)
        self.offset = 0
        self.buffer = None
        # per-kind index byte widths, filled in from the PMX header
        self.vertex_index_size = 0
        self.texture_index_size = 0
        self.material_index_size = 0
        self.bone_index_size = 0
        self.morph_index_size = 0
        self.rigidbody_index_size = 0
    def read_model_name(self):
        """Parse just the PMX header and return the model name (Japanese).

        Side effects: loads the whole file into self.buffer, advances
        self.offset past the header, installs self.read_text and the
        per-type index readers (self.read_*_index_size) -- the same
        header prologue that read_data performs.

        Raises MParseException for non-PMX data or versions other
        than 2.0/2.1.
        """
        model_name = ""
        with open(self.file_path, "rb") as f:
            # read the whole PMX file as binary
            self.buffer = f.read()
            # logger.test("hashlib.algorithms_available: %s", hashlib.algorithms_available)
            # PMX signature (magic bytes)
            signature = self.unpack(4, "4s")
            logger.test("signature: %s (%s)", signature, self.offset)
            # PMX version
            version = self.read_float()
            logger.test("version: %s (%s)", version, self.offset)
            if signature[:3] != b"PMX" or (version != 2.0 and version != 2.1):
                # consistency check: only PMX 2.0/2.1 are supported
                raise MParseException("PMX2.0/2.1形式外のデータです。signature: {0}, version: {1} ".format(signature, version))
            # size of the header flag block
            flag_bytes = self.read_int(1)
            logger.test("flag_bytes: %s (%s)", flag_bytes, self.offset)
            # text-encoding selector for strings in the file
            text_encoding = self.read_int(1)
            logger.test("text_encoding: %s (%s)", text_encoding, self.offset)
            # install the string decoder matching the declared encoding
            self.read_text = self.define_read_text(text_encoding)
            # number of extended UV channels per vertex
            self.extended_uv = self.read_int(1)
            logger.test("extended_uv: %s (%s)", self.extended_uv, self.offset)
            # vertex index byte width
            self.vertex_index_size = self.read_int(1)
            logger.test("vertex_index_size: %s (%s)", self.vertex_index_size, self.offset)
            self.read_vertex_index_size = lambda: self.read_int(self.vertex_index_size)
            # texture index byte width
            self.texture_index_size = self.read_int(1)
            logger.test("texture_index_size: %s (%s)", self.texture_index_size, self.offset)
            self.read_texture_index_size = lambda: self.read_int(self.texture_index_size)
            # material index byte width
            self.material_index_size = self.read_int(1)
            logger.test("material_index_size: %s (%s)", self.material_index_size, self.offset)
            self.read_material_index_size = lambda: self.read_int(self.material_index_size)
            # bone index byte width
            self.bone_index_size = self.read_int(1)
            logger.test("bone_index_size: %s (%s)", self.bone_index_size, self.offset)
            self.read_bone_index_size = lambda: self.read_int(self.bone_index_size)
            # morph index byte width
            self.morph_index_size = self.read_int(1)
            logger.test("morph_index_size: %s (%s)", self.morph_index_size, self.offset)
            self.read_morph_index_size = lambda: self.read_int(self.morph_index_size)
            # rigid-body index byte width
            self.rigidbody_index_size = self.read_int(1)
            logger.test("rigidbody_index_size: %s (%s)", self.rigidbody_index_size, self.offset)
            self.read_rigidbody_index_size = lambda: self.read_int(self.rigidbody_index_size)
            # model name (Japanese)
            model_name = self.read_text()
            logger.test("name: %s (%s)", model_name, self.offset)
        return model_name
def read_data(self):
# Pmxモデル生成
pmx = PmxModel()
pmx.path = self.file_path
try:
# PMXファイルをバイナリ読み込み
with open(self.file_path, "rb") as f:
self.buffer = f.read()
# logger.test("hashlib.algorithms_available: %s", hashlib.algorithms_available)
# pmx宣言
signature = self.unpack(4, "4s")
logger.test("signature: %s (%s)", signature, self.offset)
# pmxバージョン
version = self.read_float()
logger.test("version: %s (%s)", version, self.offset)
if signature[:3] != b"PMX" or (version != 2.0 and version != 2.1):
# 整合性チェック
raise MParseException("PMX2.0/2.1形式外のデータです。signature: {0}, version: {1} ".format(signature, version))
# flag
flag_bytes = self.read_int(1)
logger.test("flag_bytes: %s (%s)", flag_bytes, self.offset)
# エンコード方式
text_encoding = self.read_int(1)
logger.test("text_encoding: %s (%s)", text_encoding, self.offset)
# エンコードに基づいて文字列解凍処理を定義
self.read_text = self.define_read_text(text_encoding)
# 追加UV数
self.extended_uv = self.read_int(1)
logger.test("extended_uv: %s (%s)", self.extended_uv, self.offset)
# 頂点Indexサイズ
self.vertex_index_size = self.read_int(1)
logger.test("vertex_index_size: %s (%s)", self.vertex_index_size, self.offset)
self.read_vertex_index_size = lambda: self.read_int(self.vertex_index_size)
# テクスチャIndexサイズ
self.texture_index_size = self.read_int(1)
logger.test("texture_index_size: %s (%s)", self.texture_index_size, self.offset)
self.read_texture_index_size = lambda: self.read_int(self.texture_index_size)
# 材質Indexサイズ
self.material_index_size = self.read_int(1)
logger.test("material_index_size: %s (%s)", self.material_index_size, self.offset)
self.read_material_index_size = lambda: self.read_int(self.material_index_size)
# ボーンIndexサイズ
self.bone_index_size = self.read_int(1)
logger.test("bone_index_size: %s (%s)", self.bone_index_size, self.offset)
self.read_bone_index_size = lambda: self.read_int(self.bone_index_size)
# モーフIndexサイズ
self.morph_index_size = self.read_int(1)
logger.test("morph_index_size: %s (%s)", self.morph_index_size, self.offset)
self.read_morph_index_size = lambda: self.read_int(self.morph_index_size)
# 剛体Indexサイズ
self.rigidbody_index_size = self.read_int(1)
logger.test("rigidbody_index_size: %s (%s)", self.rigidbody_index_size, self.offset)
self.read_rigidbody_index_size = lambda: self.read_int(self.rigidbody_index_size)
# モデル名(日本語)
pmx.name = self.read_text()
logger.test("name: %s (%s)", pmx.name, self.offset)
# モデル名(英語)
pmx.english_name = self.read_text()
logger.test("english_name: %s (%s)", pmx.english_name, self.offset)
# コメント(日本語)
pmx.comment = self.read_text()
logger.test("comment: %s (%s)", pmx.comment, self.offset)
# コメント(英語)
pmx.english_comment = self.read_text()
logger.test("english_comment: %s (%s)", pmx.english_comment, self.offset)
# 頂点データリスト
for vertex_idx in range(self.read_int(4)):
position = self.read_Vector3D()
normal = self.read_Vector3D()
uv = self.read_Vector2D()
extended_uvs = []
if self.extended_uv > 0:
# 追加UVがある場合
for _ in range(self.extended_uv):
extended_uvs.append(self.read_Vector4D())
deform = self.read_deform()
edge_factor = self.read_float()
# 頂点をウェイトボーンごとに分けて保持する
vertex = Vertex(vertex_idx, position, normal, uv, extended_uvs, deform, edge_factor)
for bone_idx in vertex.deform.get_idx_list():
if bone_idx not in pmx.vertices:
pmx.vertices[bone_idx] = []
pmx.vertices[bone_idx].append(vertex)
logger.test("len(vertices): %s", len(pmx.vertices))
logger.test("vertices.keys: %s", pmx.vertices.keys())
logger.info("-- PMX 頂点読み込み完了")
# 面データリスト
for _ in range(self.read_int(4)):
if self.vertex_index_size <= 2:
# 頂点サイズが2以下の場合、符号なし
pmx.indices.append(self.read_uint(self.vertex_index_size))
else:
pmx.indices.append(self.read_int(self.vertex_index_size))
logger.test("len(indices): %s", len(pmx.indices))
logger.info("-- PMX 面読み込み完了")
# テクスチャデータリスト
for _ in range(self.read_int(4)):
pmx.textures.append(self.read_text())
logger.test("len(textures): %s", len(pmx.textures))
logger.info("-- PMX テクスチャ読み込み完了")
# 材質データリスト
for material_idx in range(self.read_int(4)):
material = Material(
name=self.read_text(),
english_name=self.read_text(),
diffuse_color=self.read_RGB(),
alpha=self.read_float(),
specular_color=self.read_RGB(),
specular_factor=self.read_float(),
ambient_color=self.read_RGB(),
flag=self.read_int(1),
edge_color=self.read_RGBA(),
edge_size=self.read_float(),
texture_index=self.read_texture_index_size(),
sphere_texture_index=self.read_texture_index_size(),
sphere_mode=self.read_int(1),
toon_sharing_flag=self.read_int(1)
)
material.index = material_idx
if material.toon_sharing_flag == 0:
material.toon_texture_index = self.read_texture_index_size()
elif material.toon_sharing_flag == 1:
material.toon_texture_index = self.read_int(1)
else:
raise MParseException("unknown toon_sharing_flag {0}".format(material.toon_sharing_flag))
material.comment = self.read_text()
material.vertex_count = self.read_int(4)
pmx.materials[material.name] = material
pmx.material_indexes[material.index] = material.name
logger.test("len(materials): %s", len(pmx.materials))
logger.info("-- PMX 材質読み込み完了")
# サイジング用ルートボーン
sizing_root_bone = Bone("SIZING_ROOT_BONE", "SIZING_ROOT_BONE", MVector3D(), -1, 0, 0)
sizing_root_bone.index = -999
pmx.bones[sizing_root_bone.name] = sizing_root_bone
# インデックス逆引きも登録
pmx.bone_indexes[sizing_root_bone.index] = sizing_root_bone.name
# ボーンデータリスト
for bone_idx in range(self.read_int(4)):
bone = Bone(
name=self.read_text(),
english_name=self.read_text(),
position=self.read_Vector3D(),
parent_index=self.read_bone_index_size(),
layer=self.read_int(4),
flag=self.read_int(2)
)
if not bone.getConnectionFlag():
bone.tail_position = self.read_Vector3D()
elif bone.getConnectionFlag():
bone.tail_index = self.read_bone_index_size()
else:
raise MParseException("unknown bone conenction flag: {0}".format(bone.getConnectionFlag()))
if bone.getExternalRotationFlag() or bone.getExternalTranslationFlag():
bone.effect_index = self.read_bone_index_size()
bone.effect_factor = self.read_float()
if bone.getFixedAxisFlag():
bone.fixed_axis = self.read_Vector3D()
if bone.getLocalCoordinateFlag():
bone.local_x_vector = self.read_Vector3D()
bone.local_z_vector = self.read_Vector3D()
if bone.getExternalParentDeformFlag():
bone.external_key = self.read_int(4)
if bone.getIkFlag():
bone.ik = Ik(
target_index=self.read_bone_index_size(),
loop=self.read_int(4),
limit_radian=self.read_float()
)
# IKリンク取得
for _ in range(self.read_int(4)):
link = IkLink(
self.read_bone_index_size(),
self.read_int(1)
)
if link.limit_angle == 0:
pass
elif link.limit_angle == 1:
link.limit_min = self.read_Vector3D()
link.limit_max = self.read_Vector3D()
else:
raise MParseException("invalid ik link limit_angle: {0}".format(link.limit_angle))
bone.ik.link.append(link)
# ボーンのINDEX
bone.index = bone_idx
if bone.name not in pmx.bones:
# まだ未登録の名前のボーンの場合のみ登録
pmx.bones[bone.name] = bone
# インデックス逆引きも登録
pmx.bone_indexes[bone.index] = bone.name
# サイジング用ボーン ---------
# 頭頂ボーン
head_top_vertex = pmx.get_head_top_vertex()
pmx.head_top_vertex = head_top_vertex
head_top_bone = Bone("頭頂実体", "head_top", head_top_vertex.position.copy(), -1, 0, 0)
head_top_bone.index = len(pmx.bones.keys())
pmx.bones[head_top_bone.name] = head_top_bone
pmx.bone_indexes[head_top_bone.index] = head_top_bone.name
if "右足先EX" in pmx.bones or "右足IK" in pmx.bones:
# 右足底実体ボーン
right_sole_vertex = None
if "右足先EX" in pmx.bones:
right_sole_vertex = Vertex(-1, MVector3D(pmx.bones["右足先EX"].position.x(), 0, pmx.bones["右足先EX"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
elif "右足IK" in pmx.bones:
right_sole_vertex = pmx.get_sole_vertex("右")
if right_sole_vertex:
pmx.right_sole_vertex = right_sole_vertex
right_sole_bone = Bone("右足底実体", "right sole entity", right_sole_vertex.position.copy(), -1, 0, 0)
right_sole_bone.index = len(pmx.bones.keys())
if "右足先EX" in pmx.bones:
right_sole_bone.parent_index = pmx.bones["右足先EX"].index
else:
right_sole_bone.parent_index = pmx.bones["右足首"].index
pmx.bones[right_sole_bone.name] = right_sole_bone
pmx.bone_indexes[right_sole_bone.index] = right_sole_bone.name
if "左足先EX" in pmx.bones or "左足IK" in pmx.bones:
# 左足底実体ボーン
left_sole_vertex = None
if "左足先EX" in pmx.bones:
left_sole_vertex = Vertex(-1, MVector3D(pmx.bones["左足先EX"].position.x(), 0, pmx.bones["左足先EX"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
elif "左足IK" in pmx.bones:
left_sole_vertex = pmx.get_sole_vertex("左")
if left_sole_vertex:
pmx.left_sole_vertex = left_sole_vertex
left_sole_bone = Bone("左足底実体", "left sole entity", left_sole_vertex.position.copy(), -1, 0, 0)
left_sole_bone.index = len(pmx.bones.keys())
if "左足先EX" in pmx.bones:
left_sole_bone.parent_index = pmx.bones["左足先EX"].index
else:
left_sole_bone.parent_index = pmx.bones["左足首"].index
pmx.bones[left_sole_bone.name] = left_sole_bone
pmx.bone_indexes[left_sole_bone.index] = left_sole_bone.name
if "右足IK" in pmx.bones or "右つま先IK" in pmx.bones:
# 右つま先ボーン
right_toe_vertex = pmx.get_toe_vertex("右")
if right_toe_vertex:
pmx.right_toe_vertex = right_toe_vertex
right_toe_pos = right_toe_vertex.position.copy()
right_toe_pos.setY(0)
right_toe_bone = Bone("右つま先実体", "right toe entity", right_toe_pos, -1, 0, 0)
right_toe_bone.index = len(pmx.bones.keys())
if "右足底実体" in pmx.bones:
right_toe_bone.parent_index = pmx.bones["右足底実体"].index
else:
right_toe_bone.parent_index = pmx.bones["右足首"].index
pmx.bones[right_toe_bone.name] = right_toe_bone
pmx.bone_indexes[right_toe_bone.index] = right_toe_bone.name
if "左足IK" in pmx.bones or "左つま先IK" in pmx.bones:
# 左つま先ボーン
left_toe_vertex = pmx.get_toe_vertex("左")
if left_toe_vertex:
pmx.left_toe_vertex = left_toe_vertex
left_toe_pos = left_toe_vertex.position.copy()
left_toe_pos.setY(0)
left_toe_bone = Bone("左つま先実体", "left toe entity", left_toe_pos, -1, 0, 0)
left_toe_bone.index = len(pmx.bones.keys())
if "左足底実体" in pmx.bones:
left_toe_bone.parent_index = pmx.bones["左足底実体"].index
else:
left_toe_bone.parent_index = pmx.bones["左足首"].index
pmx.bones[left_toe_bone.name] = left_toe_bone
pmx.bone_indexes[left_toe_bone.index] = left_toe_bone.name
if "右足IK" in pmx.bones:
# 右足IK底実体ボーン
right_ik_sole_vertex = Vertex(-1, MVector3D(pmx.bones["右足IK"].position.x(), 0, pmx.bones["右足IK"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
pmx.right_ik_sole_vertex = right_ik_sole_vertex
right_ik_sole_bone = Bone("右足IK底実体", "right ik ik_sole entity", right_ik_sole_vertex.position.copy(), -1, 0, 0)
right_ik_sole_bone.index = len(pmx.bones.keys())
pmx.bones[right_ik_sole_bone.name] = right_ik_sole_bone
pmx.bone_indexes[right_ik_sole_bone.index] = right_ik_sole_bone.name
if "左足IK" in pmx.bones:
# 左足IK底実体ボーン
left_ik_sole_vertex = Vertex(-1, MVector3D(pmx.bones["左足IK"].position.x(), 0, pmx.bones["左足IK"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
pmx.left_ik_sole_vertex = left_ik_sole_vertex
left_ik_sole_bone = Bone("左足IK底実体", "left ik ik_sole entity", left_ik_sole_vertex.position.copy(), -1, 0, 0)
left_ik_sole_bone.index = len(pmx.bones.keys())
pmx.bones[left_ik_sole_bone.name] = left_ik_sole_bone
pmx.bone_indexes[left_ik_sole_bone.index] = left_ik_sole_bone.name
if "右足IK親" in pmx.bones:
# 右足IK親底実体ボーン
right_ik_sole_vertex = Vertex(-1, MVector3D(pmx.bones["右足IK親"].position.x(), 0, pmx.bones["右足IK親"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
pmx.right_ik_sole_vertex = right_ik_sole_vertex
right_ik_sole_bone = Bone("右足IK親底実体", "right ik ik_sole entity", right_ik_sole_vertex.position.copy(), -1, 0, 0)
right_ik_sole_bone.index = len(pmx.bones.keys())
pmx.bones[right_ik_sole_bone.name] = right_ik_sole_bone
pmx.bone_indexes[right_ik_sole_bone.index] = right_ik_sole_bone.name
if "左足IK親" in pmx.bones:
# 左足IK親底実体ボーン
left_ik_sole_vertex = Vertex(-1, MVector3D(pmx.bones["左足IK親"].position.x(), 0, pmx.bones["左足IK親"].position.z()), MVector3D(), [], [], Bdef1(-1), -1)
pmx.left_ik_sole_vertex = left_ik_sole_vertex
left_ik_sole_bone = Bone("左足IK親底実体", "left ik ik_sole entity", left_ik_sole_vertex.position.copy(), -1, 0, 0)
left_ik_sole_bone.index = len(pmx.bones.keys())
pmx.bones[left_ik_sole_bone.name] = left_ik_sole_bone
pmx.bone_indexes[left_ik_sole_bone.index] = left_ik_sole_bone.name
if "右足首" in pmx.bones:
right_heel_bone = Bone("右かかと", "", MVector3D(pmx.bones["右足首"].position.x(), 0, pmx.bones["右足首"].position.z()), -1, 0, 0)
right_heel_bone.parent_index = pmx.bones["右つま先"].index
right_heel_bone.index = len(pmx.bones.keys())
pmx.bones[right_heel_bone.name] = right_heel_bone
pmx.bone_indexes[right_heel_bone.index] = right_heel_bone.name
if "左足首" | |
# 3
(4, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 4
)
  def __init__(self, success=None, ire=None, nfe=None, ue=None, te=None,):
    """Result wrapper (generated Thrift code): *success* holds the
    returned value; the remaining slots hold whichever service
    exception was raised, if any."""
    self.success = success
    self.ire = ire  # InvalidRequestException
    self.nfe = nfe  # NotFoundException
    self.ue = ue    # UnavailableException
    self.te = te    # TimedOutException
  def read(self, iprot):
    """Deserialize this struct from *iprot* (generated Thrift code).

    Uses the C fastbinary decoder when the accelerated binary protocol
    is available; otherwise walks the fields generically, dispatching
    on field id and skipping anything unrecognized.
    """
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 0:
        # field 0: success (ColumnOrSuperColumn)
        if ftype == TType.STRUCT:
          self.success = ColumnOrSuperColumn()
          self.success.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 1:
        # field 1: ire (InvalidRequestException)
        if ftype == TType.STRUCT:
          self.ire = InvalidRequestException()
          self.ire.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        # field 2: nfe (NotFoundException)
        if ftype == TType.STRUCT:
          self.nfe = NotFoundException()
          self.nfe.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        # field 3: ue (UnavailableException)
        if ftype == TType.STRUCT:
          self.ue = UnavailableException()
          self.ue.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 4:
        # field 4: te (TimedOutException)
        if ftype == TType.STRUCT:
          self.te = TimedOutException()
          self.te.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize this struct to *oprot* (generated Thrift code).

    Uses the C fastbinary encoder when the accelerated binary protocol
    is in use; otherwise writes each non-None field explicitly.
    """
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('get_result')
    if self.success != None:
      oprot.writeFieldBegin('success', TType.STRUCT, 0)
      self.success.write(oprot)
      oprot.writeFieldEnd()
    if self.ire != None:
      oprot.writeFieldBegin('ire', TType.STRUCT, 1)
      self.ire.write(oprot)
      oprot.writeFieldEnd()
    if self.nfe != None:
      oprot.writeFieldBegin('nfe', TType.STRUCT, 2)
      self.nfe.write(oprot)
      oprot.writeFieldEnd()
    if self.ue != None:
      oprot.writeFieldBegin('ue', TType.STRUCT, 3)
      self.ue.write(oprot)
      oprot.writeFieldEnd()
    if self.te != None:
      oprot.writeFieldBegin('te', TType.STRUCT, 4)
      self.te.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """No required fields on this result struct; nothing to check."""
    return
  def __repr__(self):
    """Debug representation listing every attribute (Python 2: iteritems)."""
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    """Structs are equal when of the same class with equal attributes."""
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    """Inverse of __eq__ (required explicitly on Python 2)."""
    return not (self == other)
class get_slice_args:
"""
Attributes:
- key
- column_parent
- predicate
- consistency_level
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'key', None, None, ), # 1
(2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
(4, TType.I32, 'consistency_level', None, 1, ), # 4
)
def __init__(self, key=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
self.key = key
self.column_parent = column_parent
self.predicate = predicate
self.consistency_level = consistency_level
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.key = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.column_parent = ColumnParent()
self.column_parent.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRUCT:
self.predicate = SlicePredicate()
self.predicate.read(iprot)
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('get_slice_args')
if self.key != None:
oprot.writeFieldBegin('key', TType.STRING, 1)
oprot.writeString(self.key)
oprot.writeFieldEnd()
if self.column_parent != None:
oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
self.column_parent.write(oprot)
oprot.writeFieldEnd()
if self.predicate != None:
oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
self.predicate.write(oprot)
oprot.writeFieldEnd()
if self.consistency_level != None:
oprot.writeFieldBegin('consistency_level', TType.I32, 4)
oprot.writeI32(self.consistency_level)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
    """Raise TProtocolException if any IDL-required field is still None."""
    if self.key is None:
        raise TProtocol.TProtocolException(message='Required field key is unset!')
    if self.column_parent is None:
        raise TProtocol.TProtocolException(message='Required field column_parent is unset!')
    if self.predicate is None:
        raise TProtocol.TProtocolException(message='Required field predicate is unset!')
    if self.consistency_level is None:
        raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
    return
def __repr__(self):
    """Debug representation listing every attribute. Python 2 only (dict.iteritems)."""
    L = ['%s=%r' % (key, value)
        for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
    # Value equality: same class and identical attribute dictionaries.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
    # Defined explicitly because Python 2 does not derive != from ==.
    return not (self == other)
class get_slice_result:
    """Thrift-generated result struct for Cassandra's get_slice() RPC.

    Exactly one field is populated after a call completes.

    Attributes:
     - success: list of ColumnOrSuperColumn returned on success
     - ire: InvalidRequestException thrown by the server
     - ue: UnavailableException thrown by the server
     - te: TimedOutException thrown by the server
    """

    # (field-id, wire type, name, nested-type info, default) per field,
    # consumed by the C-accelerated fastbinary codec used below.
    thrift_spec = (
        (0, TType.LIST, 'success', (TType.STRUCT,(ColumnOrSuperColumn, ColumnOrSuperColumn.thrift_spec)), None, ), # 0
        (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
        (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
        (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
    )

    def __init__(self, success=None, ire=None, ue=None, te=None,):
        self.success = success
        self.ire = ire
        self.ue = ue
        self.te = te

    def read(self, iprot):
        """Deserialize this struct from the given input protocol (generated code)."""
        # Fast path: C-accelerated decoding.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decoding; unknown/mistyped fields are
        # skipped for forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.LIST:
                    self.success = []
                    (_etype91, _size88) = iprot.readListBegin()
                    for _i92 in xrange(_size88):
                        _elem93 = ColumnOrSuperColumn()
                        _elem93.read(iprot)
                        self.success.append(_elem93)
                    iprot.readListEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 1:
                if ftype == TType.STRUCT:
                    self.ire = InvalidRequestException()
                    self.ire.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ue = UnavailableException()
                    self.ue.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.te = TimedOutException()
                    self.te.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol; unset fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('get_slice_result')
        if self.success != None:
            oprot.writeFieldBegin('success', TType.LIST, 0)
            oprot.writeListBegin(TType.STRUCT, len(self.success))
            for iter94 in self.success:
                iter94.write(oprot)
            oprot.writeListEnd()
            oprot.writeFieldEnd()
        if self.ire != None:
            oprot.writeFieldBegin('ire', TType.STRUCT, 1)
            self.ire.write(oprot)
            oprot.writeFieldEnd()
        if self.ue != None:
            oprot.writeFieldBegin('ue', TType.STRUCT, 2)
            self.ue.write(oprot)
            oprot.writeFieldEnd()
        if self.te != None:
            oprot.writeFieldBegin('te', TType.STRUCT, 3)
            self.te.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Result structs have no required fields.
        return

    def __repr__(self):
        """Debug representation. Python 2 only (dict.iteritems)."""
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Value equality: same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_count_args:
    """Thrift-generated argument struct for Cassandra's get_count() RPC.

    All four fields are required by the IDL (see validate()).

    Attributes:
     - key: row key whose columns are counted
     - column_parent: ColumnParent scoping the count
     - predicate: SlicePredicate restricting which columns are counted
     - consistency_level: ConsistencyLevel enum value (IDL default 1 == ONE)
    """

    # (field-id, wire type, name, nested-type info, default) per field,
    # consumed by the C-accelerated fastbinary codec used below.
    thrift_spec = (
        None, # 0
        (1, TType.STRING, 'key', None, None, ), # 1
        (2, TType.STRUCT, 'column_parent', (ColumnParent, ColumnParent.thrift_spec), None, ), # 2
        (3, TType.STRUCT, 'predicate', (SlicePredicate, SlicePredicate.thrift_spec), None, ), # 3
        (4, TType.I32, 'consistency_level', None, 1, ), # 4
    )

    def __init__(self, key=None, column_parent=None, predicate=None, consistency_level=thrift_spec[4][4],):
        # consistency_level defaults to the IDL field default stored in
        # thrift_spec slot 4 (1 == ConsistencyLevel.ONE).
        self.key = key
        self.column_parent = column_parent
        self.predicate = predicate
        self.consistency_level = consistency_level

    def read(self, iprot):
        """Deserialize this struct from the given input protocol (generated code)."""
        # Fast path: C-accelerated decoding.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decoding; unknown/mistyped fields skipped.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.key = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.column_parent = ColumnParent()
                    self.column_parent.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.predicate = SlicePredicate()
                    self.predicate.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 4:
                if ftype == TType.I32:
                    self.consistency_level = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol; unset fields are omitted."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('get_count_args')
        if self.key != None:
            oprot.writeFieldBegin('key', TType.STRING, 1)
            oprot.writeString(self.key)
            oprot.writeFieldEnd()
        if self.column_parent != None:
            oprot.writeFieldBegin('column_parent', TType.STRUCT, 2)
            self.column_parent.write(oprot)
            oprot.writeFieldEnd()
        if self.predicate != None:
            oprot.writeFieldBegin('predicate', TType.STRUCT, 3)
            self.predicate.write(oprot)
            oprot.writeFieldEnd()
        if self.consistency_level != None:
            oprot.writeFieldBegin('consistency_level', TType.I32, 4)
            oprot.writeI32(self.consistency_level)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        """Raise TProtocolException if any IDL-required field is still None."""
        if self.key is None:
            raise TProtocol.TProtocolException(message='Required field key is unset!')
        if self.column_parent is None:
            raise TProtocol.TProtocolException(message='Required field column_parent is unset!')
        if self.predicate is None:
            raise TProtocol.TProtocolException(message='Required field predicate is unset!')
        if self.consistency_level is None:
            raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
        return

    def __repr__(self):
        """Debug representation. Python 2 only (dict.iteritems)."""
        L = ['%s=%r' % (key, value)
            for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        # Value equality: same class and identical attribute dictionaries.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class get_count_result:
"""
Attributes:
- success
- ire
- ue
- te
"""
thrift_spec = (
(0, TType.I32, 'success', None, None, ), # 0
(1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ), # 1
(2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ), # 2
(3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ), # 3
)
def __init__(self, success=None, ire=None, ue=None, te=None,):
self.success = success
self.ire = ire
self.ue = ue
self.te = te
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32();
else:
iprot.skip(ftype)
elif fid == | |
# Import standard functions from numpy.
import numpy as np
from numpy.random import normal
# Import matplotlib and set related parameters.
import matplotlib.pyplot as plt
fig_width = 12
# Import SciPy utility functions for linear dynamical systems.
from scipy.signal import lti
from scipy.signal import dlti, dlsim
# Import standard linear algebra functions from SciPy.
from scipy.linalg import norm
# Import various DMD algorithms available in PyDMD.
from pydmd import DMD, OptDMD
def harmonic_oscillators(N=10, omega=0.1, alpha=0.2, gamma=0.05, dt=1.0):
    """Build the discrete-time state-space model of a chain of N coupled,
    weakly damped, identical harmonic oscillators with nearest-neighbour
    coupling.

    Parameters
    ----------
    N : int
        Number of oscillators in the chain (default 10).
    omega : float
        Natural frequency of the base oscillator (default 0.1).
    alpha : float
        Nearest-neighbour coupling strength (default 0.2).
    gamma : float
        Damping coefficient (default 0.05).
    dt : float
        Sampling period used for the continuous-to-discrete conversion
        (default 1.0).

    Returns
    -------
    scipy.signal.dlti
        The discrete-time state-space model with state [q; p] of size 2N.
    """
    from scipy.sparse import diags, identity, bmat

    # Stiffness matrix: self-coupling on the diagonal, nearest-neighbour
    # coupling on the first off-diagonals.
    diag_main = np.full(N, omega**2 / 2.0 + alpha)
    diag_off = np.full(N - 1, -alpha / 2.0)
    K = diags([diag_off, diag_main, diag_off], offsets=[-1, 0, 1])

    # Friction proportional to the identity (uniform damping).
    G = gamma * identity(N)

    # First-order form: d/dt [q; p] = A [q; p] + B u, with full-state output.
    A = bmat([[None, identity(N)], [-K, -G]])
    B = bmat([[0 * identity(N)], [identity(N)]])
    C = identity(2 * N)
    D = bmat([[0 * identity(N)], [0 * identity(N)]])

    # Build the continuous-time model, then discretize it with period dt.
    continuous = lti(A.toarray(), B.toarray(), C.toarray(), D.toarray())
    return continuous.to_discrete(dt)
# Get the discrete-time LTI model.
N = 50 # Number of oscillators; each has 2 degrees of freedom, so the state dimension is 2N.
dsys = harmonic_oscillators(N=N) # Build the model.
# Training initial condition: standard-normal random state.
# NOTE: the RNG is not seeded, so each run produces a different dataset.
x0_train = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))
# Autonomous (zero-input) 2000-step simulation to generate the training data.
t, _, x_train = dlsim(dsys, np.zeros((2000, dsys.inputs)), x0=x0_train)
def plot_training_dataset(t, x_train, n_osc=None):
    """Plot the position and velocity time-series of a training dataset.

    Parameters
    ----------
    t : array-like, shape (n_samples,)
        The time instants.
    x_train : array-like, shape (n_samples, n_dof)
        State trajectories; the first n_dof/2 columns are positions q_i,
        the last n_dof/2 are velocities p_i (as produced by
        harmonic_oscillators, whose output is the full state [q; p]).
    n_osc : int, optional
        Number of oscillators, i.e. the column index separating positions
        from velocities. Defaults to half the state dimension. This
        replaces the previous implicit read of the module-level `dsys`
        global (dsys.inputs), which made the function unusable with data
        from any other system.
    """
    if n_osc is None:
        # For this model dsys.inputs == N == x_train.shape[1] // 2.
        n_osc = x_train.shape[1] // 2
    # Set up a two-panel figure: positions on the left, velocities right.
    fig, axes = plt.subplots(1, 2, sharex=True, figsize=(fig_width, fig_width/6))
    # Oscillator positions.
    axes[0].plot(t, x_train[:, :n_osc], alpha=0.5)
    axes[0].set_ylabel(r"$q_i[k]$")
    # Oscillator velocities.
    axes[1].plot(t, x_train[:, n_osc:], alpha=0.5)
    axes[1].set(xlim=(t.min(), t.max()), xlabel=r"k", ylabel=r"$p_i[k]$")
    return
# Visualize the 2000-sample training trajectories (positions and velocities).
plot_training_dataset(t, x_train)
plt.show()
def rank_sensitvity(dsys, x_train, n_test=100):
    """
    Fit DMD and OptDMD models of every admissible rank on the training
    dataset, and estimate their generalization error on an ensemble of
    freshly simulated test trajectories.

    NOTE(review): the name is missing an 'i' ("sensitivity") but is kept
    as-is because module-level callers use this spelling.

    Parameters
    ----------
    dsys : scipy.signal.dlti
        The discrete LTI system considered.
    x_train : array-like, shape (n_features, n_samples)
        The training dataset.
        NOTE : It is transposed compared to the output of dsys.
    n_test : int
        The number of testing datasets to be generated.

    Returns
    -------
    dmd_train_error : array-like, shape (n_ranks,)
        The reconstruction error of the DMD model on the training data.
    dmd_test_error : array-like, shape (n_ranks, n_test)
        The reconstruction error of the DMD model on the various testing datasets.
    optdmd_train_error : array-like, shape (n_ranks,)
        The reconstruction error of the OptDMD model on the training data.
    optdmd_test_error : array-like, shape (n_ranks, n_test)
        The reconstruction error of the OptDMD model on the various testing datasets.
    """
    dmd_train_error, optdmd_train_error = list(), list()
    dmd_test_error, optdmd_test_error = list(), list()
    # Split the training data into input/output snapshot pairs x[k] -> x[k+1].
    y_train, X_train = x_train[:, 1:], x_train[:, :-1]
    # One model per rank, from 1 up to the full state dimension 2N.
    for rank in range(1, dsys.A.shape[0]+1):
        # Fit the DMD model (Schmid's algorithm).
        dmd = DMD(svd_rank=rank).fit(x_train)
        # Fit the OptDMD model (optimal closed-form solution).
        optdmd = OptDMD(svd_rank=rank, factorization="svd").fit(x_train)
        # One-step-ahead prediction on the training snapshots.
        # NOTE(review): assumes pydmd's predict() maps x[k] to x[k+1] — confirm.
        y_predict_dmd = dmd.predict(X_train)
        y_predict_opt = optdmd.predict(X_train)
        # Normalized (relative Frobenius) one-step-ahead prediction error.
        dmd_train_error.append(norm(y_predict_dmd-y_train)/norm(y_train))
        optdmd_train_error.append(norm(y_predict_opt-y_train)/norm(y_train))
        # Evaluate the same error on n_test unseen trajectories.
        dmd_error, optdmd_error = list(), list()
        for _ in range(n_test):
            # Random (unseeded) test initial condition.
            x0_test = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))
            # Zero-input simulation of 250 samples.
            t, _, x_test = dlsim(dsys, np.zeros((250, dsys.inputs)), x0=x0_test)
            # Split the test data into input/output snapshots (transposed).
            y_test, X_test = x_test.T[:, 1:], x_test.T[:, :-1]
            # One-step ahead prediction using both DMD models.
            y_predict_dmd = dmd.predict(X_test)
            y_predict_opt = optdmd.predict(X_test)
            # Compute the one-step ahead prediction error.
            dmd_error.append(norm(y_predict_dmd-y_test)/norm(y_test))
            optdmd_error.append(norm(y_predict_opt-y_test)/norm(y_test))
        # Store the per-trajectory errors for this rank.
        dmd_test_error.append(np.asarray(dmd_error))
        optdmd_test_error.append(np.asarray(optdmd_error))
    # Stack into arrays: rows index the rank, columns the test trajectory.
    dmd_test_error = np.asarray(dmd_test_error)
    optdmd_test_error = np.asarray(optdmd_test_error)
    dmd_train_error = np.asarray(dmd_train_error)
    optdmd_train_error = np.asarray(optdmd_train_error)
    return dmd_train_error, dmd_test_error, optdmd_train_error, optdmd_test_error
def plot_rank_sensitivity(dmd_train_error, dmd_test_error, optdmd_train_error, optdmd_test_error):
    """Plot the rank-sensitivity analysis: training error (left panel) and
    ensemble-averaged testing error with a one-standard-deviation band
    (right panel), for both the regular DMD and the OptDMD models.

    Parameters
    ----------
    dmd_train_error : array-like, shape (n_ranks,)
        DMD reconstruction error on the training data.
    dmd_test_error : array-like, shape (n_ranks, n_test)
        DMD reconstruction error on each testing dataset.
    optdmd_train_error : array-like, shape (n_ranks,)
        OptDMD reconstruction error on the training data.
    optdmd_test_error : array-like, shape (n_ranks, n_test)
        OptDMD reconstruction error on each testing dataset.
    """
    fig, (ax_train, ax_test) = plt.subplots(
        1, 2, figsize=(fig_width, fig_width/4), sharex=True, sharey=True)
    ranks = np.arange(1, dmd_test_error.shape[0]+1)

    # --- Training error (plot order kept: DMD first, so the default
    # matplotlib color cycle assigns the same colors as before) ---
    ax_train.plot(ranks, dmd_train_error)
    ax_train.plot(ranks, optdmd_train_error, ls="--")
    ax_train.set(
        xlabel=r"Rank of the DMD model", ylabel=r"Normalized error", title=r"Training dataset"
    )
    ax_train.grid(True)

    # --- Testing error: mean over the ensemble, +/- one std deviation ---
    dmd_mean = np.mean(dmd_test_error, axis=1)
    dmd_std = np.std(dmd_test_error, axis=1)
    ax_test.plot(ranks, dmd_mean, label=r"Regular DMD")
    ax_test.fill_between(ranks, dmd_mean + dmd_std, dmd_mean - dmd_std, alpha=0.25)

    opt_mean = np.mean(optdmd_test_error, axis=1)
    opt_std = np.std(optdmd_test_error, axis=1)
    ax_test.plot(ranks, opt_mean, ls="--", label=r"Optimal DMD")
    ax_test.fill_between(ranks, opt_mean + opt_std, opt_mean - opt_std, alpha=0.25)

    ax_test.set(
        xlim=(0, ranks.max()), xlabel=r"Rank of the DMD model",
        ylim=(0, 1),
        title=r"Testing dataset"
    )
    ax_test.grid(True)
    ax_test.legend(loc=0)
    return
# Case 1: rank sensitivity on the long (2000-sample) training time-series.
output = rank_sensitvity(dsys, x_train.T)
# Keep the OptDMD train/test errors for later comparison.
long_time_series_optdmd_train, long_time_series_optdmd_test = output[2], output[3]
# Plot the results.
plot_rank_sensitivity(*output)
plt.show()
# Case 2: repeat the experiment with a much shorter (100-sample) series.
# New random training initial condition.
x0_train = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))
# Run simulation to generate dataset.
t, _, x_train = dlsim(dsys, np.zeros((100, dsys.inputs)), x0=x0_train)
# Plot the corresponding training data.
plot_training_dataset(t, x_train)
plt.show()
# Run the rank-sensitivity analysis on the short series.
output = rank_sensitvity(dsys, x_train.T)
# Keep for later use.
short_time_series_optdmd_train, short_time_series_optdmd_test = output[2], output[3]
# Plot the results.
plot_rank_sensitivity(*output)
plt.show()
# ## Case 3 : Fitting a DMD model using an ensemble of trajectories
def generate_ensemble_time_series(dsys, n_traj, len_traj):
    """Generate a training dataset formed by an ensemble of trajectories.

    Each trajectory is an autonomous (zero-input) simulation of `dsys`
    started from an independent standard-normal initial condition (the
    RNG is not seeded).

    Parameters
    ----------
    dsys : scipy.signal.dlti
        The discrete LTI system considered.
    n_traj : int
        The number of trajectories forming the ensemble.
    len_traj : int
        The length (number of samples) of each trajectory.

    Returns
    -------
    X : array-like, shape (n_features, n_traj * (len_traj - 1))
        Snapshots x[k] (model inputs), all trajectories concatenated.
    Y : array-like, shape (n_features, n_traj * (len_traj - 1))
        Snapshots x[k+1] (one-step-ahead targets), aligned with X.
    """
    X_parts, Y_parts = [], []
    for _ in range(n_traj):
        # Independent random initial condition for this trajectory.
        x0 = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))
        # Zero-input simulation; dlsim returns samples as rows, hence the
        # transpose to get features-by-samples snapshot matrices.
        _, _, x = dlsim(dsys, np.zeros((len_traj, dsys.inputs)), x0=x0)
        X_parts.append(x.T[:, :-1])
        Y_parts.append(x.T[:, 1:])
    # Single concatenation at the end. The previous version grew X and Y
    # with np.c_ on every iteration, re-copying the accumulated arrays each
    # time (quadratic cost in n_traj).
    return np.hstack(X_parts), np.hstack(Y_parts)
def rank_sensitvity_bis(dsys, X, Y, n_test=100):
"""
Same as before but for the ensemble training. Note that no DMD model is fitted, only OptDMD.
"""
optdmd_train_error, optdmd_test_error = list(), list()
# Fit a DMD model for each possible rank.
for rank in range(1, dsys.A.shape[0]+1):
# Fit the DMD model (optimal closed-form solution)
optdmd = OptDMD(svd_rank=rank, factorization="svd").fit(X, Y)
# One-step ahead prediction using both DMD models.
y_predict_opt = optdmd.predict(X)
# Compute the one-step ahead prediction error.
optdmd_train_error.append(norm(y_predict_opt-Y)/norm(Y))
# Evaluate the error on test data.
optdmd_error = list()
for _ in range(n_test):
# Test initial condition.
x0_test = normal(loc=0.0, scale=1.0, size=(dsys.A.shape[1]))
# Run simulation to generate dataset.
t, _, x_test = dlsim(dsys, np.zeros((250, dsys.inputs)), x0=x0_test)
# Split the training data into input/output snapshots.
y_test, X_test = x_test.T[:, 1:], | |
<filename>src/config/device-manager/test/test_dm_ansible_dci_gateway.py
#
# Copyright (c) 2020 Juniper Networks, Inc. All rights reserved.
#
from __future__ import absolute_import
from attrdict import AttrDict
from cfgm_common.tests.test_common import retries
from cfgm_common.tests.test_common import retry_exc_handler
import gevent
import mock
from vnc_api.vnc_api import DataCenterInterconnect, \
IpamSubnetType, LogicalRouter, NetworkIpam, \
SubnetType, VirtualMachineInterface, VirtualNetwork, \
VirtualNetworkType, VnSubnetsType
from .test_dm_ansible_common import TestAnsibleCommonDM
class TestAnsibleDciGateway(TestAnsibleCommonDM):
def setUp(self, extra_config_knobs=None):
    """Initialize the common DM fixture and patch out gevent.idle.

    gevent.idle is mocked for the whole test (the patch is stopped in
    tearDown), presumably so device-manager greenlets never yield on
    idle — TODO confirm against TestAnsibleCommonDM.
    """
    super(TestAnsibleDciGateway, self).setUp(
        extra_config_knobs=extra_config_knobs)
    self.idle_patch = mock.patch('gevent.idle')
    self.idle_mock = self.idle_patch.start()
def tearDown(self):
    """Undo the gevent.idle patch, then run the base-class teardown."""
    self.idle_patch.stop()
    super(TestAnsibleDciGateway, self).tearDown()
def _delete_objects(self):
    """Delete every API object created during a test.

    Objects are deleted in the order routers -> role configs ->
    node profiles -> fabrics -> job templates, followed by the shared
    role/feature definitions via the base-class helpers — presumably
    reverse-dependency order so no delete is rejected for back-refs.
    """
    for obj in self.physical_routers:
        self._vnc_lib.physical_router_delete(id=obj.get_uuid())
    for obj in self.bgp_routers:
        self._vnc_lib.bgp_router_delete(id=obj.get_uuid())
    for obj in self.role_configs:
        self._vnc_lib.role_config_delete(id=obj.get_uuid())
    for obj in self.node_profiles:
        self._vnc_lib.node_profile_delete(id=obj.get_uuid())
    for obj in self.fabrics:
        self._vnc_lib.fabric_delete(id=obj.get_uuid())
    for obj in self.job_templates:
        self._vnc_lib.job_template_delete(id=obj.get_uuid())
    self.delete_role_definitions()
    self.delete_features()
    self.delete_overlay_roles()
    self.delete_physical_roles()
# end _delete_objects
@retries(5, hook=retry_exc_handler)
def check_lr_internal_vn_state(self, lr_obj):
    """Read the LR's auto-created internal VN and verify it is L3-only.

    Retries (via the @retries decorator) until the internal VN exists
    and its forwarding mode has been set to 'l3' — presumably populated
    asynchronously by the schema transformer.

    Returns the internal VirtualNetwork object on success.

    Raises
    ------
    Exception
        If the VN properties are unset or the forwarding mode is not L3
        (triggers a retry).
    """
    internal_vn_name = '__contrail_lr_internal_vn_' + lr_obj.uuid + '__'
    vn_fq = lr_obj.get_fq_name()[:-1] + [internal_vn_name]
    # The read either returns a VN object or raises (triggering a retry);
    # the previous dead "vn_obj = None" pre-assignment was removed.
    vn_obj = self._vnc_lib.virtual_network_read(fq_name=vn_fq)
    vn_obj_properties = vn_obj.get_virtual_network_properties()
    if not vn_obj_properties:
        raise Exception("LR Internal VN properties are not set")
    fwd_mode = vn_obj_properties.get_forwarding_mode()
    if fwd_mode != 'l3':
        raise Exception("LR Internal VN Forwarding mode is not set to L3")
    return vn_obj
# end check_lr_internal_vn_state
def _init_fabric_prs(self):
    """Reset the per-test registries of created API objects."""
    # Dict registries.
    self.features = {}
    self.role_definitions = {}
    # List registries, later consumed by _delete_objects for cleanup.
    for attr in ('feature_configs', 'job_templates', 'fabrics',
                 'bgp_routers', 'node_profiles', 'role_configs',
                 'physical_routers'):
        setattr(self, attr, [])
# end _init_fabric_prs
def _create_node_profile(self, name, device_family, role, rb_roles,
                         job_temp):
    """Create a node profile (plus its role config) with one role mapping.

    The created objects are registered in self.node_profiles /
    self.role_configs so _delete_objects can clean them up.
    Returns (node_profile, role_config).
    """
    np1, rc1 = self.create_node_profile(
        'node-profile-' + name,
        device_family=device_family,
        role_mappings=[
            AttrDict({
                'physical_role': role,
                'rb_roles': rb_roles
            })
        ],
        job_template=job_temp)
    self.node_profiles.append(np1)
    self.role_configs.append(rc1)
    return np1, rc1
# end _create_node_profile
def _create_fabrics_prs(self, dict_fabrics, dict_prs, name="DCI"):
    """Create fabrics and DCI-gateway physical routers for the tests.

    Populates the caller's dicts in place:
      - dict_fabrics: fabric-name -> created Fabric object
      - dict_prs: pr-name -> {'br': BgpRouter, 'pr': PhysicalRouter}

    Naming conventions encoded here: PR names containing 'PR1_' become
    qfx10002 spines (junos-qfx), all others mx240 leaves (junos); a PR
    is attached to a fabric only if the fabric's name is a substring of
    the PR name.
    """
    self._init_fabric_prs()
    self.create_features(['overlay-bgp'])
    self.create_physical_roles(['leaf', 'spine'])
    ov_roles = ['DCI-Gateway']
    self.create_overlay_roles(ov_roles)
    # Both spine and leaf get the DCI-Gateway overlay role with the
    # overlay-bgp feature.
    self.create_role_definitions([
        AttrDict({
            'name': 'dci-gateway@spine',
            'physical_role': 'spine',
            'overlay_role': 'DCI-Gateway',
            'features': ['overlay-bgp'],
            'feature_configs': None
        }),
        AttrDict({
            'name': 'dci-gateway@leaf',
            'physical_role': 'leaf',
            'overlay_role': 'DCI-Gateway',
            'features': ['overlay-bgp'],
            'feature_configs': None
        })
    ])
    jt = self.create_job_template('job-template-' + name + self.id())
    self.job_templates.append(jt)
    np_spine, rc_spine = self._create_node_profile(
        name + '-spine' + self.id(), 'junos-qfx',
        'spine', ['DCI-Gateway'], jt)
    np_leaf, rc_leaf = self._create_node_profile(
        name + '-leaf' + self.id(), 'junos-qfx',
        'leaf', ['DCI-Gateway'], jt)
    # num seeds both the management (7.7.7.x) and loopback (30.30.0.x)
    # addresses; incremented once per router so they stay unique.
    num = 32
    for f_name in dict_fabrics.keys():
        fabric = self.create_fabric(f_name + self.id())
        self.fabrics.append(fabric)
        dict_fabrics[f_name] = fabric
        for prname in dict_prs.keys():
            # A PR belongs to this fabric only if the fabric name is
            # embedded in the PR name.
            if f_name not in prname:
                continue
            role = 'spine' if 'PR1_' in prname else 'leaf'
            np = np_spine if 'PR1_' in prname else np_leaf
            br, pr = self.create_router(
                prname + self.id(), '7.7.7.%s' % num,
                product='qfx10002' if 'PR1_' in prname else 'mx240',
                family='junos-qfx' if 'PR1_' in prname else 'junos',
                role=role,
                rb_roles=['DCI-Gateway'],
                physical_role=self.physical_roles[role],
                overlay_role=self.overlay_roles['DCI-Gateway'],
                fabric=fabric, node_profile=np)
            pr.set_physical_router_loopback_ip('30.30.0.%s' % num)
            num += 1
            self._vnc_lib.physical_router_update(pr)
            self.physical_routers.append(pr)
            self.bgp_routers.append(br)
            dict_prs[prname]["br"] = br
            dict_prs[prname]["pr"] = pr
    return
# end _create_fabrics_prs
def create_vn_ipam(self, id):
    """Create a NetworkIpam named 'ipam-<id>' and return it re-read from the API."""
    new_ipam = NetworkIpam('ipam-' + id)
    new_uuid = self._vnc_lib.network_ipam_create(new_ipam)
    return self._vnc_lib.network_ipam_read(id=new_uuid)
# end create_vn_ipam
def create_vn_with_subnets(self, id, vn_name, ipam_obj, subnet,
                           subnetmask=24):
    """Create an l2_l3 VN with one subnet and VXLAN VNI 2000+id.

    Returns (local VN object, VN re-read from the API, route target).

    NOTE(review): on RT-lookup failure the third element stays the
    initial [] while _get_route_target otherwise returns a string —
    callers should treat any falsy value as "no RT yet".
    """
    vn_obj = VirtualNetwork(vn_name)
    vn_obj_properties = VirtualNetworkType()
    vn_obj_properties.set_vxlan_network_identifier(2000 + id)
    vn_obj_properties.set_forwarding_mode('l2_l3')
    vn_obj.set_virtual_network_properties(vn_obj_properties)
    vn_obj.add_network_ipam(ipam_obj, VnSubnetsType(
        [IpamSubnetType(SubnetType(subnet, subnetmask))]))
    vn_uuid = self._vnc_lib.virtual_network_create(vn_obj)
    vn_obj_rd = self._vnc_lib.virtual_network_read(id=vn_uuid)
    # Best effort: make sure the RT for the VN has been created; failures
    # are deliberately swallowed.
    rt = []
    try:
        rt = self._get_route_target(vn_obj_rd)
    except Exception:
        pass
    return vn_obj, self._vnc_lib.virtual_network_read(id=vn_uuid), rt
# end create_vn_with_subnets
def make_vn_name(self, subnet):
    """Return the conventional VN name for a subnet, e.g. 'VN_10.1.1.0'."""
    return "VN_{0}".format(subnet)
def make_lr_name(self, subnet1, pr_name):
    """Return the conventional LR name for a subnet/PR pair, e.g. 'LR_10.1.1.0_PR1_fabric1'."""
    return "LR_{0}_{1}".format(subnet1, pr_name)
def create_lr(self, lrname, vns, prs, vmis, dict_vn_rt):
    """Create a vxlan-routing LogicalRouter spanning the given PRs and VNs.

    For every VN, a VMI named 'vmi-lr-to-vn<vn-display-name>' is created
    and attached; created VMIs are recorded in `vmis` (name -> object).
    On success the LR internal VN's route target is stored in
    dict_vn_rt[lrname].

    Returns (local LR object, LR re-read from the API).
    """
    lr_fq_name = ['default-domain', 'default-project', lrname]
    lr = LogicalRouter(fq_name=lr_fq_name, parent_type='project',
                       logical_router_type='vxlan-routing')
    for pr in prs:
        probj = self._vnc_lib.physical_router_read(id=pr.get_uuid())
        lr.add_physical_router(probj)
    for vn in vns:
        vminame = 'vmi-lr-to-vn' + vn.get_display_name()
        fq_name1 = ['default-domain', 'default-project', vminame]
        vmi = VirtualMachineInterface(fq_name=fq_name1,
                                      parent_type='project')
        vmi.set_virtual_network(vn)
        self._vnc_lib.virtual_machine_interface_create(vmi)
        vmis[vminame] = vmi
        lr.add_virtual_machine_interface(vmi)
    lr.set_logical_router_type('vxlan-routing')
    lr_uuid = self._vnc_lib.logical_router_create(lr)
    # Best effort: wait for the internal VN and record its route target;
    # failures are deliberately swallowed (callers may not need the RT).
    try:
        vn_obj = self.check_lr_internal_vn_state(lr)
        dict_vn_rt[lrname] = self._get_route_target(vn_obj)
    except Exception:
        pass
    return lr, self._vnc_lib.logical_router_read(id=lr_uuid)
# end create_lr
def _make_ri_comments(self, vn_obj, vrf_mode, fwd_mode="L2-L3",
                      prefix=' Public'):
    """Build the expected routing-instance comment string for a VN."""
    body = "%s Virtual Network: %s, UUID: %s, VRF Type: %s, Forwarding Mode: %s" % (
        prefix, vn_obj.get_fq_name()[-1], vn_obj.get_uuid(),
        vrf_mode, fwd_mode)
    return "/*" + body + " */"
# end _make_ri_comments
@retries(4, hook=retry_exc_handler)
def _get_route_target(self, vn_obj):
    """Return the first route-target name found on the VN's routing instances.

    Retries (via @retries) while the RI list or its RT refs are still
    empty — presumably populated asynchronously by the schema
    transformer. Returns '' when every RI could be read but none had
    RT refs.
    """
    ri_list = vn_obj.get_routing_instances() or []
    if len(ri_list) == 0:
        raise Exception("RI of vn %s is empty!!" %
                        vn_obj.get_fq_name()[-1])
    for ri in ri_list:
        ri_uuid = ri.get('uuid')
        riobj = self._vnc_lib.routing_instance_read(id=ri_uuid)
        if not riobj:
            continue
        rt_refs = riobj.get_route_target_refs() or []
        if len(rt_refs) == 0:
            raise Exception("RT of vn %s RI %s is empty!! Retrying..." %
                            (vn_obj.get_fq_name()[-1],
                             riobj.get_fq_name()[-1]))
        # Only the first RT ref is of interest.
        for rt in rt_refs:
            return rt.get('to')[0]
    print("VN %s RT Empty!! Retrying..." % (vn_obj.get_fq_name()[-1]))
    return ''
# end _get_route_target
def get_dci_policy_name(self, obj):
    """Return the generated import-policy name for a VN/LR/DCI object."""
    return "__contrail_{0}_{1}-import".format(
        obj.get_fq_name()[-1], obj.get_uuid())
def get_dci_policy_comment(self, dci):
    """Return the expected comment string generated for a DCI's policy."""
    return "/* {0} DataCenter InterConnect: {1}, UUID: {2} */".format(
        dci.get_data_center_interconnect_mode(),
        dci.get_fq_name()[-1],
        dci.get_uuid())
def get_asn_and_addr(self, obj):
    """Re-read a BGP router from the API and return a 4-tuple:
    (autonomous_system, address, address families, hold_time).

    NOTE(review): reaches into the private _bgp_router_parameters
    attribute, so it depends on vnc_api object internals.
    """
    rd_obj = self._vnc_lib.bgp_router_read(id=obj.get_uuid())
    return rd_obj._bgp_router_parameters.autonomous_system, \
        rd_obj._bgp_router_parameters.address, \
        rd_obj._bgp_router_parameters.address_families.family, \
        rd_obj._bgp_router_parameters.hold_time
def get_bgp_name(self, dci_name, l_asn, p_asn):
    """Name of the generated BGP group: suffix 'e' for eBGP (differing
    ASNs), 'i' for iBGP (same ASN)."""
    if l_asn != p_asn:
        suffix = 'e'
    else:
        suffix = 'i'
    return "_contrail_%s-%s" % (dci_name, suffix)
def create_bgp_policy(self, name, comment, import_targets):
    """Build the expected abstract-config dict for one BGP import policy."""
    policy = dict(name=name, comment=comment)
    policy['import_targets'] = import_targets
    return policy
def create_bgp_config(self, name, l_addr, l_asn, l_family, l_hold_time):
    """Build the skeleton of an expected BGP group abstract config.

    The session type is derived from the name suffix produced by
    get_bgp_name(): '-e' means external (eBGP), otherwise internal.
    peers/import_policy/policies start empty and are filled in later.
    """
    if name.endswith('-e'):
        session_type = 'external'
    else:
        session_type = 'internal'
    return {
        'name': name,
        'type_': session_type,
        'ip_address': l_addr,
        'autonomous_system': l_asn,
        'families': l_family,
        'hold_time': l_hold_time,
        'import_policy': [],
        'peers': [],
        'policies': [],
    }
def add_peers_to_bgp_config(self, config, p_address, p_asn):
    """Append one expected BGP peer entry to a config built by create_bgp_config()."""
    peer = {'ip_address': p_address, 'autonomous_system': p_asn}
    config['peers'].append(peer)
def _create_bgp_abstract_cfg(self, pr_name, dict_prs,
                             dci_names, dict_dcis, dict_lrs,
                             vnlist, dict_vns, dict_vn_rt, dict_fabrics):
    """Build the expected BGP abstract config for one physical router.

    Scans dci_names for L2 and/or L3 DCI objects, builds the matching
    import policies (per-VN for L2 mode, per-remote-LR for L3 mode),
    then creates one expected BGP group per peer on the opposite fabric
    and attaches the relevant policies and peer addresses/ASNs.

    Fixed: the boolean flags were compared with `== True` (PEP 8 E712);
    they are now used directly as truth values. The dead commented-out
    `dci_names.sort()` line was removed. Behavior is otherwise unchanged.

    Returns
    -------
    dict
        Maps generated BGP group name -> expected config dict as produced
        by create_bgp_config()/add_peers_to_bgp_config().
    """
    bgp_cfgs = {}
    l2 = False
    l3 = False
    l2_import_policy = []
    l3_import_policy = []
    l2_policies = []
    l3_policies = []
    # Collect the expected import policies per DCI mode.
    for name in dci_names:
        if 'l2' in name:
            l2 = True
            # L2 mode: one import policy per stretched virtual network.
            for vn_name in vnlist:
                policy_name = self.get_dci_policy_name(dict_vns[vn_name])
                l2_policies.append(self.create_bgp_policy(
                    name=policy_name,
                    comment=self.get_dci_policy_comment(dict_dcis[name]),
                    import_targets=[dict_vn_rt[vn_name]]))
                l2_import_policy.append(policy_name)
        elif 'l3' in name:
            l3 = True
            # L3 mode: one import policy per LR that does NOT live on
            # this PR (i.e. per remote logical router).
            for lr_name, obj in dict_lrs.items():
                if pr_name not in lr_name:
                    policy_name = self.get_dci_policy_name(obj)
                    l3_import_policy.append(policy_name)
                    l3_policies.append(self.create_bgp_policy(
                        name=policy_name,
                        comment=self.get_dci_policy_comment(
                            dict_dcis[name]),
                        import_targets=[dict_vn_rt[lr_name]]))
    # Peers live on the opposite fabric.
    peer_fabric = 'fabric2' if 'fabric1' in pr_name else 'fabric1'
    peer_prs = []
    for name in dict_prs.keys():
        if peer_fabric in name:
            peer_prs.append(name)
    l_asn, l_addr, l_family, l_hold_time = \
        self.get_asn_and_addr(dict_prs[pr_name]['br'])
    if l2 and l3:
        # Both modes present: spines ('PR1_') import L3 + L2 policies,
        # leaves import only the L2 policies.
        for peer_name in peer_prs:
            p_asn, p_addr, _, _ = self.get_asn_and_addr(
                dict_prs[peer_name]['br'])
            bgp_name = self.get_bgp_name(
                dict_fabrics[peer_fabric].get_fq_name()[-1], l_asn, p_asn)
            if bgp_name not in bgp_cfgs:
                import_policy = []
                policies = []
                if 'PR1_' in pr_name:
                    import_policy.extend(l3_import_policy)
                    policies.extend(l3_policies)
                import_policy.extend(l2_import_policy)
                policies.extend(l2_policies)
                bgp_cfgs[bgp_name] = self.create_bgp_config(
                    bgp_name, l_addr, l_asn, l_family, l_hold_time)
                bgp_cfgs[bgp_name]['import_policy'].extend(import_policy)
                bgp_cfgs[bgp_name]['policies'].extend(policies)
            self.add_peers_to_bgp_config(bgp_cfgs[bgp_name],
                                         p_addr, p_asn)
    elif l3:
        if len(l3_import_policy) == 0:
            # Nothing to import: no BGP group is expected at all.
            return bgp_cfgs
        # L3-only: groups are created only towards remote spines ('PR1_').
        for peer_name in peer_prs:
            if 'PR1_' not in peer_name:
                continue
            p_asn, p_addr, _, _ = self.get_asn_and_addr(
                dict_prs[peer_name]['br'])
            bgp_name = self.get_bgp_name(
                dict_fabrics[peer_fabric].get_fq_name()[-1], l_asn, p_asn)
            if bgp_name not in bgp_cfgs:
                bgp_cfgs[bgp_name] = self.create_bgp_config(
                    bgp_name, l_addr, l_asn, l_family, l_hold_time)
                bgp_cfgs[bgp_name]['import_policy'].extend(
                    l3_import_policy)
                bgp_cfgs[bgp_name]['policies'].extend(l3_policies)
            self.add_peers_to_bgp_config(bgp_cfgs[bgp_name],
                                         p_addr, p_asn)
    elif l2:
        # L2-only: every remote PR becomes a peer with the L2 policies.
        for peer_name in peer_prs:
            p_asn, p_addr, _, _ = self.get_asn_and_addr(
                dict_prs[peer_name]['br'])
            bgp_name = self.get_bgp_name(
                dict_fabrics[peer_fabric].get_fq_name()[-1], l_asn, p_asn)
            if bgp_name not in bgp_cfgs:
                bgp_cfgs[bgp_name] = self.create_bgp_config(
                    bgp_name, l_addr, l_asn, l_family, l_hold_time)
                bgp_cfgs[bgp_name]['import_policy'].extend(
                    l2_import_policy)
                bgp_cfgs[bgp_name]['policies'].extend(l2_policies)
            self.add_peers_to_bgp_config(bgp_cfgs[bgp_name],
                                         p_addr, p_asn)
    return bgp_cfgs
# end _create_bgp_abstract_cfg
def _get_abstract_cfg_bgp(self, a_bgp, bgp_name):
    """Return the BGP group dict named `bgp_name` from the abstract
    config's bgp list, or None if no such group exists."""
    matches = (bgp for bgp in a_bgp if bgp.get('name', '') == bgp_name)
    return next(matches, None)
def _validate_abstract_cfg_bgp_dci(self, e_bgps, a_bgp):
    """Assert every expected BGP group (e_bgps) appears in the abstract
    config's bgp list (a_bgp) with matching scalar fields, address
    families, peers, import policies and policy contents.

    NOTE(review): peers and policies are matched by linear search with a
    `break` on the first name/address hit; an expected entry that is
    absent is caught only by the preceding length check, not by a direct
    "not found" assertion.
    """
    for bgp_name, e_bgp in e_bgps.items():
        bgp = self._get_abstract_cfg_bgp(a_bgp, bgp_name)
        self.assertIsNotNone(bgp)
        # Scalar fields must match exactly.
        self.assertEqual(bgp.get('name'), e_bgp['name'])
        self.assertEqual(bgp.get('type_'), e_bgp['type_'])
        self.assertEqual(bgp.get('ip_address'), e_bgp['ip_address'])
        self.assertEqual(bgp.get('hold_time'), e_bgp['hold_time'])
        self.assertEqual(bgp.get('autonomous_system'),
                         e_bgp['autonomous_system'])
        if len(e_bgp['families']) > 0:
            families = bgp.get('families')
            self.assertIsNotNone(families)
            self.assertEqual(len(families),
                             len(e_bgp['families']))
            for e_families in e_bgp['families']:
                # The abstract config spells 'e-vpn' as 'evpn'.
                if e_families == 'e-vpn':
                    self.assertIn('evpn', families)
                else:
                    self.assertIn(e_families, families)
        # Peers: same count, and matching ASN for each matched address.
        peers = bgp.get('peers')
        self.assertIsNotNone(peers)
        self.assertEqual(len(peers), len(e_bgp['peers']))
        for e_peer in e_bgp['peers']:
            for peer in peers:
                if peer.get('ip_address') == e_peer['ip_address']:
                    self.assertEqual(peer.get('autonomous_system'),
                                     e_peer['autonomous_system'])
                    break
        if len(e_bgp['import_policy']) > 0:
            import_policy = bgp.get('import_policy')
            self.assertIsNotNone(import_policy)
            self.assertEqual(len(import_policy),
                             len(e_bgp['import_policy']))
            for import_p in e_bgp['import_policy']:
                self.assertIn(import_p, import_policy)
        if len(e_bgp['policies']) > 0:
            policies = bgp.get('policies')
            self.assertIsNotNone(policies)
            self.assertEqual(len(policies),
                             len(e_bgp['policies']))
            # Each expected policy must exist by name with the same
            # comment and the same set of import targets.
            for e_policy in e_bgp['policies']:
                for policy in policies:
                    if policy.get('name', '') == e_policy['name']:
                        self.assertEqual(policy.get('comment'),
                                         e_policy['comment'])
                        import_targets = policy.get('import_targets')
                        self.assertIsNotNone(import_targets)
                        self.assertEqual(len(import_targets),
                                         len(e_policy['import_targets']))
                        for e_i_target in e_policy['import_targets']:
                            self.assertIn(e_i_target, import_targets)
                        break
# end _validate_abstract_cfg_bgp_dci
@retries(4, hook=retry_exc_handler)
def _validate_abstract_cfg_dci_gateway(self, pr_name, dict_prs,
dci_names, dict_dcis, dict_lrs,
vnlist, dict_vns, dict_vn_rt,
dict_fabrics):
pr_obj = dict_prs[pr_name]['pr']
pr_new_name = ''
if 'PR1_' in pr_name:
pr_new_name = 'qfx10008' if \
pr_obj.get_physical_router_product_name() == 'qfx10002' \
else 'qfx10002'
else:
pr_new_name = 'mx240' if \
pr_obj.get_physical_router_product_name() == 'mx80' \
else 'mx80'
pr_obj.set_physical_router_product_name(pr_new_name)
self._vnc_lib.physical_router_update(pr_obj)
gevent.sleep(2)
ac1 = self.check_dm_ansible_config_push()
dac = ac1.get('device_abstract_config')
self.assertIsNotNone(dac.get('features'))
o_bgp = dac.get('features').get('overlay-bgp')
self.assertIsNotNone(o_bgp)
o_bgp = dac.get('features').get('overlay-bgp')
self.assertIsNotNone(o_bgp)
self.assertEqual(o_bgp.get('name'), 'overlay-bgp')
a_bgp = o_bgp.get('bgp')
self.assertIsNotNone(a_bgp)
if pr_name not in dac.get('system', {}).get('name', ''):
error = "Looking for Abstract config for %s But " \
"recieved config for %s, retrying..." % \
(pr_name, dac.get('system', {}).get('name', ''))
raise Exception(error)
# now create expected | |
# coding: utf-8
# pylint: disable=invalid-name, protected-access, too-many-locals, too-many-arguments, too-many-statements
"""Executor manager."""
from __future__ import absolute_import
import logging
import numpy as np
from .base import mx_real_t
from . import ndarray as nd
from .context import cpu
from .io import DataDesc
def _split_input_slice(batch_size, work_load_list):
"""Get input slice from the input shape.
Parameters
----------
batch_size : int
The number of samples in a mini-batch.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Returns
-------
slices : list of slice
The split slices to get a specific slice.
Raises
------
ValueError
If there are two many splits such that some slice can be empty.
"""
total_work_load = sum(work_load_list)
batch_num_list = [round(work_load * batch_size / total_work_load)
for work_load in work_load_list]
batch_num_sum = sum(batch_num_list)
if batch_num_sum < batch_size:
batch_num_list[-1] += batch_size - batch_num_sum
slices = []
end = 0
for batch_num in batch_num_list:
begin = int(min((end, batch_size)))
end = int(min((begin + batch_num, batch_size)))
if begin >= end:
raise ValueError('Too many slices such that some splits are empty')
slices.append(slice(begin, end))
return slices
def _check_arguments(symbol):
"""Check the argument names of symbol.
This function checks the duplication of arguments in Symbol.
The check is done for feedforward net for now.
Parameters
----------
symbol : Symbol
The network configuration.
"""
arg_set = set()
arg_names = symbol.list_arguments()
for name in arg_names:
if name in arg_set:
raise ValueError(('Find duplicated argument name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s') % (name, str(arg_names)))
arg_set.add(name)
aux_set = set()
aux_names = symbol.list_auxiliary_states()
for name in aux_names:
if name in aux_set:
raise ValueError(
('Find duplicated auxiliary param name \"%s\", ' +
'please make the weight name non-duplicated(using name arguments), ' +
'arguments are %s, auxiliary params are %s'
) % (name, str(arg_names), str(aux_names)))
aux_set.add(name)
def _load_general(data, targets):
    """Copy each source array into its destination array(s).

    Each element of *targets* is either a single NDArray, or a list of
    (slice, NDArray) pairs describing how the source batch is split
    across devices.
    """
    for src, dst in zip(data, targets):
        if isinstance(dst, nd.NDArray):
            src.copyto(dst)
        else:
            # the last slice's stop must equal the source's batch size
            expected = dst[-1][0].stop
            assert expected == src.shape[0], \
                "Batch size miss match. Expected %d, got %d"%( \
                expected, src.shape[0])
            for islice, arr in dst:
                src[islice].copyto(arr)
def _load_data(batch, targets):
    """Copy the batch's data arrays into the sliced target arrays."""
    _load_general(batch.data, targets)
def _load_label(batch, targets):
    """Copy the batch's label arrays into the sliced target arrays."""
    _load_general(batch.label, targets)
# pylint: disable=too-many-branches
def _bind_exec(sym, ctx, input_shapes, param_names, need_grad=False,
               base_exec=None, shared_data_arrays=None, input_types=None, logger=logging):
    """Bind an executor for bucketing, potentially sharing data with an existing executor.

    Parameters
    ----------
    sym : Symbol
        The symbol to bind.
    ctx : Context
        Device on which the executor's arrays are allocated.
    input_shapes : dict of str to tuple
        Shapes of the data/label inputs, used for shape/type inference.
    param_names : list of str
        Argument names that are model parameters; all other arguments
        are treated as data/label inputs.
    need_grad : bool or set of str, optional
        True to compute gradients for every non-input argument, a set to
        name the arguments needing gradients, or False for none.
    base_exec : Executor, optional
        Existing executor whose parameter/aux arrays are borrowed.
    shared_data_arrays : dict of str to NDArray, optional
        Per-device cache of input arrays shared across buckets; updated
        in place when a larger allocation is required.
    input_types : dict of str to dtype, optional
        Types of the inputs; defaults to mx_real_t for every input.
    logger : logging logger, optional

    Returns
    -------
    Executor
        The bound executor.
    """
    # Infer the shape and type of every argument from the declared inputs.
    arg_shape, _, aux_shape = sym.infer_shape(**input_shapes)
    assert(arg_shape is not None)
    if input_types is None:
        input_types = {k: mx_real_t for k in input_shapes.keys()}
    arg_types, _, aux_types = sym.infer_type(**input_types)
    assert(arg_types is not None)

    arg_arrays = []
    # grad_arrays stays None when no gradients are requested at all.
    grad_arrays = {} if need_grad != False else None

    arg_names = sym.list_arguments()

    # Normalize need_grad into a set of argument names.
    if need_grad is False:
        need_grad = set()
    elif need_grad is True:
        # every argument except the data/label inputs
        need_grad = set(arg_names) - set(input_shapes.keys())
    elif isinstance(need_grad, set):
        pass
    else:
        raise AssertionError("need_grad must be boolean or set.")
    grad_req = {name:('write' if name in need_grad else 'null') for name in arg_names}

    # create or borrow arguments and gradients
    for i, name in enumerate(arg_names):
        if not name in param_names:
            # data or label
            if shared_data_arrays is not None and \
                    name in shared_data_arrays:
                arg_arr = shared_data_arrays[name]
                if np.prod(arg_arr.shape) >= np.prod(arg_shape[i]):
                    # good, we can share this memory
                    assert(arg_types[i] == arg_arr.dtype)
                    arg_arr = arg_arr.reshape(arg_shape[i])
                else:
                    # cached array is too small: warn and re-allocate
                    logger.warning(('bucketing: data "%s" has a shape %s' % (name, arg_shape[i])) +
                                   (', which is larger than already allocated ') +
                                   ('shape %s' % (arg_arr.shape,)) +
                                   ('. Need to re-allocate. Consider putting ') +
                                   ('default_bucket_key to be the bucket taking the largest ') +
                                   ('input for better memory sharing.'))
                    arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
                    # replace existing shared array because the new one is bigger
                    shared_data_arrays[name] = arg_arr
            else:
                arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
                if shared_data_arrays is not None:
                    shared_data_arrays[name] = arg_arr
            arg_arrays.append(arg_arr)
        else:
            # model parameter
            if base_exec is None:
                # fresh allocation for the parameter (and its gradient if needed)
                arg_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
                if name in need_grad:
                    grad_arr = nd.zeros(arg_shape[i], ctx, dtype=arg_types[i])
                    grad_arrays[name] = grad_arr
            else:
                # borrow from the existing executor; shape/type must match exactly
                arg_arr = base_exec.arg_dict[name]
                assert arg_arr.shape == arg_shape[i]
                assert arg_arr.dtype == arg_types[i]
                if name in need_grad:
                    grad_arrays[name] = base_exec.grad_dict[name]
            arg_arrays.append(arg_arr)

    # create or borrow aux variables
    if base_exec is None:
        aux_arrays = [nd.zeros(s, ctx, dtype=t) for s, t in zip(aux_shape, aux_types)]
    else:
        for i, a in enumerate(base_exec.aux_arrays):
            assert aux_shape[i] == a.shape
            assert aux_types[i] == a.dtype
        aux_arrays = [a for a in base_exec.aux_arrays]

    executor = sym.bind(ctx=ctx, args=arg_arrays, args_grad=grad_arrays,
                        aux_states=aux_arrays,
                        grad_req=grad_req, shared_exec=base_exec)
    return executor
class DataParallelExecutorGroup(object):
    """A group of executors living on different devices, for data parallelization.

    Parameters
    ----------
    sym: Symbol
        The network configuration.
    arg_names: list of str
        Equals `sym.list_arguments()`
    param_names: list of str
        List of names of all trainable parameters.
    ctx: list of Context
        List of devices for training (data parallelization).
    slices: list of slice
        Describes how the data parallelization splits data into different devices.
    train_data: DataIter (or DataBatch)
        The dataset for training. It could be any object with `provide_data` and
        `provide_label` properties. Loading of actual data is not necessarily needed
        at this stage.
    shared_group: DataParallelExecutorGroup
        An existing executor group, if to share parameters with it.
    """
    def __init__(self, sym, arg_names, param_names, ctx, slices, train_data, shared_group=None):
        # make sure the architecture is valid
        _check_arguments(sym)

        if shared_group is None:
            # one input-array cache per device
            self.shared_data_arrays = [{} for _ in ctx]
        else:
            # reuse the per-device input caches of the shared group
            self.shared_data_arrays = shared_group.shared_data_arrays

        self.data_names = [x[0] for x in train_data.provide_data]
        self.label_names = [x[0] for x in train_data.provide_label]
        self.aux_names = sym.list_auxiliary_states()
        # indices of the trainable parameters within arg_names
        self.param_idx = [i for i in range(len(arg_names)) if arg_names[i] in param_names]
        self.param_names = [arg_names[i] for i in self.param_idx]

        # bind one training executor per device
        self.train_execs = []
        for i, ctxi in enumerate(ctx):
            # per-device input shapes: the batch axis is this device's slice size
            data_shapes = {}
            data_types = {}
            for x in train_data.provide_data + train_data.provide_label:
                data_shapes[x[0]] = tuple([slices[i].stop - slices[i].start] + list(x[1][1:]))
                if isinstance(x, DataDesc):
                    data_types[x.name] = x.dtype
                else:
                    data_types[x[0]] = mx_real_t
            shared_exec = None if shared_group is None else shared_group.train_execs[i]
            train_exec = _bind_exec(sym, ctxi, data_shapes, self.param_names,
                                    need_grad=True, base_exec=shared_exec,
                                    shared_data_arrays=self.shared_data_arrays[i],
                                    input_types=data_types)
            self.train_execs.append(train_exec)

        # data structure
        # (slice, NDArray) pairs per input name, one entry per device
        self.data_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
                            for name in self.data_names]
        self.label_arrays = [[(slices[i], e.arg_dict[name]) for i, e in enumerate(self.train_execs)]
                             for name in self.label_names]

        # per-parameter lists of arrays/gradients across devices
        self.param_arrays = [[e.arg_arrays[i] for e in self.train_execs]
                             for i in self.param_idx]
        self.grad_arrays = [[e.grad_arrays[i] for e in self.train_execs]
                            for i in self.param_idx]

        self.aux_arrays = [[e.aux_arrays[i] for e in self.train_execs]
                           for i in range(len(self.aux_names))]

        self.slices = slices

    def load_data_batch(self, data_batch):
        """Load data and labels into arrays."""
        _load_data(data_batch, self.data_arrays)
        _load_label(data_batch, self.label_arrays)

    def forward(self, is_train=False):
        """Perform a forward pass on each executor."""
        for texec in self.train_execs:
            texec.forward(is_train=is_train)

    def backward(self):
        """Perform a backward pass on each executor."""
        for texec in self.train_execs:
            texec.backward()

    def update_metric(self, metric, labels):
        """Update evaluation metric with label and current outputs."""
        for texec, islice in zip(self.train_execs, self.slices):
            # each executor saw only its slice of the batch
            labels_slice = [label[islice] for label in labels]
            metric.update(labels_slice, texec.outputs)
class DataParallelExecutorManager(object):
""" Helper class to manage multiple executors for data parallelism.
Parameters
----------
symbol : Symbol
Output symbol.
ctx : list of Context
Devices to run on.
param_names: list of str
Name of all trainable parameters of the network.
arg_names: list of str
Name of all arguments of the network.
aux_names: list of str
Name of all auxiliary states of the network.
train_data : DataIter
Training data iterator.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ctx.
logger : logging logger
When not specified, default logger will be used.
sym_gen : A function that generate new Symbols depending on different
input shapes. Used only for bucketing.
"""
def __init__(self, symbol, ctx, train_data,
             arg_names, param_names, aux_names,
             work_load_list=None, logger=None, sym_gen=None):
    """Split the batch across devices and bind the initial executor group."""
    if logger is None:
        logger = logging
    # preparation
    num_device = len(ctx)
    logger.info('Start training with %s', str(ctx))

    if work_load_list is None:
        # equal work load on every device by default
        work_load_list = [1] * num_device
    assert isinstance(work_load_list, list) and len(work_load_list) == num_device, \
        "Invalid settings for work load. "

    # split each mini-batch into per-device slices proportional to the work load
    slices = _split_input_slice(train_data.batch_size, work_load_list)
    self.slices = slices

    self.arg_names = arg_names
    self.param_names = param_names
    self.aux_names = aux_names
    self.ctx = ctx

    self.execgrp = DataParallelExecutorGroup(symbol, self.arg_names, self.param_names, self.ctx,
                                             self.slices, train_data)

    self.symbol = symbol
    self.sym_gen = sym_gen
    self.curr_execgrp = None # this is set when data is loaded
    if self.sym_gen is not None:
        # cache executor groups per bucket key for bucketing training
        self.execgrp_bucket = {train_data.default_bucket_key: self.execgrp}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.